diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6c9f0b0c7d6cdebea670170c9a16ad70e98e52de
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/_util.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/_util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1b4232d55d5699b60ee05f36e51ab2cc3cfdf5f3
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/_util.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/api.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/api.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f727d734217fb059dd763900b397f6bea630dd92
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/api.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/clipboards.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/clipboards.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f09629168dfe8e56b008bce97f0685e18c768aa6
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/clipboards.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/common.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f9c5548ebb839aa7fc2b3e8d7aba616855eb4d92
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/common.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/feather_format.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/feather_format.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..866d4e309c822c050f24c2df0609a00280f6fc8d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/feather_format.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/gbq.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/gbq.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3facdb13606cdf7d43b5c7d35e68f366d913aa44
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/gbq.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/html.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/html.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..50b6201b46e3c21f2e93c6641b1445f63583b536
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/html.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/orc.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/orc.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..87a86714060a39a267dd46bbe6be9d5638316fca
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/orc.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/parquet.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/parquet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..42cb27cf28ab1a818e5ba7c9d931304f16d5c8d9
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/parquet.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/pickle.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/pickle.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d34e68ec4b428f8e2e255e13c6a8d939aa0b0319
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/pickle.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/pytables.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/pytables.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..641be25a1b0bf42584ee338ed6f8bf82dc9b4117
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/pytables.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/spss.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/spss.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9d13579688c1c7b8cd889703cb043a56486c0b36
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/spss.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/sql.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/sql.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8750742a95c0871af9ae6094afee61c995b7ce5a
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/sql.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/stata.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/stata.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f88b96d1240473203a555ba37dc5430ca24966f6
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/stata.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/xml.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/xml.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5fd78ae6f3981b52f899aed066e15b1bedbcd70e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/xml.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/_util.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b2ae5daffdbaf515a330a54a83e550751e29fdb
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/_util.py
@@ -0,0 +1,34 @@
+from __future__ import annotations
+
+from typing import Callable
+
+from pandas.compat._optional import import_optional_dependency
+
+import pandas as pd
+
+
+def _arrow_dtype_mapping() -> dict:
+    pa = import_optional_dependency("pyarrow")
+    return {
+        pa.int8(): pd.Int8Dtype(),
+        pa.int16(): pd.Int16Dtype(),
+        pa.int32(): pd.Int32Dtype(),
+        pa.int64(): pd.Int64Dtype(),
+        pa.uint8(): pd.UInt8Dtype(),
+        pa.uint16(): pd.UInt16Dtype(),
+        pa.uint32(): pd.UInt32Dtype(),
+        pa.uint64(): pd.UInt64Dtype(),
+        pa.bool_(): pd.BooleanDtype(),
+        pa.string(): pd.StringDtype(),
+        pa.float32(): pd.Float32Dtype(),
+        pa.float64(): pd.Float64Dtype(),
+    }
+
+
+def arrow_string_types_mapper() -> Callable:
+    pa = import_optional_dependency("pyarrow")
+
+    return {
+        pa.string(): pd.StringDtype(storage="pyarrow_numpy"),
+        pa.large_string(): pd.StringDtype(storage="pyarrow_numpy"),
+    }.get
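Review note: `_arrow_dtype_mapping` returns a plain dict keyed by pyarrow types, and `arrow_string_types_mapper` returns the dict's bound `.get` so it can be handed to pyarrow as a callable. A minimal sketch of the consuming pattern, assuming pyarrow is installed; the mapping below is hand-written for illustration rather than imported from this private helper:

    # pyarrow's Table.to_pandas(types_mapper=...) calls the mapper once per
    # column type; returning None falls back to the default conversion, which
    # is why a dict's .get can be passed directly.
    import pyarrow as pa
    import pandas as pd

    mapping = {pa.int64(): pd.Int64Dtype(), pa.bool_(): pd.BooleanDtype()}
    table = pa.table({"a": [1, None, 3], "flag": [True, None, False]})
    df = table.to_pandas(types_mapper=mapping.get)
    print(df.dtypes)  # a -> Int64, flag -> boolean (both nullable)
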
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/clipboard/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/clipboard/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6491849925e863c35a98390a31729cb13e28ca19
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/clipboard/__init__.py
@@ -0,0 +1,747 @@
+"""
+Pyperclip
+
+A cross-platform clipboard module for Python,
+with copy & paste functions for plain text.
+By Al Sweigart al@inventwithpython.com
+Licence at LICENSES/PYPERCLIP_LICENSE
+
+Usage:
+  import pyperclip
+  pyperclip.copy('The text to be copied to the clipboard.')
+  spam = pyperclip.paste()
+
+  if not pyperclip.is_available():
+    print("Copy functionality unavailable!")
+
+On Windows, no additional modules are needed.
+On Mac, the pyobjc module is used, falling back to the pbcopy and pbpaste cli
+    commands. (These commands should come with OS X.).
+On Linux, install xclip, xsel, or wl-clipboard (for "wayland" sessions) via
+package manager.
+For example, in Debian:
+    sudo apt-get install xclip
+    sudo apt-get install xsel
+    sudo apt-get install wl-clipboard
+
+Otherwise on Linux, you will need the PyQt5 modules installed.
+
+This module does not work with PyGObject yet.
+
+Cygwin is currently not supported.
+
+Security Note: This module runs programs with these names:
+    - pbcopy
+    - pbpaste
+    - xclip
+    - xsel
+    - wl-copy/wl-paste
+    - klipper
+    - qdbus
+A malicious user could rename or add programs with these names, tricking
+Pyperclip into running them with whatever permissions the Python process has.
+
+"""
+
+__version__ = "1.8.2"
+
+
+import contextlib
+import ctypes
+from ctypes import (
+    c_size_t,
+    c_wchar,
+    c_wchar_p,
+    get_errno,
+    sizeof,
+)
+import os
+import platform
+from shutil import which as _executable_exists
+import subprocess
+import time
+import warnings
+
+from pandas.errors import (
+    PyperclipException,
+    PyperclipWindowsException,
+)
+from pandas.util._exceptions import find_stack_level
+
+# `import PyQt4` sys.exit()s if DISPLAY is not in the environment.
+# Thus, we need to detect the presence of $DISPLAY manually
+# and not load PyQt4 if it is absent.
+HAS_DISPLAY = os.getenv("DISPLAY")
+
+EXCEPT_MSG = """
+    Pyperclip could not find a copy/paste mechanism for your system.
+    For more information, please visit
+    https://pyperclip.readthedocs.io/en/latest/index.html#not-implemented-error
+    """
+
+ENCODING = "utf-8"
+
+
+class PyperclipTimeoutException(PyperclipException):
+    pass
+
+
+def _stringifyText(text) -> str:
+    acceptedTypes = (str, int, float, bool)
+    if not isinstance(text, acceptedTypes):
+        raise PyperclipException(
+            f"only str, int, float, and bool values "
+            f"can be copied to the clipboard, not {type(text).__name__}"
+        )
+    return str(text)
+
+
+def init_osx_pbcopy_clipboard():
+    def copy_osx_pbcopy(text):
+        text = _stringifyText(text)  # Converts non-str values to str.
+        with subprocess.Popen(
+            ["pbcopy", "w"], stdin=subprocess.PIPE, close_fds=True
+        ) as p:
+            p.communicate(input=text.encode(ENCODING))
+
+    def paste_osx_pbcopy():
+        with subprocess.Popen(
+            ["pbpaste", "r"], stdout=subprocess.PIPE, close_fds=True
+        ) as p:
+            stdout = p.communicate()[0]
+        return stdout.decode(ENCODING)
+
+    return copy_osx_pbcopy, paste_osx_pbcopy
+
+
+def init_osx_pyobjc_clipboard():
+    def copy_osx_pyobjc(text):
+        """Copy string argument to clipboard"""
+        text = _stringifyText(text)  # Converts non-str values to str.
+        newStr = Foundation.NSString.stringWithString_(text).nsstring()
+        newData = newStr.dataUsingEncoding_(Foundation.NSUTF8StringEncoding)
+        board = AppKit.NSPasteboard.generalPasteboard()
+        board.declareTypes_owner_([AppKit.NSStringPboardType], None)
+        board.setData_forType_(newData, AppKit.NSStringPboardType)
+
+    def paste_osx_pyobjc():
+        """Returns contents of clipboard"""
+        board = AppKit.NSPasteboard.generalPasteboard()
+        content = board.stringForType_(AppKit.NSStringPboardType)
+        return content
+
+    return copy_osx_pyobjc, paste_osx_pyobjc
+
+
+def init_qt_clipboard():
+    global QApplication
+    # $DISPLAY should exist
+
+    # Try to import from qtpy, but if that fails try PyQt5 then PyQt4
+    try:
+        from qtpy.QtWidgets import QApplication
+    except ImportError:
+        try:
+            from PyQt5.QtWidgets import QApplication
+        except ImportError:
+            from PyQt4.QtGui import QApplication
+
+    app = QApplication.instance()
+    if app is None:
+        app = QApplication([])
+
+    def copy_qt(text):
+        text = _stringifyText(text)  # Converts non-str values to str.
+        cb = app.clipboard()
+        cb.setText(text)
+
+    def paste_qt() -> str:
+        cb = app.clipboard()
+        return str(cb.text())
+
+    return copy_qt, paste_qt
+
+
+def init_xclip_clipboard():
+    DEFAULT_SELECTION = "c"
+    PRIMARY_SELECTION = "p"
+
+    def copy_xclip(text, primary=False):
+        text = _stringifyText(text)  # Converts non-str values to str.
+        selection = DEFAULT_SELECTION
+        if primary:
+            selection = PRIMARY_SELECTION
+        with subprocess.Popen(
+            ["xclip", "-selection", selection], stdin=subprocess.PIPE, close_fds=True
+        ) as p:
+            p.communicate(input=text.encode(ENCODING))
+
+    def paste_xclip(primary=False):
+        selection = DEFAULT_SELECTION
+        if primary:
+            selection = PRIMARY_SELECTION
+        with subprocess.Popen(
+            ["xclip", "-selection", selection, "-o"],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            close_fds=True,
+        ) as p:
+            stdout = p.communicate()[0]
+        # Intentionally ignore extraneous output on stderr when clipboard is empty
+        return stdout.decode(ENCODING)
+
+    return copy_xclip, paste_xclip
+
+
+def init_xsel_clipboard():
+    DEFAULT_SELECTION = "-b"
+    PRIMARY_SELECTION = "-p"
+
+    def copy_xsel(text, primary=False):
+        text = _stringifyText(text)  # Converts non-str values to str.
+        selection_flag = DEFAULT_SELECTION
+        if primary:
+            selection_flag = PRIMARY_SELECTION
+        with subprocess.Popen(
+            ["xsel", selection_flag, "-i"], stdin=subprocess.PIPE, close_fds=True
+        ) as p:
+            p.communicate(input=text.encode(ENCODING))
+
+    def paste_xsel(primary=False):
+        selection_flag = DEFAULT_SELECTION
+        if primary:
+            selection_flag = PRIMARY_SELECTION
+        with subprocess.Popen(
+            ["xsel", selection_flag, "-o"], stdout=subprocess.PIPE, close_fds=True
+        ) as p:
+            stdout = p.communicate()[0]
+        return stdout.decode(ENCODING)
+
+    return copy_xsel, paste_xsel
+
+
+def init_wl_clipboard():
+    PRIMARY_SELECTION = "-p"
+
+    def copy_wl(text, primary=False):
+        text = _stringifyText(text)  # Converts non-str values to str.
+        args = ["wl-copy"]
+        if primary:
+            args.append(PRIMARY_SELECTION)
+        if not text:
+            args.append("--clear")
+            subprocess.check_call(args, close_fds=True)
+        else:
+            p = subprocess.Popen(args, stdin=subprocess.PIPE, close_fds=True)
+            p.communicate(input=text.encode(ENCODING))
+
+    def paste_wl(primary=False):
+        args = ["wl-paste", "-n"]
+        if primary:
+            args.append(PRIMARY_SELECTION)
+        p = subprocess.Popen(args, stdout=subprocess.PIPE, close_fds=True)
+        stdout, _stderr = p.communicate()
+        return stdout.decode(ENCODING)
+
+    return copy_wl, paste_wl
+
+
+def init_klipper_clipboard():
+    def copy_klipper(text):
+        text = _stringifyText(text)  # Converts non-str values to str.
+        with subprocess.Popen(
+            [
+                "qdbus",
+                "org.kde.klipper",
+                "/klipper",
+                "setClipboardContents",
+                text.encode(ENCODING),
+            ],
+            stdin=subprocess.PIPE,
+            close_fds=True,
+        ) as p:
+            p.communicate(input=None)
+
+    def paste_klipper():
+        with subprocess.Popen(
+            ["qdbus", "org.kde.klipper", "/klipper", "getClipboardContents"],
+            stdout=subprocess.PIPE,
+            close_fds=True,
+        ) as p:
+            stdout = p.communicate()[0]
+
+        # Workaround for https://bugs.kde.org/show_bug.cgi?id=342874
+        # TODO: https://github.com/asweigart/pyperclip/issues/43
+        clipboardContents = stdout.decode(ENCODING)
+        # even if blank, Klipper will append a newline at the end
+        assert len(clipboardContents) > 0
+        # make sure that newline is there
+        assert clipboardContents.endswith("\n")
+        if clipboardContents.endswith("\n"):
+            clipboardContents = clipboardContents[:-1]
+        return clipboardContents
+
+    return copy_klipper, paste_klipper
+
+
+def init_dev_clipboard_clipboard():
+    def copy_dev_clipboard(text):
+        text = _stringifyText(text)  # Converts non-str values to str.
+        if text == "":
+            warnings.warn(
+                "Pyperclip cannot copy a blank string to the clipboard on Cygwin. "
+                "This is effectively a no-op.",
+                stacklevel=find_stack_level(),
+            )
+        if "\r" in text:
+            warnings.warn(
+                "Pyperclip cannot handle \\r characters on Cygwin.",
+                stacklevel=find_stack_level(),
+            )
+
+        with open("/dev/clipboard", "w", encoding="utf-8") as fd:
+            fd.write(text)
+
+    def paste_dev_clipboard() -> str:
+        with open("/dev/clipboard", encoding="utf-8") as fd:
+            content = fd.read()
+        return content
+
+    return copy_dev_clipboard, paste_dev_clipboard
+
+
+def init_no_clipboard():
+    class ClipboardUnavailable:
+        def __call__(self, *args, **kwargs):
+            raise PyperclipException(EXCEPT_MSG)
+
+        def __bool__(self) -> bool:
+            return False
+
+    return ClipboardUnavailable(), ClipboardUnavailable()
+
+
+# Windows-related clipboard functions:
+class CheckedCall:
+    def __init__(self, f) -> None:
+        super().__setattr__("f", f)
+
+    def __call__(self, *args):
+        ret = self.f(*args)
+        if not ret and get_errno():
+            raise PyperclipWindowsException("Error calling " + self.f.__name__)
+        return ret
+
+    def __setattr__(self, key, value):
+        setattr(self.f, key, value)
+
+
+def init_windows_clipboard():
+    global HGLOBAL, LPVOID, DWORD, LPCSTR, INT
+    global HWND, HINSTANCE, HMENU, BOOL, UINT, HANDLE
+    from ctypes.wintypes import (
+        BOOL,
+        DWORD,
+        HANDLE,
+        HGLOBAL,
+        HINSTANCE,
+        HMENU,
+        HWND,
+        INT,
+        LPCSTR,
+        LPVOID,
+        UINT,
+    )
+
+    windll = ctypes.windll
+    msvcrt = ctypes.CDLL("msvcrt")
+
+    safeCreateWindowExA = CheckedCall(windll.user32.CreateWindowExA)
+    safeCreateWindowExA.argtypes = [
+        DWORD,
+        LPCSTR,
+        LPCSTR,
+        DWORD,
+        INT,
+        INT,
+        INT,
+        INT,
+        HWND,
+        HMENU,
+        HINSTANCE,
+        LPVOID,
+    ]
+    safeCreateWindowExA.restype = HWND
+
+    safeDestroyWindow = CheckedCall(windll.user32.DestroyWindow)
+    safeDestroyWindow.argtypes = [HWND]
+    safeDestroyWindow.restype = BOOL
+
+    OpenClipboard = windll.user32.OpenClipboard
+    OpenClipboard.argtypes = [HWND]
+    OpenClipboard.restype = BOOL
+
+    safeCloseClipboard = CheckedCall(windll.user32.CloseClipboard)
+    safeCloseClipboard.argtypes = []
+    safeCloseClipboard.restype = BOOL
+
+    safeEmptyClipboard = CheckedCall(windll.user32.EmptyClipboard)
+    safeEmptyClipboard.argtypes = []
+    safeEmptyClipboard.restype = BOOL
+
+    safeGetClipboardData = CheckedCall(windll.user32.GetClipboardData)
+    safeGetClipboardData.argtypes = [UINT]
+    safeGetClipboardData.restype = HANDLE
+
+    safeSetClipboardData = CheckedCall(windll.user32.SetClipboardData)
+    safeSetClipboardData.argtypes = [UINT, HANDLE]
+    safeSetClipboardData.restype = HANDLE
+
+    safeGlobalAlloc = CheckedCall(windll.kernel32.GlobalAlloc)
+    safeGlobalAlloc.argtypes = [UINT, c_size_t]
+    safeGlobalAlloc.restype = HGLOBAL
+
+    safeGlobalLock = CheckedCall(windll.kernel32.GlobalLock)
+    safeGlobalLock.argtypes = [HGLOBAL]
+    safeGlobalLock.restype = LPVOID
+
+    safeGlobalUnlock = CheckedCall(windll.kernel32.GlobalUnlock)
+    safeGlobalUnlock.argtypes = [HGLOBAL]
+    safeGlobalUnlock.restype = BOOL
+
+    wcslen = CheckedCall(msvcrt.wcslen)
+    wcslen.argtypes = [c_wchar_p]
+    wcslen.restype = UINT
+
+    GMEM_MOVEABLE = 0x0002
+    CF_UNICODETEXT = 13
+
+    @contextlib.contextmanager
+    def window():
+        """
+        Context that provides a valid Windows hwnd.
+        """
+        # we really just need the hwnd, so setting "STATIC"
+        # as predefined lpClass is just fine.
+        hwnd = safeCreateWindowExA(
+            0, b"STATIC", None, 0, 0, 0, 0, 0, None, None, None, None
+        )
+        try:
+            yield hwnd
+        finally:
+            safeDestroyWindow(hwnd)
+
+    @contextlib.contextmanager
+    def clipboard(hwnd):
+        """
+        Context manager that opens the clipboard and prevents
+        other applications from modifying the clipboard content.
+        """
+        # We may not get the clipboard handle immediately because
+        # some other application is accessing it (?)
+        # We try for at least 500ms to get the clipboard.
+        t = time.time() + 0.5
+        success = False
+        while time.time() < t:
+            success = OpenClipboard(hwnd)
+            if success:
+                break
+            time.sleep(0.01)
+        if not success:
+            raise PyperclipWindowsException("Error calling OpenClipboard")
+
+        try:
+            yield
+        finally:
+            safeCloseClipboard()
+
+    def copy_windows(text):
+        # This function is heavily based on
+        # http://msdn.com/ms649016#_win32_Copying_Information_to_the_Clipboard
+
+        text = _stringifyText(text)  # Converts non-str values to str.
+
+        with window() as hwnd:
+            # http://msdn.com/ms649048
+            # If an application calls OpenClipboard with hwnd set to NULL,
+            # EmptyClipboard sets the clipboard owner to NULL;
+            # this causes SetClipboardData to fail.
+            # => We need a valid hwnd to copy something.
+            with clipboard(hwnd):
+                safeEmptyClipboard()
+
+                if text:
+                    # http://msdn.com/ms649051
+                    # If the hMem parameter identifies a memory object,
+                    # the object must have been allocated using the
+                    # function with the GMEM_MOVEABLE flag.
+                    count = wcslen(text) + 1
+                    handle = safeGlobalAlloc(GMEM_MOVEABLE, count * sizeof(c_wchar))
+                    locked_handle = safeGlobalLock(handle)
+
+                    ctypes.memmove(
+                        c_wchar_p(locked_handle),
+                        c_wchar_p(text),
+                        count * sizeof(c_wchar),
+                    )
+
+                    safeGlobalUnlock(handle)
+                    safeSetClipboardData(CF_UNICODETEXT, handle)
+
+    def paste_windows():
+        with clipboard(None):
+            handle = safeGetClipboardData(CF_UNICODETEXT)
+            if not handle:
+                # GetClipboardData may return NULL with errno == NO_ERROR
+                # if the clipboard is empty.
+                # (Also, it may return a handle to an empty buffer,
+                # but technically that's not empty)
+                return ""
+            return c_wchar_p(handle).value
+
+    return copy_windows, paste_windows
+
+
+def init_wsl_clipboard():
+    def copy_wsl(text):
+        text = _stringifyText(text)  # Converts non-str values to str.
+        with subprocess.Popen(["clip.exe"], stdin=subprocess.PIPE, close_fds=True) as p:
+            p.communicate(input=text.encode(ENCODING))
+
+    def paste_wsl():
+        with subprocess.Popen(
+            ["powershell.exe", "-command", "Get-Clipboard"],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            close_fds=True,
+        ) as p:
+            stdout = p.communicate()[0]
+        # WSL appends "\r\n" to the contents.
+        return stdout[:-2].decode(ENCODING)
+
+    return copy_wsl, paste_wsl
+
+
+# Automatic detection of clipboard mechanisms
+# and importing is done in determine_clipboard():
+def determine_clipboard():
+    """
+    Determine the OS/platform and set the copy() and paste() functions
+    accordingly.
+    """
+    global Foundation, AppKit, qtpy, PyQt4, PyQt5
+
+    # Setup for the CYGWIN platform:
+    if (
+        "cygwin" in platform.system().lower()
+    ):  # Cygwin has a variety of values returned by platform.system(),
+        # such as 'CYGWIN_NT-6.1'
+        # FIXME(pyperclip#55): pyperclip currently does not support Cygwin,
+        # see https://github.com/asweigart/pyperclip/issues/55
+        if os.path.exists("/dev/clipboard"):
+            warnings.warn(
+                "Pyperclip's support for Cygwin is not perfect, "
+                "see https://github.com/asweigart/pyperclip/issues/55",
+                stacklevel=find_stack_level(),
+            )
+            return init_dev_clipboard_clipboard()
+
+    # Setup for the WINDOWS platform:
+    elif os.name == "nt" or platform.system() == "Windows":
+        return init_windows_clipboard()
+
+    if platform.system() == "Linux":
+        if _executable_exists("wslconfig.exe"):
+            return init_wsl_clipboard()
+
+    # Setup for the macOS platform:
+    if os.name == "mac" or platform.system() == "Darwin":
+        try:
+            import AppKit
+            import Foundation  # check if pyobjc is installed
+        except ImportError:
+            return init_osx_pbcopy_clipboard()
+        else:
+            return init_osx_pyobjc_clipboard()
+
+    # Setup for the LINUX platform:
+    if HAS_DISPLAY:
+        if os.environ.get("WAYLAND_DISPLAY") and _executable_exists("wl-copy"):
+            return init_wl_clipboard()
+        if _executable_exists("xsel"):
+            return init_xsel_clipboard()
+        if _executable_exists("xclip"):
+            return init_xclip_clipboard()
+        if _executable_exists("klipper") and _executable_exists("qdbus"):
+            return init_klipper_clipboard()
+
+        try:
+            # qtpy is a small abstraction layer that lets you write applications
+            # using a single api call to either PyQt or PySide.
+            # https://pypi.python.org/project/QtPy
+            import qtpy  # check if qtpy is installed
+        except ImportError:
+            # If qtpy isn't installed, fall back on importing PyQt4.
+            try:
+                import PyQt5  # check if PyQt5 is installed
+            except ImportError:
+                try:
+                    import PyQt4  # check if PyQt4 is installed
+                except ImportError:
+                    pass  # We want to fail fast for all non-ImportError exceptions.
+                else:
+                    return init_qt_clipboard()
+            else:
+                return init_qt_clipboard()
+        else:
+            return init_qt_clipboard()
+
+    return init_no_clipboard()
+
+
+def set_clipboard(clipboard):
+    """
+    Explicitly sets the clipboard mechanism. The "clipboard mechanism" is how
+    the copy() and paste() functions interact with the operating system to
+    implement the copy/paste feature. The clipboard parameter must be one of:
+        - pbcopy
+        - pyobjc (default on macOS)
+        - qt
+        - xclip
+        - xsel
+        - klipper
+        - windows (default on Windows)
+        - no (this is what is set when no clipboard mechanism can be found)
+    """
+    global copy, paste
+
+    clipboard_types = {
+        "pbcopy": init_osx_pbcopy_clipboard,
+        "pyobjc": init_osx_pyobjc_clipboard,
+        "qt": init_qt_clipboard,  # TODO - split this into 'qtpy', 'pyqt4', and 'pyqt5'
+        "xclip": init_xclip_clipboard,
+        "xsel": init_xsel_clipboard,
+        "wl-clipboard": init_wl_clipboard,
+        "klipper": init_klipper_clipboard,
+        "windows": init_windows_clipboard,
+        "no": init_no_clipboard,
+    }
+
+    if clipboard not in clipboard_types:
+        allowed_clipboard_types = [repr(_) for _ in clipboard_types]
+        raise ValueError(
+            f"Argument must be one of {', '.join(allowed_clipboard_types)}"
+        )
+
+    # Sets pyperclip's copy() and paste() functions:
+    copy, paste = clipboard_types[clipboard]()
+
+
+def lazy_load_stub_copy(text):
+    """
+    A stub function for copy(), which will load the real copy() function when
+    called so that the real copy() function is used for later calls.
+
+    This allows users to import pyperclip without having determine_clipboard()
+    automatically run, which will automatically select a clipboard mechanism.
+    This could be a problem if it selects, say, the memory-heavy PyQt4 module
+    but the user was just going to immediately call set_clipboard() to use a
+    different clipboard mechanism.
+
+    The lazy loading this stub function implements gives the user a chance to
+    call set_clipboard() to pick another clipboard mechanism. Or, if the user
+    simply calls copy() or paste() without calling set_clipboard() first,
+    will fall back on whatever clipboard mechanism that determine_clipboard()
+    automatically chooses.
+    """
+    global copy, paste
+    copy, paste = determine_clipboard()
+    return copy(text)
+
+
+def lazy_load_stub_paste():
+    """
+    A stub function for paste(), which will load the real paste() function when
+    called so that the real paste() function is used for later calls.
+
+    This allows users to import pyperclip without having determine_clipboard()
+    automatically run, which will automatically select a clipboard mechanism.
+    This could be a problem if it selects, say, the memory-heavy PyQt4 module
+    but the user was just going to immediately call set_clipboard() to use a
+    different clipboard mechanism.
+
+    The lazy loading this stub function implements gives the user a chance to
+    call set_clipboard() to pick another clipboard mechanism. Or, if the user
+    simply calls copy() or paste() without calling set_clipboard() first,
+    will fall back on whatever clipboard mechanism that determine_clipboard()
+    automatically chooses.
+    """
+    global copy, paste
+    copy, paste = determine_clipboard()
+    return paste()
+
+
+def is_available() -> bool:
+    return copy != lazy_load_stub_copy and paste != lazy_load_stub_paste
+
+
+# Initially, copy() and paste() are set to lazy loading wrappers which will
+# set `copy` and `paste` to real functions the first time they're used, unless
+# set_clipboard() or determine_clipboard() is called first.
+copy, paste = lazy_load_stub_copy, lazy_load_stub_paste
+
+
+def waitForPaste(timeout=None):
+    """This function call blocks until a non-empty text string exists on the
+    clipboard. It returns this text.
+
+    This function raises PyperclipTimeoutException if timeout was set to
+    a number of seconds that has elapsed without non-empty text being put on
+    the clipboard."""
+    startTime = time.time()
+    while True:
+        clipboardText = paste()
+        if clipboardText != "":
+            return clipboardText
+        time.sleep(0.01)
+
+        if timeout is not None and time.time() > startTime + timeout:
+            raise PyperclipTimeoutException(
+                "waitForPaste() timed out after " + str(timeout) + " seconds."
+            )
+
+
+def waitForNewPaste(timeout=None):
+    """This function call blocks until a new text string exists on the
+    clipboard that is different from the text that was there when the function
+    was first called. It returns this text.
+
+    This function raises PyperclipTimeoutException if timeout was set to
+    a number of seconds that has elapsed without non-empty text being put on
+    the clipboard."""
+    startTime = time.time()
+    originalText = paste()
+    while True:
+        currentText = paste()
+        if currentText != originalText:
+            return currentText
+        time.sleep(0.01)
+
+        if timeout is not None and time.time() > startTime + timeout:
+            raise PyperclipTimeoutException(
+                "waitForNewPaste() timed out after " + str(timeout) + " seconds."
+            )
+
+
+__all__ = [
+    "copy",
+    "paste",
+    "waitForPaste",
+    "waitForNewPaste",
+    "set_clipboard",
+    "determine_clipboard",
+]
+
+# pandas aliases
+clipboard_get = paste
+clipboard_set = copy
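Review note: the `clipboard_get`/`clipboard_set` aliases at the end of this file are the hooks the public pandas clipboard API routes through. A minimal usage sketch, assuming one of the mechanisms above is actually available on the host (e.g. xclip or xsel on X11 Linux):

    # DataFrame.to_clipboard writes a tab-separated table by default
    # (excel=True), which read_clipboard can parse straight back.
    import pandas as pd

    df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    df.to_clipboard(index=False)
    round_tripped = pd.read_clipboard()
    print(round_tripped)
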
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/clipboard/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/clipboard/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9a19ae94e2c55242cbda6594cced740efab813fd
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/clipboard/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..275cbf0148f944eb04ca6c40c624cc5df77aa626
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__init__.py
@@ -0,0 +1,19 @@
+from pandas.io.excel._base import (
+    ExcelFile,
+    ExcelWriter,
+    read_excel,
+)
+from pandas.io.excel._odswriter import ODSWriter as _ODSWriter
+from pandas.io.excel._openpyxl import OpenpyxlWriter as _OpenpyxlWriter
+from pandas.io.excel._util import register_writer
+from pandas.io.excel._xlsxwriter import XlsxWriter as _XlsxWriter
+
+__all__ = ["read_excel", "ExcelWriter", "ExcelFile"]
+
+
+register_writer(_OpenpyxlWriter)
+
+register_writer(_XlsxWriter)
+
+
+register_writer(_ODSWriter)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_base.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dc868b9db9b4028efed33d0e4b9e08cff6cc51af
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_base.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odfreader.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odfreader.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..10a0971abb37f55a48224933ca4c6723bf5b5953
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odfreader.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_pyxlsb.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_pyxlsb.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..67a272db74bbd8876df8d5bc7984ab0ddf378c2b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_pyxlsb.cpython-310.pyc differ
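Review note: the `register_writer` calls above populate the writer registry that `ExcelWriter` consults; the default engine per extension comes from the ``io.excel.<extension>.writer`` option. A hedged sketch of how the registered writers are reached (file names are illustrative, and each engine assumes its optional dependency is installed):

    import pandas as pd

    df = pd.DataFrame({"x": [1, 2, 3]})
    # engine resolved from the registry via the io.excel.xlsx.writer option:
    df.to_excel("default.xlsx")
    # or force one of the writers registered above:
    df.to_excel("explicit.xlsx", engine="openpyxl")
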
+) +import warnings +import zipfile + +from pandas._config import config + +from pandas._libs import lib +from pandas._libs.parsers import STR_NA_VALUES +from pandas.compat._optional import ( + get_version, + import_optional_dependency, +) +from pandas.errors import EmptyDataError +from pandas.util._decorators import ( + Appender, + doc, +) +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import check_dtype_backend + +from pandas.core.dtypes.common import ( + is_bool, + is_float, + is_integer, + is_list_like, +) + +from pandas.core.frame import DataFrame +from pandas.core.shared_docs import _shared_docs +from pandas.util.version import Version + +from pandas.io.common import ( + IOHandles, + get_handle, + stringify_path, + validate_header_arg, +) +from pandas.io.excel._util import ( + fill_mi_header, + get_default_engine, + get_writer, + maybe_convert_usecols, + pop_header_name, +) +from pandas.io.parsers import TextParser +from pandas.io.parsers.readers import validate_integer + +if TYPE_CHECKING: + from types import TracebackType + + from pandas._typing import ( + DtypeArg, + DtypeBackend, + ExcelWriterIfSheetExists, + FilePath, + IntStrT, + ReadBuffer, + Self, + SequenceNotStr, + StorageOptions, + WriteExcelBuffer, + ) +_read_excel_doc = ( + """ +Read an Excel file into a ``pandas`` ``DataFrame``. + +Supports `xls`, `xlsx`, `xlsm`, `xlsb`, `odf`, `ods` and `odt` file extensions +read from a local filesystem or URL. Supports an option to read +a single sheet or a list of sheets. + +Parameters +---------- +io : str, bytes, ExcelFile, xlrd.Book, path object, or file-like object + Any valid string path is acceptable. The string could be a URL. Valid + URL schemes include http, ftp, s3, and file. For file URLs, a host is + expected. A local file could be: ``file://localhost/path/to/table.xlsx``. + + If you want to pass in a path object, pandas accepts any ``os.PathLike``. + + By file-like object, we refer to objects with a ``read()`` method, + such as a file handle (e.g. via builtin ``open`` function) + or ``StringIO``. + + .. deprecated:: 2.1.0 + Passing byte strings is deprecated. To read from a + byte string, wrap it in a ``BytesIO`` object. +sheet_name : str, int, list, or None, default 0 + Strings are used for sheet names. Integers are used in zero-indexed + sheet positions (chart sheets do not count as a sheet position). + Lists of strings/integers are used to request multiple sheets. + Specify ``None`` to get all worksheets. + + Available cases: + + * Defaults to ``0``: 1st sheet as a `DataFrame` + * ``1``: 2nd sheet as a `DataFrame` + * ``"Sheet1"``: Load sheet with name "Sheet1" + * ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5" + as a dict of `DataFrame` + * ``None``: All worksheets. + +header : int, list of int, default 0 + Row (0-indexed) to use for the column labels of the parsed + DataFrame. If a list of integers is passed those row positions will + be combined into a ``MultiIndex``. Use None if there is no header. +names : array-like, default None + List of column names to use. If file contains no header row, + then you should explicitly pass header=None. +index_col : int, str, list of int, default None + Column (0-indexed) to use as the row labels of the DataFrame. + Pass None if there is no such column. If a list is passed, + those columns will be combined into a ``MultiIndex``. If a + subset of data is selected with ``usecols``, index_col + is based on the subset. 
+ + Missing values will be forward filled to allow roundtripping with + ``to_excel`` for ``merged_cells=True``. To avoid forward filling the + missing values use ``set_index`` after reading the data instead of + ``index_col``. +usecols : str, list-like, or callable, default None + * If None, then parse all columns. + * If str, then indicates comma separated list of Excel column letters + and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of + both sides. + * If list of int, then indicates list of column numbers to be parsed + (0-indexed). + * If list of string, then indicates list of column names to be parsed. + * If callable, then evaluate each column name against it and parse the + column if the callable returns ``True``. + + Returns a subset of the columns according to behavior above. +dtype : Type name or dict of column -> type, default None + Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32}} + Use ``object`` to preserve data as stored in Excel and not interpret dtype, + which will necessarily result in ``object`` dtype. + If converters are specified, they will be applied INSTEAD + of dtype conversion. + If you use ``None``, it will infer the dtype of each column based on the data. +engine : {{'openpyxl', 'calamine', 'odf', 'pyxlsb', 'xlrd'}}, default None + If io is not a buffer or path, this must be set to identify io. + Engine compatibility : + + - ``openpyxl`` supports newer Excel file formats. + - ``calamine`` supports Excel (.xls, .xlsx, .xlsm, .xlsb) + and OpenDocument (.ods) file formats. + - ``odf`` supports OpenDocument file formats (.odf, .ods, .odt). + - ``pyxlsb`` supports Binary Excel files. + - ``xlrd`` supports old-style Excel files (.xls). + + When ``engine=None``, the following logic will be used to determine the engine: + + - If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt), + then `odf `_ will be used. + - Otherwise if ``path_or_buffer`` is an xls format, ``xlrd`` will be used. + - Otherwise if ``path_or_buffer`` is in xlsb format, ``pyxlsb`` will be used. + - Otherwise ``openpyxl`` will be used. +converters : dict, default None + Dict of functions for converting values in certain columns. Keys can + either be integers or column labels, values are functions that take one + input argument, the Excel cell content, and return the transformed + content. +true_values : list, default None + Values to consider as True. +false_values : list, default None + Values to consider as False. +skiprows : list-like, int, or callable, optional + Line numbers to skip (0-indexed) or number of lines to skip (int) at the + start of the file. If callable, the callable function will be evaluated + against the row indices, returning True if the row should be skipped and + False otherwise. An example of a valid callable argument would be ``lambda + x: x in [0, 2]``. +nrows : int, default None + Number of rows to parse. +na_values : scalar, str, list-like, or dict, default None + Additional strings to recognize as NA/NaN. If dict passed, specific + per-column NA values. By default the following values are interpreted + as NaN: '""" + + fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ") + + """'. +keep_default_na : bool, default True + Whether or not to include the default NaN values when parsing the data. + Depending on whether ``na_values`` is passed in, the behavior is as follows: + + * If ``keep_default_na`` is True, and ``na_values`` are specified, + ``na_values`` is appended to the default NaN values used for parsing. 
+ * If ``keep_default_na`` is True, and ``na_values`` are not specified, only + the default NaN values are used for parsing. + * If ``keep_default_na`` is False, and ``na_values`` are specified, only + the NaN values specified ``na_values`` are used for parsing. + * If ``keep_default_na`` is False, and ``na_values`` are not specified, no + strings will be parsed as NaN. + + Note that if `na_filter` is passed in as False, the ``keep_default_na`` and + ``na_values`` parameters will be ignored. +na_filter : bool, default True + Detect missing value markers (empty strings and the value of na_values). In + data without any NAs, passing ``na_filter=False`` can improve the + performance of reading a large file. +verbose : bool, default False + Indicate number of NA values placed in non-numeric columns. +parse_dates : bool, list-like, or dict, default False + The behavior is as follows: + + * ``bool``. If True -> try parsing the index. + * ``list`` of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3 + each as a separate date column. + * ``list`` of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as + a single date column. + * ``dict``, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call + result 'foo' + + If a column or index contains an unparsable date, the entire column or + index will be returned unaltered as an object data type. If you don`t want to + parse some cells as date just change their type in Excel to "Text". + For non-standard datetime parsing, use ``pd.to_datetime`` after ``pd.read_excel``. + + Note: A fast-path exists for iso8601-formatted dates. +date_parser : function, optional + Function to use for converting a sequence of string columns to an array of + datetime instances. The default uses ``dateutil.parser.parser`` to do the + conversion. Pandas will try to call `date_parser` in three different ways, + advancing to the next if an exception occurs: 1) Pass one or more arrays + (as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the + string values from the columns defined by `parse_dates` into a single array + and pass that; and 3) call `date_parser` once for each row using one or + more strings (corresponding to the columns defined by `parse_dates`) as + arguments. + + .. deprecated:: 2.0.0 + Use ``date_format`` instead, or read in as ``object`` and then apply + :func:`to_datetime` as-needed. +date_format : str or dict of column -> format, default ``None`` + If used in conjunction with ``parse_dates``, will parse dates according to this + format. For anything more complex, + please read in as ``object`` and then apply :func:`to_datetime` as-needed. + + .. versionadded:: 2.0.0 +thousands : str, default None + Thousands separator for parsing string columns to numeric. Note that + this parameter is only necessary for columns stored as TEXT in Excel, + any numeric columns will automatically be parsed, regardless of display + format. +decimal : str, default '.' + Character to recognize as decimal point for parsing string columns to numeric. + Note that this parameter is only necessary for columns stored as TEXT in Excel, + any numeric columns will automatically be parsed, regardless of display + format.(e.g. use ',' for European data). + + .. versionadded:: 1.4.0 + +comment : str, default None + Comments out remainder of line. Pass a character or characters to this + argument to indicate comments in the input file. Any data between the + comment string and the end of the current line is ignored. 
+skipfooter : int, default 0 + Rows at the end to skip (0-indexed). +{storage_options} + +dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + +engine_kwargs : dict, optional + Arbitrary keyword arguments passed to excel engine. + +Returns +------- +DataFrame or dict of DataFrames + DataFrame from the passed in Excel file. See notes in sheet_name + argument for more information on when a dict of DataFrames is returned. + +See Also +-------- +DataFrame.to_excel : Write DataFrame to an Excel file. +DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file. +read_csv : Read a comma-separated values (csv) file into DataFrame. +read_fwf : Read a table of fixed-width formatted lines into DataFrame. + +Notes +----- +For specific information on the methods used for each Excel engine, refer to the pandas +:ref:`user guide ` + +Examples +-------- +The file can be read using the file name as string or an open file object: + +>>> pd.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP + Name Value +0 string1 1 +1 string2 2 +2 #Comment 3 + +>>> pd.read_excel(open('tmp.xlsx', 'rb'), +... sheet_name='Sheet3') # doctest: +SKIP + Unnamed: 0 Name Value +0 0 string1 1 +1 1 string2 2 +2 2 #Comment 3 + +Index and header can be specified via the `index_col` and `header` arguments + +>>> pd.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP + 0 1 2 +0 NaN Name Value +1 0.0 string1 1 +2 1.0 string2 2 +3 2.0 #Comment 3 + +Column types are inferred but can be explicitly specified + +>>> pd.read_excel('tmp.xlsx', index_col=0, +... dtype={{'Name': str, 'Value': float}}) # doctest: +SKIP + Name Value +0 string1 1.0 +1 string2 2.0 +2 #Comment 3.0 + +True, False, and NA values, and thousands separators have defaults, +but can be explicitly specified, too. Supply the values you would like +as strings or lists of strings! + +>>> pd.read_excel('tmp.xlsx', index_col=0, +... na_values=['string1', 'string2']) # doctest: +SKIP + Name Value +0 NaN 1 +1 NaN 2 +2 #Comment 3 + +Comment lines in the excel input file can be skipped using the +``comment`` kwarg. 
+ +>>> pd.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP + Name Value +0 string1 1.0 +1 string2 2.0 +2 None NaN +""" +) + + +@overload +def read_excel( + io, + # sheet name is str or int -> DataFrame + sheet_name: str | int = ..., + *, + header: int | Sequence[int] | None = ..., + names: SequenceNotStr[Hashable] | range | None = ..., + index_col: int | str | Sequence[int] | None = ..., + usecols: int + | str + | Sequence[int] + | Sequence[str] + | Callable[[str], bool] + | None = ..., + dtype: DtypeArg | None = ..., + engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = ..., + converters: dict[str, Callable] | dict[int, Callable] | None = ..., + true_values: Iterable[Hashable] | None = ..., + false_values: Iterable[Hashable] | None = ..., + skiprows: Sequence[int] | int | Callable[[int], object] | None = ..., + nrows: int | None = ..., + na_values=..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool = ..., + parse_dates: list | dict | bool = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: dict[Hashable, str] | str | None = ..., + thousands: str | None = ..., + decimal: str = ..., + comment: str | None = ..., + skipfooter: int = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> DataFrame: + ... + + +@overload +def read_excel( + io, + # sheet name is list or None -> dict[IntStrT, DataFrame] + sheet_name: list[IntStrT] | None, + *, + header: int | Sequence[int] | None = ..., + names: SequenceNotStr[Hashable] | range | None = ..., + index_col: int | str | Sequence[int] | None = ..., + usecols: int + | str + | Sequence[int] + | Sequence[str] + | Callable[[str], bool] + | None = ..., + dtype: DtypeArg | None = ..., + engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = ..., + converters: dict[str, Callable] | dict[int, Callable] | None = ..., + true_values: Iterable[Hashable] | None = ..., + false_values: Iterable[Hashable] | None = ..., + skiprows: Sequence[int] | int | Callable[[int], object] | None = ..., + nrows: int | None = ..., + na_values=..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool = ..., + parse_dates: list | dict | bool = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: dict[Hashable, str] | str | None = ..., + thousands: str | None = ..., + decimal: str = ..., + comment: str | None = ..., + skipfooter: int = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> dict[IntStrT, DataFrame]: + ... 
+ + +@doc(storage_options=_shared_docs["storage_options"]) +@Appender(_read_excel_doc) +def read_excel( + io, + sheet_name: str | int | list[IntStrT] | None = 0, + *, + header: int | Sequence[int] | None = 0, + names: SequenceNotStr[Hashable] | range | None = None, + index_col: int | str | Sequence[int] | None = None, + usecols: int + | str + | Sequence[int] + | Sequence[str] + | Callable[[str], bool] + | None = None, + dtype: DtypeArg | None = None, + engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = None, + converters: dict[str, Callable] | dict[int, Callable] | None = None, + true_values: Iterable[Hashable] | None = None, + false_values: Iterable[Hashable] | None = None, + skiprows: Sequence[int] | int | Callable[[int], object] | None = None, + nrows: int | None = None, + na_values=None, + keep_default_na: bool = True, + na_filter: bool = True, + verbose: bool = False, + parse_dates: list | dict | bool = False, + date_parser: Callable | lib.NoDefault = lib.no_default, + date_format: dict[Hashable, str] | str | None = None, + thousands: str | None = None, + decimal: str = ".", + comment: str | None = None, + skipfooter: int = 0, + storage_options: StorageOptions | None = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + engine_kwargs: dict | None = None, +) -> DataFrame | dict[IntStrT, DataFrame]: + check_dtype_backend(dtype_backend) + should_close = False + if engine_kwargs is None: + engine_kwargs = {} + + if not isinstance(io, ExcelFile): + should_close = True + io = ExcelFile( + io, + storage_options=storage_options, + engine=engine, + engine_kwargs=engine_kwargs, + ) + elif engine and engine != io.engine: + raise ValueError( + "Engine should not be specified when passing " + "an ExcelFile - ExcelFile already has the engine set" + ) + + try: + data = io.parse( + sheet_name=sheet_name, + header=header, + names=names, + index_col=index_col, + usecols=usecols, + dtype=dtype, + converters=converters, + true_values=true_values, + false_values=false_values, + skiprows=skiprows, + nrows=nrows, + na_values=na_values, + keep_default_na=keep_default_na, + na_filter=na_filter, + verbose=verbose, + parse_dates=parse_dates, + date_parser=date_parser, + date_format=date_format, + thousands=thousands, + decimal=decimal, + comment=comment, + skipfooter=skipfooter, + dtype_backend=dtype_backend, + ) + finally: + # make sure to close opened file handles + if should_close: + io.close() + return data + + +_WorkbookT = TypeVar("_WorkbookT") + + +class BaseExcelReader(Generic[_WorkbookT]): + book: _WorkbookT + + def __init__( + self, + filepath_or_buffer, + storage_options: StorageOptions | None = None, + engine_kwargs: dict | None = None, + ) -> None: + if engine_kwargs is None: + engine_kwargs = {} + + # First argument can also be bytes, so create a buffer + if isinstance(filepath_or_buffer, bytes): + filepath_or_buffer = BytesIO(filepath_or_buffer) + + self.handles = IOHandles( + handle=filepath_or_buffer, compression={"method": None} + ) + if not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)): + self.handles = get_handle( + filepath_or_buffer, "rb", storage_options=storage_options, is_text=False + ) + + if isinstance(self.handles.handle, self._workbook_class): + self.book = self.handles.handle + elif hasattr(self.handles.handle, "read"): + # N.B. 
xlrd.Book has a read attribute too + self.handles.handle.seek(0) + try: + self.book = self.load_workbook(self.handles.handle, engine_kwargs) + except Exception: + self.close() + raise + else: + raise ValueError( + "Must explicitly set engine if not passing in buffer or path for io." + ) + + @property + def _workbook_class(self) -> type[_WorkbookT]: + raise NotImplementedError + + def load_workbook(self, filepath_or_buffer, engine_kwargs) -> _WorkbookT: + raise NotImplementedError + + def close(self) -> None: + if hasattr(self, "book"): + if hasattr(self.book, "close"): + # pyxlsb: opens a TemporaryFile + # openpyxl: https://stackoverflow.com/questions/31416842/ + # openpyxl-does-not-close-excel-workbook-in-read-only-mode + self.book.close() + elif hasattr(self.book, "release_resources"): + # xlrd + # https://github.com/python-excel/xlrd/blob/2.0.1/xlrd/book.py#L548 + self.book.release_resources() + self.handles.close() + + @property + def sheet_names(self) -> list[str]: + raise NotImplementedError + + def get_sheet_by_name(self, name: str): + raise NotImplementedError + + def get_sheet_by_index(self, index: int): + raise NotImplementedError + + def get_sheet_data(self, sheet, rows: int | None = None): + raise NotImplementedError + + def raise_if_bad_sheet_by_index(self, index: int) -> None: + n_sheets = len(self.sheet_names) + if index >= n_sheets: + raise ValueError( + f"Worksheet index {index} is invalid, {n_sheets} worksheets found" + ) + + def raise_if_bad_sheet_by_name(self, name: str) -> None: + if name not in self.sheet_names: + raise ValueError(f"Worksheet named '{name}' not found") + + def _check_skiprows_func( + self, + skiprows: Callable, + rows_to_use: int, + ) -> int: + """ + Determine how many file rows are required to obtain `nrows` data + rows when `skiprows` is a function. + + Parameters + ---------- + skiprows : function + The function passed to read_excel by the user. + rows_to_use : int + The number of rows that will be needed for the header and + the data. + + Returns + ------- + int + """ + i = 0 + rows_used_so_far = 0 + while rows_used_so_far < rows_to_use: + if not skiprows(i): + rows_used_so_far += 1 + i += 1 + return i + + def _calc_rows( + self, + header: int | Sequence[int] | None, + index_col: int | Sequence[int] | None, + skiprows: Sequence[int] | int | Callable[[int], object] | None, + nrows: int | None, + ) -> int | None: + """ + If nrows specified, find the number of rows needed from the + file, otherwise return None. + + + Parameters + ---------- + header : int, list of int, or None + See read_excel docstring. + index_col : int, str, list of int, or None + See read_excel docstring. + skiprows : list-like, int, callable, or None + See read_excel docstring. + nrows : int or None + See read_excel docstring. 
+ + Returns + ------- + int or None + """ + if nrows is None: + return None + if header is None: + header_rows = 1 + elif is_integer(header): + header = cast(int, header) + header_rows = 1 + header + else: + header = cast(Sequence, header) + header_rows = 1 + header[-1] + # If there is a MultiIndex header and an index then there is also + # a row containing just the index name(s) + if is_list_like(header) and index_col is not None: + header = cast(Sequence, header) + if len(header) > 1: + header_rows += 1 + if skiprows is None: + return header_rows + nrows + if is_integer(skiprows): + skiprows = cast(int, skiprows) + return header_rows + nrows + skiprows + if is_list_like(skiprows): + + def f(skiprows: Sequence, x: int) -> bool: + return x in skiprows + + skiprows = cast(Sequence, skiprows) + return self._check_skiprows_func(partial(f, skiprows), header_rows + nrows) + if callable(skiprows): + return self._check_skiprows_func( + skiprows, + header_rows + nrows, + ) + # else unexpected skiprows type: read_excel will not optimize + # the number of rows read from file + return None + + def parse( + self, + sheet_name: str | int | list[int] | list[str] | None = 0, + header: int | Sequence[int] | None = 0, + names: SequenceNotStr[Hashable] | range | None = None, + index_col: int | Sequence[int] | None = None, + usecols=None, + dtype: DtypeArg | None = None, + true_values: Iterable[Hashable] | None = None, + false_values: Iterable[Hashable] | None = None, + skiprows: Sequence[int] | int | Callable[[int], object] | None = None, + nrows: int | None = None, + na_values=None, + verbose: bool = False, + parse_dates: list | dict | bool = False, + date_parser: Callable | lib.NoDefault = lib.no_default, + date_format: dict[Hashable, str] | str | None = None, + thousands: str | None = None, + decimal: str = ".", + comment: str | None = None, + skipfooter: int = 0, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + **kwds, + ): + validate_header_arg(header) + validate_integer("nrows", nrows) + + ret_dict = False + + # Keep sheetname to maintain backwards compatibility. + sheets: list[int] | list[str] + if isinstance(sheet_name, list): + sheets = sheet_name + ret_dict = True + elif sheet_name is None: + sheets = self.sheet_names + ret_dict = True + elif isinstance(sheet_name, str): + sheets = [sheet_name] + else: + sheets = [sheet_name] + + # handle same-type duplicates. 
+ sheets = cast(Union[list[int], list[str]], list(dict.fromkeys(sheets).keys())) + + output = {} + + last_sheetname = None + for asheetname in sheets: + last_sheetname = asheetname + if verbose: + print(f"Reading sheet {asheetname}") + + if isinstance(asheetname, str): + sheet = self.get_sheet_by_name(asheetname) + else: # assume an integer if not a string + sheet = self.get_sheet_by_index(asheetname) + + file_rows_needed = self._calc_rows(header, index_col, skiprows, nrows) + data = self.get_sheet_data(sheet, file_rows_needed) + if hasattr(sheet, "close"): + # pyxlsb opens two TemporaryFiles + sheet.close() + usecols = maybe_convert_usecols(usecols) + + if not data: + output[asheetname] = DataFrame() + continue + + is_list_header = False + is_len_one_list_header = False + if is_list_like(header): + assert isinstance(header, Sequence) + is_list_header = True + if len(header) == 1: + is_len_one_list_header = True + + if is_len_one_list_header: + header = cast(Sequence[int], header)[0] + + # forward fill and pull out names for MultiIndex column + header_names = None + if header is not None and is_list_like(header): + assert isinstance(header, Sequence) + + header_names = [] + control_row = [True] * len(data[0]) + + for row in header: + if is_integer(skiprows): + assert isinstance(skiprows, int) + row += skiprows + + if row > len(data) - 1: + raise ValueError( + f"header index {row} exceeds maximum index " + f"{len(data) - 1} of data.", + ) + + data[row], control_row = fill_mi_header(data[row], control_row) + + if index_col is not None: + header_name, _ = pop_header_name(data[row], index_col) + header_names.append(header_name) + + # If there is a MultiIndex header and an index then there is also + # a row containing just the index name(s) + has_index_names = False + if is_list_header and not is_len_one_list_header and index_col is not None: + index_col_list: Sequence[int] + if isinstance(index_col, int): + index_col_list = [index_col] + else: + assert isinstance(index_col, Sequence) + index_col_list = index_col + + # We have to handle mi without names. If any of the entries in the data + # columns are not empty, this is a regular row + assert isinstance(header, Sequence) + if len(header) < len(data): + potential_index_names = data[len(header)] + potential_data = [ + x + for i, x in enumerate(potential_index_names) + if not control_row[i] and i not in index_col_list + ] + has_index_names = all(x == "" or x is None for x in potential_data) + + if is_list_like(index_col): + # Forward fill values for MultiIndex index. + if header is None: + offset = 0 + elif isinstance(header, int): + offset = 1 + header + else: + offset = 1 + max(header) + + # GH34673: if MultiIndex names present and not defined in the header, + # offset needs to be incremented so that forward filling starts + # from the first MI value instead of the name + if has_index_names: + offset += 1 + + # Check if we have an empty dataset + # before trying to collect data. 
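+ # e.g. header=[0, 1] with index_col=[0] gives offset 2, or 3 when index + # names are present; rows above the offset belong to the header block and + # are never forward filled.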
+ if offset < len(data): + assert isinstance(index_col, Sequence) + + for col in index_col: + last = data[offset][col] + + for row in range(offset + 1, len(data)): + if data[row][col] == "" or data[row][col] is None: + data[row][col] = last + else: + last = data[row][col] + + # GH 12292 : error when read one empty column from excel file + try: + parser = TextParser( + data, + names=names, + header=header, + index_col=index_col, + has_index_names=has_index_names, + dtype=dtype, + true_values=true_values, + false_values=false_values, + skiprows=skiprows, + nrows=nrows, + na_values=na_values, + skip_blank_lines=False, # GH 39808 + parse_dates=parse_dates, + date_parser=date_parser, + date_format=date_format, + thousands=thousands, + decimal=decimal, + comment=comment, + skipfooter=skipfooter, + usecols=usecols, + dtype_backend=dtype_backend, + **kwds, + ) + + output[asheetname] = parser.read(nrows=nrows) + + if header_names: + output[asheetname].columns = output[asheetname].columns.set_names( + header_names + ) + + except EmptyDataError: + # No Data, return an empty DataFrame + output[asheetname] = DataFrame() + + except Exception as err: + err.args = (f"{err.args[0]} (sheet: {asheetname})", *err.args[1:]) + raise err + + if last_sheetname is None: + raise ValueError("Sheet name is an empty list") + + if ret_dict: + return output + else: + return output[last_sheetname] + + +@doc(storage_options=_shared_docs["storage_options"]) +class ExcelWriter(Generic[_WorkbookT]): + """ + Class for writing DataFrame objects into excel sheets. + + Default is to use: + + * ``xlsxwriter`` for xlsx files if xlsxwriter + is installed otherwise ``openpyxl`` + * ``odswriter`` for ods files + + See ``DataFrame.to_excel`` for typical usage. + + The writer should be used as a context manager. Otherwise, call `close()` to save + and close any opened file handles. + + Parameters + ---------- + path : str or typing.BinaryIO + Path to xls or xlsx or ods file. + engine : str (optional) + Engine to use for writing. If None, defaults to + ``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword + argument. + date_format : str, default None + Format string for dates written into Excel files (e.g. 'YYYY-MM-DD'). + datetime_format : str, default None + Format string for datetime objects written into Excel files + (e.g. 'YYYY-MM-DD HH:MM:SS'). + mode : {{'w', 'a'}}, default 'w' + File mode to use (write or append). Append does not work with fsspec URLs. + {storage_options} + + if_sheet_exists : {{'error', 'new', 'replace', 'overlay'}}, default 'error' + How to behave when trying to write to a sheet that already + exists (append mode only). + + * error: raise a ValueError. + * new: Create a new sheet, with a name determined by the engine. + * replace: Delete the contents of the sheet before writing to it. + * overlay: Write contents to the existing sheet without first removing, + but possibly over top of, the existing contents. + + .. versionadded:: 1.3.0 + + .. versionchanged:: 1.4.0 + + Added ``overlay`` option + + engine_kwargs : dict, optional + Keyword arguments to be passed into the engine. These will be passed to + the following functions of the respective engines: + + * xlsxwriter: ``xlsxwriter.Workbook(file, **engine_kwargs)`` + * openpyxl (write mode): ``openpyxl.Workbook(**engine_kwargs)`` + * openpyxl (append mode): ``openpyxl.load_workbook(file, **engine_kwargs)`` + * odswriter: ``odf.opendocument.OpenDocumentSpreadsheet(**engine_kwargs)`` + + ..
versionadded:: 1.3.0 + + Notes + ----- + For compatibility with CSV writers, ExcelWriter serializes lists + and dicts to strings before writing. + + Examples + -------- + Default usage: + + >>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP + >>> with pd.ExcelWriter("path_to_file.xlsx") as writer: + ... df.to_excel(writer) # doctest: +SKIP + + To write to separate sheets in a single file: + + >>> df1 = pd.DataFrame([["AAA", "BBB"]], columns=["Spam", "Egg"]) # doctest: +SKIP + >>> df2 = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP + >>> with pd.ExcelWriter("path_to_file.xlsx") as writer: + ... df1.to_excel(writer, sheet_name="Sheet1") # doctest: +SKIP + ... df2.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP + + You can set the date format or datetime format: + + >>> from datetime import date, datetime # doctest: +SKIP + >>> df = pd.DataFrame( + ... [ + ... [date(2014, 1, 31), date(1999, 9, 24)], + ... [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)], + ... ], + ... index=["Date", "Datetime"], + ... columns=["X", "Y"], + ... ) # doctest: +SKIP + >>> with pd.ExcelWriter( + ... "path_to_file.xlsx", + ... date_format="YYYY-MM-DD", + ... datetime_format="YYYY-MM-DD HH:MM:SS" + ... ) as writer: + ... df.to_excel(writer) # doctest: +SKIP + + You can also append to an existing Excel file: + + >>> with pd.ExcelWriter("path_to_file.xlsx", mode="a", engine="openpyxl") as writer: + ... df.to_excel(writer, sheet_name="Sheet3") # doctest: +SKIP + + Here, the `if_sheet_exists` parameter can be set to replace a sheet if it + already exists: + + >>> with ExcelWriter( + ... "path_to_file.xlsx", + ... mode="a", + ... engine="openpyxl", + ... if_sheet_exists="replace", + ... ) as writer: + ... df.to_excel(writer, sheet_name="Sheet1") # doctest: +SKIP + + You can also write multiple DataFrames to a single sheet. Note that the + ``if_sheet_exists`` parameter needs to be set to ``overlay``: + + >>> with ExcelWriter("path_to_file.xlsx", + ... mode="a", + ... engine="openpyxl", + ... if_sheet_exists="overlay", + ... ) as writer: + ... df1.to_excel(writer, sheet_name="Sheet1") + ... df2.to_excel(writer, sheet_name="Sheet1", startcol=3) # doctest: +SKIP + + You can store Excel file in RAM: + + >>> import io + >>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) + >>> buffer = io.BytesIO() + >>> with pd.ExcelWriter(buffer) as writer: + ... df.to_excel(writer) + + You can pack Excel file into zip archive: + + >>> import zipfile # doctest: +SKIP + >>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP + >>> with zipfile.ZipFile("path_to_file.zip", "w") as zf: + ... with zf.open("filename.xlsx", "w") as buffer: + ... with pd.ExcelWriter(buffer) as writer: + ... df.to_excel(writer) # doctest: +SKIP + + You can specify additional arguments to the underlying engine: + + >>> with pd.ExcelWriter( + ... "path_to_file.xlsx", + ... engine="xlsxwriter", + ... engine_kwargs={{"options": {{"nan_inf_to_errors": True}}}} + ... ) as writer: + ... df.to_excel(writer) # doctest: +SKIP + + In append mode, ``engine_kwargs`` are passed through to + openpyxl's ``load_workbook``: + + >>> with pd.ExcelWriter( + ... "path_to_file.xlsx", + ... engine="openpyxl", + ... mode="a", + ... engine_kwargs={{"keep_vba": True}} + ... ) as writer: + ... df.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP + """ + + # Defining an ExcelWriter implementation (see abstract methods for more...) 
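+ # Concrete writers register themselves via ``register_writer()`` and are + # looked up in ``__new__`` below through ``get_writer``, keyed by ``_engine``.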
+ + # - Mandatory + # - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)`` + # --> called to write additional DataFrames to disk + # - ``_supported_extensions`` (tuple of supported extensions), used to + # check that engine supports the given extension. + # - ``_engine`` - string that gives the engine name. Necessary to + # instantiate class directly and bypass ``ExcelWriterMeta`` engine + # lookup. + # - ``save(self)`` --> called to save file to disk + # - Mostly mandatory (i.e. should at least exist) + # - book, cur_sheet, path + + # - Optional: + # - ``__init__(self, path, engine=None, **kwargs)`` --> always called + # with path as first argument. + + # You also need to register the class with ``register_writer()``. + # Technically, ExcelWriter implementations don't need to subclass + # ExcelWriter. + + _engine: str + _supported_extensions: tuple[str, ...] + + def __new__( + cls, + path: FilePath | WriteExcelBuffer | ExcelWriter, + engine: str | None = None, + date_format: str | None = None, + datetime_format: str | None = None, + mode: str = "w", + storage_options: StorageOptions | None = None, + if_sheet_exists: ExcelWriterIfSheetExists | None = None, + engine_kwargs: dict | None = None, + ) -> Self: + # only switch class if generic(ExcelWriter) + if cls is ExcelWriter: + if engine is None or (isinstance(engine, str) and engine == "auto"): + if isinstance(path, str): + ext = os.path.splitext(path)[-1][1:] + else: + ext = "xlsx" + + try: + engine = config.get_option(f"io.excel.{ext}.writer", silent=True) + if engine == "auto": + engine = get_default_engine(ext, mode="writer") + except KeyError as err: + raise ValueError(f"No engine for filetype: '{ext}'") from err + + # for mypy + assert engine is not None + # error: Incompatible types in assignment (expression has type + # "type[ExcelWriter[Any]]", variable has type "type[Self]") + cls = get_writer(engine) # type: ignore[assignment] + + return object.__new__(cls) + + # declare external properties you can count on + _path = None + + @property + def supported_extensions(self) -> tuple[str, ...]: + """Extensions that writer engine supports.""" + return self._supported_extensions + + @property + def engine(self) -> str: + """Name of engine.""" + return self._engine + + @property + def sheets(self) -> dict[str, Any]: + """Mapping of sheet names to sheet objects.""" + raise NotImplementedError + + @property + def book(self) -> _WorkbookT: + """ + Book instance. Class type will depend on the engine used. + + This attribute can be used to access engine-specific features. + """ + raise NotImplementedError + + def _write_cells( + self, + cells, + sheet_name: str | None = None, + startrow: int = 0, + startcol: int = 0, + freeze_panes: tuple[int, int] | None = None, + ) -> None: + """ + Write given formatted cells into an Excel sheet + + Parameters + ---------- + cells : generator + cells of formatted data to save to the Excel sheet + sheet_name : str, default None + Name of Excel sheet, if None, then use self.cur_sheet + startrow : upper left cell row to dump data frame + startcol : upper left cell column to dump data frame + freeze_panes : int tuple of length 2 + contains the bottom-most row and right-most column to freeze + """ + raise NotImplementedError + + def _save(self) -> None: + """ + Save workbook to disk.
+ """ + raise NotImplementedError + + def __init__( + self, + path: FilePath | WriteExcelBuffer | ExcelWriter, + engine: str | None = None, + date_format: str | None = None, + datetime_format: str | None = None, + mode: str = "w", + storage_options: StorageOptions | None = None, + if_sheet_exists: ExcelWriterIfSheetExists | None = None, + engine_kwargs: dict[str, Any] | None = None, + ) -> None: + # validate that this engine can handle the extension + if isinstance(path, str): + ext = os.path.splitext(path)[-1] + self.check_extension(ext) + + # use mode to open the file + if "b" not in mode: + mode += "b" + # use "a" for the user to append data to excel but internally use "r+" to let + # the excel backend first read the existing file and then write any data to it + mode = mode.replace("a", "r+") + + if if_sheet_exists not in (None, "error", "new", "replace", "overlay"): + raise ValueError( + f"'{if_sheet_exists}' is not valid for if_sheet_exists. " + "Valid options are 'error', 'new', 'replace' and 'overlay'." + ) + if if_sheet_exists and "r+" not in mode: + raise ValueError("if_sheet_exists is only valid in append mode (mode='a')") + if if_sheet_exists is None: + if_sheet_exists = "error" + self._if_sheet_exists = if_sheet_exists + + # cast ExcelWriter to avoid adding 'if self._handles is not None' + self._handles = IOHandles( + cast(IO[bytes], path), compression={"compression": None} + ) + if not isinstance(path, ExcelWriter): + self._handles = get_handle( + path, mode, storage_options=storage_options, is_text=False + ) + self._cur_sheet = None + + if date_format is None: + self._date_format = "YYYY-MM-DD" + else: + self._date_format = date_format + if datetime_format is None: + self._datetime_format = "YYYY-MM-DD HH:MM:SS" + else: + self._datetime_format = datetime_format + + self._mode = mode + + @property + def date_format(self) -> str: + """ + Format string for dates written into Excel files (e.g. 'YYYY-MM-DD'). + """ + return self._date_format + + @property + def datetime_format(self) -> str: + """ + Format string for dates written into Excel files (e.g. 'YYYY-MM-DD'). + """ + return self._datetime_format + + @property + def if_sheet_exists(self) -> str: + """ + How to behave when writing to a sheet that already exists in append mode. + """ + return self._if_sheet_exists + + def __fspath__(self) -> str: + return getattr(self._handles.handle, "name", "") + + def _get_sheet_name(self, sheet_name: str | None) -> str: + if sheet_name is None: + sheet_name = self._cur_sheet + if sheet_name is None: # pragma: no cover + raise ValueError("Must pass explicit sheet_name or set _cur_sheet property") + return sheet_name + + def _value_with_fmt( + self, val + ) -> tuple[ + int | float | bool | str | datetime.datetime | datetime.date, str | None + ]: + """ + Convert numpy types to Python types for the Excel writers. 
+ + Parameters + ---------- + val : object + Value to be written into cells + + Returns + ------- + Tuple with the first element being the converted value and the second + being an optional format + """ + fmt = None + + if is_integer(val): + val = int(val) + elif is_float(val): + val = float(val) + elif is_bool(val): + val = bool(val) + elif isinstance(val, datetime.datetime): + fmt = self._datetime_format + elif isinstance(val, datetime.date): + fmt = self._date_format + elif isinstance(val, datetime.timedelta): + val = val.total_seconds() / 86400 + fmt = "0" + else: + val = str(val) + + return val, fmt + + @classmethod + def check_extension(cls, ext: str) -> Literal[True]: + """ + Checks the path's extension against the Writer's supported + extensions; raises ValueError if it isn't supported. + """ + if ext.startswith("."): + ext = ext[1:] + if not any(ext in extension for extension in cls._supported_extensions): + raise ValueError(f"Invalid extension for engine '{cls.engine}': '{ext}'") + return True + + # Allow use as a contextmanager + def __enter__(self) -> Self: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + self.close() + + def close(self) -> None: + """synonym for save, to make it more file-like""" + self._save() + self._handles.close() + + +XLS_SIGNATURES = ( + b"\x09\x00\x04\x00\x07\x00\x10\x00", # BIFF2 + b"\x09\x02\x06\x00\x00\x00\x10\x00", # BIFF3 + b"\x09\x04\x06\x00\x00\x00\x10\x00", # BIFF4 + b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1", # Compound File Binary +) +ZIP_SIGNATURE = b"PK\x03\x04" +PEEK_SIZE = max(map(len, XLS_SIGNATURES + (ZIP_SIGNATURE,))) + + +@doc(storage_options=_shared_docs["storage_options"]) +def inspect_excel_format( + content_or_path: FilePath | ReadBuffer[bytes], + storage_options: StorageOptions | None = None, +) -> str | None: + """ + Inspect the path or content of an excel file and get its format. + + Adapted from xlrd: https://github.com/python-excel/xlrd. + + Parameters + ---------- + content_or_path : str or file-like object + Path to file or content of file to inspect. May be a URL. + {storage_options} + + Returns + ------- + str or None + Format of file if it can be determined. + + Raises + ------ + ValueError + If resulting stream is empty. + BadZipFile + If resulting stream does not have an XLS signature and is not a valid zipfile. + """ + if isinstance(content_or_path, bytes): + content_or_path = BytesIO(content_or_path) + + with get_handle( + content_or_path, "rb", storage_options=storage_options, is_text=False + ) as handle: + stream = handle.handle + stream.seek(0) + buf = stream.read(PEEK_SIZE) + if buf is None: + raise ValueError("stream is empty") + assert isinstance(buf, bytes) + peek = buf + stream.seek(0) + + if any(peek.startswith(sig) for sig in XLS_SIGNATURES): + return "xls" + elif not peek.startswith(ZIP_SIGNATURE): + return None + + with zipfile.ZipFile(stream) as zf: + # Workaround for some third party files that use forward slashes and + # lower case names. + component_names = [ + name.replace("\\", "/").lower() for name in zf.namelist() + ] + + if "xl/workbook.xml" in component_names: + return "xlsx" + if "xl/workbook.bin" in component_names: + return "xlsb" + if "content.xml" in component_names: + return "ods" + return "zip" + + +class ExcelFile: + """ + Class for parsing tabular Excel sheets into DataFrame objects. + + See read_excel for more documentation.
+ + Parameters + ---------- + path_or_buffer : str, bytes, path object (pathlib.Path or py._path.local.LocalPath), + A file-like object, xlrd workbook or openpyxl workbook. + If a string or path object, expected to be a path to a + .xls, .xlsx, .xlsb, .xlsm, .odf, .ods, or .odt file. + engine : str, default None + If io is not a buffer or path, this must be set to identify io. + Supported engines: ``xlrd``, ``openpyxl``, ``odf``, ``pyxlsb``, ``calamine`` + Engine compatibility : + + - ``xlrd`` supports old-style Excel files (.xls). + - ``openpyxl`` supports newer Excel file formats. + - ``odf`` supports OpenDocument file formats (.odf, .ods, .odt). + - ``pyxlsb`` supports Binary Excel files. + - ``calamine`` supports Excel (.xls, .xlsx, .xlsm, .xlsb) + and OpenDocument (.ods) file formats. + + .. versionchanged:: 1.2.0 + + The engine ``xlrd`` + now only supports old-style ``.xls`` files. + When ``engine=None``, the following logic will be + used to determine the engine: + + - If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt), + then ``odf`` will be used. + - Otherwise if ``path_or_buffer`` is an xls format, + ``xlrd`` will be used. + - Otherwise if ``path_or_buffer`` is in xlsb format, + ``pyxlsb`` will be used. + + .. versionadded:: 1.3.0 + + - Otherwise if ``openpyxl`` is installed, + then ``openpyxl`` will be used. + - Otherwise if ``xlrd >= 2.0`` is installed, a ``ValueError`` will be raised. + + .. warning:: + + Please do not report issues when using ``xlrd`` to read ``.xlsx`` files. + This is not supported, switch to using ``openpyxl`` instead. + engine_kwargs : dict, optional + Arbitrary keyword arguments passed to excel engine. + + Examples + -------- + >>> file = pd.ExcelFile('myfile.xlsx') # doctest: +SKIP + >>> with pd.ExcelFile("myfile.xls") as xls: # doctest: +SKIP + ... df1 = pd.read_excel(xls, "Sheet1") # doctest: +SKIP + """ + + from pandas.io.excel._calamine import CalamineReader + from pandas.io.excel._odfreader import ODFReader + from pandas.io.excel._openpyxl import OpenpyxlReader + from pandas.io.excel._pyxlsb import PyxlsbReader + from pandas.io.excel._xlrd import XlrdReader + + _engines: Mapping[str, Any] = { + "xlrd": XlrdReader, + "openpyxl": OpenpyxlReader, + "odf": ODFReader, + "pyxlsb": PyxlsbReader, + "calamine": CalamineReader, + } + + def __init__( + self, + path_or_buffer, + engine: str | None = None, + storage_options: StorageOptions | None = None, + engine_kwargs: dict | None = None, + ) -> None: + if engine_kwargs is None: + engine_kwargs = {} + + if engine is not None and engine not in self._engines: + raise ValueError(f"Unknown engine: {engine}") + + # First argument can also be bytes, so create a buffer + if isinstance(path_or_buffer, bytes): + path_or_buffer = BytesIO(path_or_buffer) + warnings.warn( + "Passing bytes to 'read_excel' is deprecated and " + "will be removed in a future version. To read from a " + "byte string, wrap it in a `BytesIO` object.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + # Could be a str, ExcelFile, Book, etc.
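+ # self.io keeps the caller's original object, while self._io (set just + # below via stringify_path) is what is handed to the reader engine.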
+ self.io = path_or_buffer + # Always a string + self._io = stringify_path(path_or_buffer) + + # Determine xlrd version if installed + if import_optional_dependency("xlrd", errors="ignore") is None: + xlrd_version = None + else: + import xlrd + + xlrd_version = Version(get_version(xlrd)) + + if engine is None: + # Only determine ext if it is needed + ext: str | None + if xlrd_version is not None and isinstance(path_or_buffer, xlrd.Book): + ext = "xls" + else: + ext = inspect_excel_format( + content_or_path=path_or_buffer, storage_options=storage_options + ) + if ext is None: + raise ValueError( + "Excel file format cannot be determined, you must specify " + "an engine manually." + ) + + engine = config.get_option(f"io.excel.{ext}.reader", silent=True) + if engine == "auto": + engine = get_default_engine(ext, mode="reader") + + assert engine is not None + self.engine = engine + self.storage_options = storage_options + + self._reader = self._engines[engine]( + self._io, + storage_options=storage_options, + engine_kwargs=engine_kwargs, + ) + + def __fspath__(self): + return self._io + + def parse( + self, + sheet_name: str | int | list[int] | list[str] | None = 0, + header: int | Sequence[int] | None = 0, + names: SequenceNotStr[Hashable] | range | None = None, + index_col: int | Sequence[int] | None = None, + usecols=None, + converters=None, + true_values: Iterable[Hashable] | None = None, + false_values: Iterable[Hashable] | None = None, + skiprows: Sequence[int] | int | Callable[[int], object] | None = None, + nrows: int | None = None, + na_values=None, + parse_dates: list | dict | bool = False, + date_parser: Callable | lib.NoDefault = lib.no_default, + date_format: str | dict[Hashable, str] | None = None, + thousands: str | None = None, + comment: str | None = None, + skipfooter: int = 0, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + **kwds, + ) -> DataFrame | dict[str, DataFrame] | dict[int, DataFrame]: + """ + Parse specified sheet(s) into a DataFrame. + + Equivalent to read_excel(ExcelFile, ...). See the read_excel + docstring for more info on accepted parameters. + + Returns + ------- + DataFrame or dict of DataFrames + DataFrame from the passed in Excel file.
+ + Examples + -------- + >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) + >>> df.to_excel('myfile.xlsx') # doctest: +SKIP + >>> file = pd.ExcelFile('myfile.xlsx') # doctest: +SKIP + >>> file.parse() # doctest: +SKIP + """ + return self._reader.parse( + sheet_name=sheet_name, + header=header, + names=names, + index_col=index_col, + usecols=usecols, + converters=converters, + true_values=true_values, + false_values=false_values, + skiprows=skiprows, + nrows=nrows, + na_values=na_values, + parse_dates=parse_dates, + date_parser=date_parser, + date_format=date_format, + thousands=thousands, + comment=comment, + skipfooter=skipfooter, + dtype_backend=dtype_backend, + **kwds, + ) + + @property + def book(self): + return self._reader.book + + @property + def sheet_names(self): + return self._reader.sheet_names + + def close(self) -> None: + """close io if necessary""" + self._reader.close() + + def __enter__(self) -> Self: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + self.close() diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_odfreader.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_odfreader.py new file mode 100644 index 0000000000000000000000000000000000000000..69b514da32857119f048a25f647d1002315a9889 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_odfreader.py @@ -0,0 +1,253 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + cast, +) + +import numpy as np + +from pandas._typing import ( + FilePath, + ReadBuffer, + Scalar, + StorageOptions, +) +from pandas.compat._optional import import_optional_dependency +from pandas.util._decorators import doc + +import pandas as pd +from pandas.core.shared_docs import _shared_docs + +from pandas.io.excel._base import BaseExcelReader + +if TYPE_CHECKING: + from odf.opendocument import OpenDocument + + from pandas._libs.tslibs.nattype import NaTType + + +@doc(storage_options=_shared_docs["storage_options"]) +class ODFReader(BaseExcelReader["OpenDocument"]): + def __init__( + self, + filepath_or_buffer: FilePath | ReadBuffer[bytes], + storage_options: StorageOptions | None = None, + engine_kwargs: dict | None = None, + ) -> None: + """ + Read tables out of OpenDocument formatted files. + + Parameters + ---------- + filepath_or_buffer : str, path to be parsed or + an open readable stream. + {storage_options} + engine_kwargs : dict, optional + Arbitrary keyword arguments passed to excel engine. 
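+ + Notes + ----- + Sheets are located as ``odf.table.Table`` elements; cell values are + decoded from each cell's ``office:value-type`` attribute (see + ``_get_cell_value`` below).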
+ """ + import_optional_dependency("odf") + super().__init__( + filepath_or_buffer, + storage_options=storage_options, + engine_kwargs=engine_kwargs, + ) + + @property + def _workbook_class(self) -> type[OpenDocument]: + from odf.opendocument import OpenDocument + + return OpenDocument + + def load_workbook( + self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs + ) -> OpenDocument: + from odf.opendocument import load + + return load(filepath_or_buffer, **engine_kwargs) + + @property + def empty_value(self) -> str: + """Property for compat with other readers.""" + return "" + + @property + def sheet_names(self) -> list[str]: + """Return a list of sheet names present in the document""" + from odf.table import Table + + tables = self.book.getElementsByType(Table) + return [t.getAttribute("name") for t in tables] + + def get_sheet_by_index(self, index: int): + from odf.table import Table + + self.raise_if_bad_sheet_by_index(index) + tables = self.book.getElementsByType(Table) + return tables[index] + + def get_sheet_by_name(self, name: str): + from odf.table import Table + + self.raise_if_bad_sheet_by_name(name) + tables = self.book.getElementsByType(Table) + + for table in tables: + if table.getAttribute("name") == name: + return table + + self.close() + raise ValueError(f"sheet {name} not found") + + def get_sheet_data( + self, sheet, file_rows_needed: int | None = None + ) -> list[list[Scalar | NaTType]]: + """ + Parse an ODF Table into a list of lists + """ + from odf.table import ( + CoveredTableCell, + TableCell, + TableRow, + ) + + covered_cell_name = CoveredTableCell().qname + table_cell_name = TableCell().qname + cell_names = {covered_cell_name, table_cell_name} + + sheet_rows = sheet.getElementsByType(TableRow) + empty_rows = 0 + max_row_len = 0 + + table: list[list[Scalar | NaTType]] = [] + + for sheet_row in sheet_rows: + sheet_cells = [ + x + for x in sheet_row.childNodes + if hasattr(x, "qname") and x.qname in cell_names + ] + empty_cells = 0 + table_row: list[Scalar | NaTType] = [] + + for sheet_cell in sheet_cells: + if sheet_cell.qname == table_cell_name: + value = self._get_cell_value(sheet_cell) + else: + value = self.empty_value + + column_repeat = self._get_column_repeat(sheet_cell) + + # Queue up empty values, writing only if content succeeds them + if value == self.empty_value: + empty_cells += column_repeat + else: + table_row.extend([self.empty_value] * empty_cells) + empty_cells = 0 + table_row.extend([value] * column_repeat) + + if max_row_len < len(table_row): + max_row_len = len(table_row) + + row_repeat = self._get_row_repeat(sheet_row) + if len(table_row) == 0: + empty_rows += row_repeat + else: + # add blank rows to our table + table.extend([[self.empty_value]] * empty_rows) + empty_rows = 0 + table.extend(table_row for _ in range(row_repeat)) + if file_rows_needed is not None and len(table) >= file_rows_needed: + break + + # Make our table square + for row in table: + if len(row) < max_row_len: + row.extend([self.empty_value] * (max_row_len - len(row))) + + return table + + def _get_row_repeat(self, row) -> int: + """ + Return number of times this row was repeated + Repeating an empty row appeared to be a common way + of representing sparse rows in the table. 
+ """ + from odf.namespaces import TABLENS + + return int(row.attributes.get((TABLENS, "number-rows-repeated"), 1)) + + def _get_column_repeat(self, cell) -> int: + from odf.namespaces import TABLENS + + return int(cell.attributes.get((TABLENS, "number-columns-repeated"), 1)) + + def _get_cell_value(self, cell) -> Scalar | NaTType: + from odf.namespaces import OFFICENS + + if str(cell) == "#N/A": + return np.nan + + cell_type = cell.attributes.get((OFFICENS, "value-type")) + if cell_type == "boolean": + if str(cell) == "TRUE": + return True + return False + if cell_type is None: + return self.empty_value + elif cell_type == "float": + # GH5394 + cell_value = float(cell.attributes.get((OFFICENS, "value"))) + val = int(cell_value) + if val == cell_value: + return val + return cell_value + elif cell_type == "percentage": + cell_value = cell.attributes.get((OFFICENS, "value")) + return float(cell_value) + elif cell_type == "string": + return self._get_cell_string_value(cell) + elif cell_type == "currency": + cell_value = cell.attributes.get((OFFICENS, "value")) + return float(cell_value) + elif cell_type == "date": + cell_value = cell.attributes.get((OFFICENS, "date-value")) + return pd.Timestamp(cell_value) + elif cell_type == "time": + stamp = pd.Timestamp(str(cell)) + # cast needed here because Scalar doesn't include datetime.time + return cast(Scalar, stamp.time()) + else: + self.close() + raise ValueError(f"Unrecognized type {cell_type}") + + def _get_cell_string_value(self, cell) -> str: + """ + Find and decode OpenDocument text:s tags that represent + a run length encoded sequence of space characters. + """ + from odf.element import Element + from odf.namespaces import TEXTNS + from odf.office import Annotation + from odf.text import S + + office_annotation = Annotation().qname + text_s = S().qname + + value = [] + + for fragment in cell.childNodes: + if isinstance(fragment, Element): + if fragment.qname == text_s: + spaces = int(fragment.attributes.get((TEXTNS, "c"), 1)) + value.append(" " * spaces) + elif fragment.qname == office_annotation: + continue + else: + # recursive impl needed in case of nested fragments + # with multiple spaces + # https://github.com/pandas-dev/pandas/pull/36175#discussion_r484639704 + value.append(self._get_cell_string_value(fragment)) + else: + value.append(str(fragment).strip("\n")) + return "".join(value) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_pyxlsb.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_pyxlsb.py new file mode 100644 index 0000000000000000000000000000000000000000..a6e42616c20438fa4cab16e94b5d16a01c9c61df --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_pyxlsb.py @@ -0,0 +1,127 @@ +# pyright: reportMissingImports=false +from __future__ import annotations + +from typing import TYPE_CHECKING + +from pandas.compat._optional import import_optional_dependency +from pandas.util._decorators import doc + +from pandas.core.shared_docs import _shared_docs + +from pandas.io.excel._base import BaseExcelReader + +if TYPE_CHECKING: + from pyxlsb import Workbook + + from pandas._typing import ( + FilePath, + ReadBuffer, + Scalar, + StorageOptions, + ) + + +class PyxlsbReader(BaseExcelReader["Workbook"]): + @doc(storage_options=_shared_docs["storage_options"]) + def __init__( + self, + filepath_or_buffer: FilePath | ReadBuffer[bytes], + storage_options: StorageOptions | None = None, + engine_kwargs: dict | None = None, + ) -> None: + """ + Reader using pyxlsb engine. 
+ + Parameters + ---------- + filepath_or_buffer : str, path object, or Workbook + Object to be parsed. + {storage_options} + engine_kwargs : dict, optional + Arbitrary keyword arguments passed to excel engine. + """ + import_optional_dependency("pyxlsb") + # This will call load_workbook on the filepath or buffer + # And set the result to the book-attribute + super().__init__( + filepath_or_buffer, + storage_options=storage_options, + engine_kwargs=engine_kwargs, + ) + + @property + def _workbook_class(self) -> type[Workbook]: + from pyxlsb import Workbook + + return Workbook + + def load_workbook( + self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs + ) -> Workbook: + from pyxlsb import open_workbook + + # TODO: hack in buffer capability + # This might need some modifications to the Pyxlsb library + # Actual work for opening it is in xlsbpackage.py, line 20-ish + + return open_workbook(filepath_or_buffer, **engine_kwargs) + + @property + def sheet_names(self) -> list[str]: + return self.book.sheets + + def get_sheet_by_name(self, name: str): + self.raise_if_bad_sheet_by_name(name) + return self.book.get_sheet(name) + + def get_sheet_by_index(self, index: int): + self.raise_if_bad_sheet_by_index(index) + # pyxlsb sheets are indexed from 1 onwards + # There's a fix for this in the source, but the pypi package doesn't have it + return self.book.get_sheet(index + 1) + + def _convert_cell(self, cell) -> Scalar: + # TODO: there is no way to distinguish between floats and datetimes in pyxlsb + # This means that there is no way to read datetime types from an xlsb file yet + if cell.v is None: + return "" # Prevents non-named columns from not showing up as Unnamed: i + if isinstance(cell.v, float): + val = int(cell.v) + if val == cell.v: + return val + else: + return float(cell.v) + + return cell.v + + def get_sheet_data( + self, + sheet, + file_rows_needed: int | None = None, + ) -> list[list[Scalar]]: + data: list[list[Scalar]] = [] + previous_row_number = -1 + # When sparse=True the rows can have different lengths and empty rows are + # not returned. The cells are namedtuples of row, col, value (r, c, v). 
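+ # Rows skipped as empty never appear in the iterator, so the gap between + # consecutive row numbers is re-inserted below as empty lists to keep row + # indices aligned with the sheet.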
+ for row in sheet.rows(sparse=True): + row_number = row[0].r + converted_row = [self._convert_cell(cell) for cell in row] + while converted_row and converted_row[-1] == "": + # trim trailing empty elements + converted_row.pop() + if converted_row: + data.extend([[]] * (row_number - previous_row_number - 1)) + data.append(converted_row) + previous_row_number = row_number + if file_rows_needed is not None and len(data) >= file_rows_needed: + break + if data: + # extend rows to max_width + max_width = max(len(data_row) for data_row in data) + if min(len(data_row) for data_row in data) < max_width: + empty_cell: list[Scalar] = [""] + data = [ + data_row + (max_width - len(data_row)) * empty_cell + for data_row in data + ] + return data diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_util.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_util.py new file mode 100644 index 0000000000000000000000000000000000000000..f7a1fcb8052e391d0853be64866663f4e6de9d08 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_util.py @@ -0,0 +1,334 @@ +from __future__ import annotations + +from collections.abc import ( + Hashable, + Iterable, + MutableMapping, + Sequence, +) +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + TypeVar, + overload, +) + +from pandas.compat._optional import import_optional_dependency + +from pandas.core.dtypes.common import ( + is_integer, + is_list_like, +) + +if TYPE_CHECKING: + from pandas.io.excel._base import ExcelWriter + + ExcelWriter_t = type[ExcelWriter] + usecols_func = TypeVar("usecols_func", bound=Callable[[Hashable], object]) + +_writers: MutableMapping[str, ExcelWriter_t] = {} + + +def register_writer(klass: ExcelWriter_t) -> None: + """ + Add engine to the excel writer registry ``io.excel``. + + You must use this method to integrate with ``to_excel``. + + Parameters + ---------- + klass : ExcelWriter + """ + if not callable(klass): + raise ValueError("Can only register callables as engines") + engine_name = klass._engine + _writers[engine_name] = klass + + +def get_default_engine(ext: str, mode: Literal["reader", "writer"] = "reader") -> str: + """ + Return the default reader/writer for the given extension. + + Parameters + ---------- + ext : str + The excel file extension for which to get the default engine. + mode : str {'reader', 'writer'} + Whether to get the default engine for reading or writing. + Either 'reader' or 'writer' + + Returns + ------- + str + The default engine for the extension. + """ + _default_readers = { + "xlsx": "openpyxl", + "xlsm": "openpyxl", + "xlsb": "pyxlsb", + "xls": "xlrd", + "ods": "odf", + } + _default_writers = { + "xlsx": "openpyxl", + "xlsm": "openpyxl", + "xlsb": "pyxlsb", + "ods": "odf", + } + assert mode in ["reader", "writer"] + if mode == "writer": + # Prefer xlsxwriter over openpyxl if installed + xlsxwriter = import_optional_dependency("xlsxwriter", errors="warn") + if xlsxwriter: + _default_writers["xlsx"] = "xlsxwriter" + return _default_writers[ext] + else: + return _default_readers[ext] + + +def get_writer(engine_name: str) -> ExcelWriter_t: + try: + return _writers[engine_name] + except KeyError as err: + raise ValueError(f"No Excel writer '{engine_name}'") from err + + +def _excel2num(x: str) -> int: + """ + Convert Excel column name like 'AB' to 0-based column index. + + Parameters + ---------- + x : str + The Excel column name to convert to a 0-based column index. + + Returns + ------- + num : int + The column index corresponding to the name.
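+ + For example, "A" maps to 0, "Z" to 25 and "AA" to 26: the name is read as + a base-26 numeral with digits 1..26, then shifted to a 0-based index.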
+ + Raises + ------ + ValueError + Part of the Excel column name was invalid. + """ + index = 0 + + for c in x.upper().strip(): + cp = ord(c) + + if cp < ord("A") or cp > ord("Z"): + raise ValueError(f"Invalid column name: {x}") + + index = index * 26 + cp - ord("A") + 1 + + return index - 1 + + +def _range2cols(areas: str) -> list[int]: + """ + Convert comma separated list of column names and ranges to indices. + + Parameters + ---------- + areas : str + A string containing a sequence of column ranges (or areas). + + Returns + ------- + cols : list + A list of 0-based column indices. + + Examples + -------- + >>> _range2cols('A:E') + [0, 1, 2, 3, 4] + >>> _range2cols('A,C,Z:AB') + [0, 2, 25, 26, 27] + """ + cols: list[int] = [] + + for rng in areas.split(","): + if ":" in rng: + rngs = rng.split(":") + cols.extend(range(_excel2num(rngs[0]), _excel2num(rngs[1]) + 1)) + else: + cols.append(_excel2num(rng)) + + return cols + + +@overload +def maybe_convert_usecols(usecols: str | list[int]) -> list[int]: + ... + + +@overload +def maybe_convert_usecols(usecols: list[str]) -> list[str]: + ... + + +@overload +def maybe_convert_usecols(usecols: usecols_func) -> usecols_func: + ... + + +@overload +def maybe_convert_usecols(usecols: None) -> None: + ... + + +def maybe_convert_usecols( + usecols: str | list[int] | list[str] | usecols_func | None, +) -> None | list[int] | list[str] | usecols_func: + """ + Convert `usecols` into a compatible format for parsing in `parsers.py`. + + Parameters + ---------- + usecols : object + The use-columns object to potentially convert. + + Returns + ------- + converted : object + The compatible format of `usecols`. + """ + if usecols is None: + return usecols + + if is_integer(usecols): + raise ValueError( + "Passing an integer for `usecols` is no longer supported. " + "Please pass in a list of int from 0 to `usecols` inclusive instead." + ) + + if isinstance(usecols, str): + return _range2cols(usecols) + + return usecols + + +@overload +def validate_freeze_panes(freeze_panes: tuple[int, int]) -> Literal[True]: + ... + + +@overload +def validate_freeze_panes(freeze_panes: None) -> Literal[False]: + ... + + +def validate_freeze_panes(freeze_panes: tuple[int, int] | None) -> bool: + if freeze_panes is not None: + if len(freeze_panes) == 2 and all( + isinstance(item, int) for item in freeze_panes + ): + return True + + raise ValueError( + "freeze_panes must be of form (row, column) " + "where row and column are integers" + ) + + # freeze_panes wasn't specified, return False so it won't be applied + # to output sheet + return False + + +def fill_mi_header( + row: list[Hashable], control_row: list[bool] +) -> tuple[list[Hashable], list[bool]]: + """ + Forward fill blank entries in row but only inside the same parent index. + + Used for creating headers in Multiindex. + + Parameters + ---------- + row : list + List of items in a single row. + control_row : list of bool + Helps to determine if particular column is in same parent index as the + previous value. Used to stop propagation of empty cells between + different indexes. 
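+ + For example, ``row=["a", "", "b", ""]`` with ``control_row=[True] * 4`` + becomes ``["a", "a", "b", "b"]``.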
+ + Returns + ------- + Returns changed row and control_row + """ + last = row[0] + for i in range(1, len(row)): + if not control_row[i]: + last = row[i] + + if row[i] == "" or row[i] is None: + row[i] = last + else: + control_row[i] = False + last = row[i] + + return row, control_row + + +def pop_header_name( + row: list[Hashable], index_col: int | Sequence[int] +) -> tuple[Hashable | None, list[Hashable]]: + """ + Pop the header name for MultiIndex parsing. + + Parameters + ---------- + row : list + The data row to parse for the header name. + index_col : int, list + The index columns for our data. Assumed to be non-null. + + Returns + ------- + header_name : str + The extracted header name. + trimmed_row : list + The original data row with the header name removed. + """ + # Pop out header name and fill w/blank. + if is_list_like(index_col): + assert isinstance(index_col, Iterable) + i = max(index_col) + else: + assert not isinstance(index_col, Iterable) + i = index_col + + header_name = row[i] + header_name = None if header_name == "" else header_name + + return header_name, row[:i] + [""] + row[i + 1 :] + + +def combine_kwargs(engine_kwargs: dict[str, Any] | None, kwargs: dict) -> dict: + """ + Used to combine two sources of kwargs for the backend engine. + + Use of kwargs is deprecated; this function is solely for use in 1.3 and should + be removed in 1.4/2.0. Also _base.ExcelWriter.__new__ ensures either engine_kwargs + or kwargs must be None or empty respectively. + + Parameters + ---------- + engine_kwargs: dict + kwargs to be passed through to the engine. + kwargs: dict + kwargs to be passed through to the engine (deprecated) + + Returns + ------- + engine_kwargs combined with kwargs + """ + if engine_kwargs is None: + result = {} + else: + result = engine_kwargs.copy() + result.update(kwargs) + return result diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_xlrd.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_xlrd.py new file mode 100644 index 0000000000000000000000000000000000000000..a444970792e6e65faf3d8947b721fff59487d994 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_xlrd.py @@ -0,0 +1,143 @@ +from __future__ import annotations + +from datetime import time +import math +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.compat._optional import import_optional_dependency +from pandas.util._decorators import doc + +from pandas.core.shared_docs import _shared_docs + +from pandas.io.excel._base import BaseExcelReader + +if TYPE_CHECKING: + from xlrd import Book + + from pandas._typing import ( + Scalar, + StorageOptions, + ) + + +class XlrdReader(BaseExcelReader["Book"]): + @doc(storage_options=_shared_docs["storage_options"]) + def __init__( + self, + filepath_or_buffer, + storage_options: StorageOptions | None = None, + engine_kwargs: dict | None = None, + ) -> None: + """ + Reader using xlrd engine. + + Parameters + ---------- + filepath_or_buffer : str, path object or Workbook + Object to be parsed. + {storage_options} + engine_kwargs : dict, optional + Arbitrary keyword arguments passed to excel engine.
+ """ + err_msg = "Install xlrd >= 2.0.1 for xls Excel support" + import_optional_dependency("xlrd", extra=err_msg) + super().__init__( + filepath_or_buffer, + storage_options=storage_options, + engine_kwargs=engine_kwargs, + ) + + @property + def _workbook_class(self) -> type[Book]: + from xlrd import Book + + return Book + + def load_workbook(self, filepath_or_buffer, engine_kwargs) -> Book: + from xlrd import open_workbook + + if hasattr(filepath_or_buffer, "read"): + data = filepath_or_buffer.read() + return open_workbook(file_contents=data, **engine_kwargs) + else: + return open_workbook(filepath_or_buffer, **engine_kwargs) + + @property + def sheet_names(self): + return self.book.sheet_names() + + def get_sheet_by_name(self, name): + self.raise_if_bad_sheet_by_name(name) + return self.book.sheet_by_name(name) + + def get_sheet_by_index(self, index): + self.raise_if_bad_sheet_by_index(index) + return self.book.sheet_by_index(index) + + def get_sheet_data( + self, sheet, file_rows_needed: int | None = None + ) -> list[list[Scalar]]: + from xlrd import ( + XL_CELL_BOOLEAN, + XL_CELL_DATE, + XL_CELL_ERROR, + XL_CELL_NUMBER, + xldate, + ) + + epoch1904 = self.book.datemode + + def _parse_cell(cell_contents, cell_typ): + """ + converts the contents of the cell into a pandas appropriate object + """ + if cell_typ == XL_CELL_DATE: + # Use the newer xlrd datetime handling. + try: + cell_contents = xldate.xldate_as_datetime(cell_contents, epoch1904) + except OverflowError: + return cell_contents + + # Excel doesn't distinguish between dates and time, + # so we treat dates on the epoch as times only. + # Also, Excel supports 1900 and 1904 epochs. + year = (cell_contents.timetuple())[0:3] + if (not epoch1904 and year == (1899, 12, 31)) or ( + epoch1904 and year == (1904, 1, 1) + ): + cell_contents = time( + cell_contents.hour, + cell_contents.minute, + cell_contents.second, + cell_contents.microsecond, + ) + + elif cell_typ == XL_CELL_ERROR: + cell_contents = np.nan + elif cell_typ == XL_CELL_BOOLEAN: + cell_contents = bool(cell_contents) + elif cell_typ == XL_CELL_NUMBER: + # GH5394 - Excel 'numbers' are always floats + # it's a minimal perf hit and less surprising + if math.isfinite(cell_contents): + # GH54564 - don't attempt to convert NaN/Inf + val = int(cell_contents) + if val == cell_contents: + cell_contents = val + return cell_contents + + data = [] + + nrows = sheet.nrows + if file_rows_needed is not None: + nrows = min(nrows, file_rows_needed) + for i in range(nrows): + row = [ + _parse_cell(value, typ) + for value, typ in zip(sheet.row_values(i), sheet.row_types(i)) + ] + data.append(row) + + return data diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_xlsxwriter.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_xlsxwriter.py new file mode 100644 index 0000000000000000000000000000000000000000..6eacac8c064fb1f297cd46b8ab0361ceb22067b4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_xlsxwriter.py @@ -0,0 +1,284 @@ +from __future__ import annotations + +import json +from typing import ( + TYPE_CHECKING, + Any, +) + +from pandas.io.excel._base import ExcelWriter +from pandas.io.excel._util import ( + combine_kwargs, + validate_freeze_panes, +) + +if TYPE_CHECKING: + from pandas._typing import ( + ExcelWriterIfSheetExists, + FilePath, + StorageOptions, + WriteExcelBuffer, + ) + + +class _XlsxStyler: + # Map from openpyxl-oriented styles to flatter xlsxwriter representation + # Ordering necessary for both determinism and 
because some are keyed by + # prefixes of others. + STYLE_MAPPING: dict[str, list[tuple[tuple[str, ...], str]]] = { + "font": [ + (("name",), "font_name"), + (("sz",), "font_size"), + (("size",), "font_size"), + (("color", "rgb"), "font_color"), + (("color",), "font_color"), + (("b",), "bold"), + (("bold",), "bold"), + (("i",), "italic"), + (("italic",), "italic"), + (("u",), "underline"), + (("underline",), "underline"), + (("strike",), "font_strikeout"), + (("vertAlign",), "font_script"), + (("vertalign",), "font_script"), + ], + "number_format": [(("format_code",), "num_format"), ((), "num_format")], + "protection": [(("locked",), "locked"), (("hidden",), "hidden")], + "alignment": [ + (("horizontal",), "align"), + (("vertical",), "valign"), + (("text_rotation",), "rotation"), + (("wrap_text",), "text_wrap"), + (("indent",), "indent"), + (("shrink_to_fit",), "shrink"), + ], + "fill": [ + (("patternType",), "pattern"), + (("patterntype",), "pattern"), + (("fill_type",), "pattern"), + (("start_color", "rgb"), "fg_color"), + (("fgColor", "rgb"), "fg_color"), + (("fgcolor", "rgb"), "fg_color"), + (("start_color",), "fg_color"), + (("fgColor",), "fg_color"), + (("fgcolor",), "fg_color"), + (("end_color", "rgb"), "bg_color"), + (("bgColor", "rgb"), "bg_color"), + (("bgcolor", "rgb"), "bg_color"), + (("end_color",), "bg_color"), + (("bgColor",), "bg_color"), + (("bgcolor",), "bg_color"), + ], + "border": [ + (("color", "rgb"), "border_color"), + (("color",), "border_color"), + (("style",), "border"), + (("top", "color", "rgb"), "top_color"), + (("top", "color"), "top_color"), + (("top", "style"), "top"), + (("top",), "top"), + (("right", "color", "rgb"), "right_color"), + (("right", "color"), "right_color"), + (("right", "style"), "right"), + (("right",), "right"), + (("bottom", "color", "rgb"), "bottom_color"), + (("bottom", "color"), "bottom_color"), + (("bottom", "style"), "bottom"), + (("bottom",), "bottom"), + (("left", "color", "rgb"), "left_color"), + (("left", "color"), "left_color"), + (("left", "style"), "left"), + (("left",), "left"), + ], + } + + @classmethod + def convert(cls, style_dict, num_format_str=None): + """ + converts a style_dict to an xlsxwriter format dict + + Parameters + ---------- + style_dict : style dictionary to convert + num_format_str : optional number format string + """ + # Create a XlsxWriter format object. 
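+ # Each (src, dst) pair in STYLE_MAPPING maps a key path in the nested + # openpyxl-style dict to a flat xlsxwriter property; the first pair that + # resolves wins, since later matches never overwrite an existing dst key.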
+ props = {} + + if num_format_str is not None: + props["num_format"] = num_format_str + + if style_dict is None: + return props + + if "borders" in style_dict: + style_dict = style_dict.copy() + style_dict["border"] = style_dict.pop("borders") + + for style_group_key, style_group in style_dict.items(): + for src, dst in cls.STYLE_MAPPING.get(style_group_key, []): + # src is a sequence of keys into a nested dict + # dst is a flat key + if dst in props: + continue + v = style_group + for k in src: + try: + v = v[k] + except (KeyError, TypeError): + break + else: + props[dst] = v + + if isinstance(props.get("pattern"), str): + # TODO: support other fill patterns + props["pattern"] = 0 if props["pattern"] == "none" else 1 + + for k in ["border", "top", "right", "bottom", "left"]: + if isinstance(props.get(k), str): + try: + props[k] = [ + "none", + "thin", + "medium", + "dashed", + "dotted", + "thick", + "double", + "hair", + "mediumDashed", + "dashDot", + "mediumDashDot", + "dashDotDot", + "mediumDashDotDot", + "slantDashDot", + ].index(props[k]) + except ValueError: + props[k] = 2 + + if isinstance(props.get("font_script"), str): + props["font_script"] = ["baseline", "superscript", "subscript"].index( + props["font_script"] + ) + + if isinstance(props.get("underline"), str): + props["underline"] = { + "none": 0, + "single": 1, + "double": 2, + "singleAccounting": 33, + "doubleAccounting": 34, + }[props["underline"]] + + # GH 30107 - xlsxwriter uses different name + if props.get("valign") == "center": + props["valign"] = "vcenter" + + return props + + +class XlsxWriter(ExcelWriter): + _engine = "xlsxwriter" + _supported_extensions = (".xlsx",) + + def __init__( + self, + path: FilePath | WriteExcelBuffer | ExcelWriter, + engine: str | None = None, + date_format: str | None = None, + datetime_format: str | None = None, + mode: str = "w", + storage_options: StorageOptions | None = None, + if_sheet_exists: ExcelWriterIfSheetExists | None = None, + engine_kwargs: dict[str, Any] | None = None, + **kwargs, + ) -> None: + # Use the xlsxwriter module as the Excel writer. + from xlsxwriter import Workbook + + engine_kwargs = combine_kwargs(engine_kwargs, kwargs) + + if mode == "a": + raise ValueError("Append mode is not supported with xlsxwriter!") + + super().__init__( + path, + engine=engine, + date_format=date_format, + datetime_format=datetime_format, + mode=mode, + storage_options=storage_options, + if_sheet_exists=if_sheet_exists, + engine_kwargs=engine_kwargs, + ) + + try: + self._book = Workbook(self._handles.handle, **engine_kwargs) + except TypeError: + self._handles.handle.close() + raise + + @property + def book(self): + """ + Book instance of class xlsxwriter.Workbook. + + This attribute can be used to access engine-specific features. + """ + return self._book + + @property + def sheets(self) -> dict[str, Any]: + result = self.book.sheetnames + return result + + def _save(self) -> None: + """ + Save workbook to disk. + """ + self.book.close() + + def _write_cells( + self, + cells, + sheet_name: str | None = None, + startrow: int = 0, + startcol: int = 0, + freeze_panes: tuple[int, int] | None = None, + ) -> None: + # Write the frame cells using xlsxwriter. 
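+ # Converted formats are cached by the JSON form of the style (plus number + # format) below, so repeated cell styles reuse one workbook format object + # instead of re-registering an identical format for every cell.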
+ sheet_name = self._get_sheet_name(sheet_name) + + wks = self.book.get_worksheet_by_name(sheet_name) + if wks is None: + wks = self.book.add_worksheet(sheet_name) + + style_dict = {"null": None} + + if validate_freeze_panes(freeze_panes): + wks.freeze_panes(*(freeze_panes)) + + for cell in cells: + val, fmt = self._value_with_fmt(cell.val) + + stylekey = json.dumps(cell.style) + if fmt: + stylekey += fmt + + if stylekey in style_dict: + style = style_dict[stylekey] + else: + style = self.book.add_format(_XlsxStyler.convert(cell.style, fmt)) + style_dict[stylekey] = style + + if cell.mergestart is not None and cell.mergeend is not None: + wks.merge_range( + startrow + cell.row, + startcol + cell.col, + startrow + cell.mergestart, + startcol + cell.mergeend, + val, + style, + ) + else: + wks.write(startrow + cell.row, startcol + cell.col, val, style) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5e56b1bc7ba4377cc5de9d68a1424524aef21cb5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__init__.py @@ -0,0 +1,9 @@ +# ruff: noqa: TCH004 +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + # import modules that have public classes/functions + from pandas.io.formats import style + + # and mark only those modules as public + __all__ = ["style"] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7399a857b25f23853409452300ec89386fbceee Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/_color_data.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/_color_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..623ccb6373779d252bb98050d6d9dce9e0ea81b7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/_color_data.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/console.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/console.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f78b6bcfa0cdc1032337da22a672ae48e6e4a177 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/console.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/css.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/css.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a02b789613e212a11e91a79ea0c9f89090f72f7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/css.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/csvs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/csvs.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..9116d5ed6ccda966df8986600fe58682141490a4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/csvs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/excel.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/excel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da92add02321b326fc76cca70483b697f37e759e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/excel.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/format.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/format.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1c97059f4ab718674ee9b4db8c7b991eff4e8ae Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/format.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/html.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/html.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d9004a84638880c7a58c68cd69368503793c51c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/html.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/info.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/info.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8338efa4c1e9d466c92ce044281d2b07090b6cb3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/info.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/printing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/printing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3097a7fe5662e0ca7c3e813be4312a27af7238b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/printing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/string.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/string.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e8de85cf1bd63e63fe35eeff9001fa57c5ce23b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/string.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..714747da3ed2ca07be6a22ee295d994d15907019 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style_render.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style_render.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..086e4f501030c1bd6c56d245764df25e2d0602a8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style_render.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/xml.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/xml.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a5174b233774963996b44520f66281b79489fad Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/xml.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/_color_data.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/_color_data.py new file mode 100644 index 0000000000000000000000000000000000000000..2e7cb7f29646eb11c0ec83d8a909a8cfd7953182 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/_color_data.py @@ -0,0 +1,157 @@ +# GH37967: Enable the use of CSS named colors, as defined in +# matplotlib.colors.CSS4_COLORS, when exporting to Excel. +# This data has been copied here, instead of being imported from matplotlib, +# not to have ``to_excel`` methods require matplotlib. +# source: matplotlib._color_data (3.3.3) +from __future__ import annotations + +CSS4_COLORS = { + "aliceblue": "F0F8FF", + "antiquewhite": "FAEBD7", + "aqua": "00FFFF", + "aquamarine": "7FFFD4", + "azure": "F0FFFF", + "beige": "F5F5DC", + "bisque": "FFE4C4", + "black": "000000", + "blanchedalmond": "FFEBCD", + "blue": "0000FF", + "blueviolet": "8A2BE2", + "brown": "A52A2A", + "burlywood": "DEB887", + "cadetblue": "5F9EA0", + "chartreuse": "7FFF00", + "chocolate": "D2691E", + "coral": "FF7F50", + "cornflowerblue": "6495ED", + "cornsilk": "FFF8DC", + "crimson": "DC143C", + "cyan": "00FFFF", + "darkblue": "00008B", + "darkcyan": "008B8B", + "darkgoldenrod": "B8860B", + "darkgray": "A9A9A9", + "darkgreen": "006400", + "darkgrey": "A9A9A9", + "darkkhaki": "BDB76B", + "darkmagenta": "8B008B", + "darkolivegreen": "556B2F", + "darkorange": "FF8C00", + "darkorchid": "9932CC", + "darkred": "8B0000", + "darksalmon": "E9967A", + "darkseagreen": "8FBC8F", + "darkslateblue": "483D8B", + "darkslategray": "2F4F4F", + "darkslategrey": "2F4F4F", + "darkturquoise": "00CED1", + "darkviolet": "9400D3", + "deeppink": "FF1493", + "deepskyblue": "00BFFF", + "dimgray": "696969", + "dimgrey": "696969", + "dodgerblue": "1E90FF", + "firebrick": "B22222", + "floralwhite": "FFFAF0", + "forestgreen": "228B22", + "fuchsia": "FF00FF", + "gainsboro": "DCDCDC", + "ghostwhite": "F8F8FF", + "gold": "FFD700", + "goldenrod": "DAA520", + "gray": "808080", + "green": "008000", + "greenyellow": "ADFF2F", + "grey": "808080", + "honeydew": "F0FFF0", + "hotpink": "FF69B4", + "indianred": "CD5C5C", + "indigo": "4B0082", + "ivory": "FFFFF0", + "khaki": "F0E68C", + "lavender": "E6E6FA", + "lavenderblush": "FFF0F5", + "lawngreen": "7CFC00", + "lemonchiffon": "FFFACD", + "lightblue": "ADD8E6", + "lightcoral": "F08080", + "lightcyan": "E0FFFF", + "lightgoldenrodyellow": "FAFAD2", + "lightgray": "D3D3D3", + "lightgreen": "90EE90", + "lightgrey": "D3D3D3", + "lightpink": "FFB6C1", + "lightsalmon": "FFA07A", + "lightseagreen": "20B2AA", + "lightskyblue": "87CEFA", + "lightslategray": "778899", + "lightslategrey": "778899", + "lightsteelblue": "B0C4DE", + "lightyellow": "FFFFE0", + "lime": "00FF00", + "limegreen": "32CD32", + "linen": "FAF0E6", + "magenta": "FF00FF", + 
"maroon": "800000", + "mediumaquamarine": "66CDAA", + "mediumblue": "0000CD", + "mediumorchid": "BA55D3", + "mediumpurple": "9370DB", + "mediumseagreen": "3CB371", + "mediumslateblue": "7B68EE", + "mediumspringgreen": "00FA9A", + "mediumturquoise": "48D1CC", + "mediumvioletred": "C71585", + "midnightblue": "191970", + "mintcream": "F5FFFA", + "mistyrose": "FFE4E1", + "moccasin": "FFE4B5", + "navajowhite": "FFDEAD", + "navy": "000080", + "oldlace": "FDF5E6", + "olive": "808000", + "olivedrab": "6B8E23", + "orange": "FFA500", + "orangered": "FF4500", + "orchid": "DA70D6", + "palegoldenrod": "EEE8AA", + "palegreen": "98FB98", + "paleturquoise": "AFEEEE", + "palevioletred": "DB7093", + "papayawhip": "FFEFD5", + "peachpuff": "FFDAB9", + "peru": "CD853F", + "pink": "FFC0CB", + "plum": "DDA0DD", + "powderblue": "B0E0E6", + "purple": "800080", + "rebeccapurple": "663399", + "red": "FF0000", + "rosybrown": "BC8F8F", + "royalblue": "4169E1", + "saddlebrown": "8B4513", + "salmon": "FA8072", + "sandybrown": "F4A460", + "seagreen": "2E8B57", + "seashell": "FFF5EE", + "sienna": "A0522D", + "silver": "C0C0C0", + "skyblue": "87CEEB", + "slateblue": "6A5ACD", + "slategray": "708090", + "slategrey": "708090", + "snow": "FFFAFA", + "springgreen": "00FF7F", + "steelblue": "4682B4", + "tan": "D2B48C", + "teal": "008080", + "thistle": "D8BFD8", + "tomato": "FF6347", + "turquoise": "40E0D0", + "violet": "EE82EE", + "wheat": "F5DEB3", + "white": "FFFFFF", + "whitesmoke": "F5F5F5", + "yellow": "FFFF00", + "yellowgreen": "9ACD32", +} diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/console.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/console.py new file mode 100644 index 0000000000000000000000000000000000000000..2a6cbe07629031687c249f70b51bdfbe2dd84041 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/console.py @@ -0,0 +1,94 @@ +""" +Internal module for console introspection +""" +from __future__ import annotations + +from shutil import get_terminal_size + + +def get_console_size() -> tuple[int | None, int | None]: + """ + Return console size as tuple = (width, height). + + Returns (None,None) in non-interactive session. + """ + from pandas import get_option + + display_width = get_option("display.width") + display_height = get_option("display.max_rows") + + # Consider + # interactive shell terminal, can detect term size + # interactive non-shell terminal (ipnb/ipqtconsole), cannot detect term + # size non-interactive script, should disregard term size + + # in addition + # width,height have default values, but setting to 'None' signals + # should use Auto-Detection, But only in interactive shell-terminal. + # Simple. yeah. + + if in_interactive_session(): + if in_ipython_frontend(): + # sane defaults for interactive non-shell terminal + # match default for width,height in config_init + from pandas._config.config import get_default_val + + terminal_width = get_default_val("display.width") + terminal_height = get_default_val("display.max_rows") + else: + # pure terminal + terminal_width, terminal_height = get_terminal_size() + else: + terminal_width, terminal_height = None, None + + # Note if the User sets width/Height to None (auto-detection) + # and we're in a script (non-inter), this will return (None,None) + # caller needs to deal. 
+    return display_width or terminal_width, display_height or terminal_height
+
+
+# ----------------------------------------------------------------------
+# Detect our environment
+
+
+def in_interactive_session() -> bool:
+    """
+    Check if we're running in an interactive shell.
+
+    Returns
+    -------
+    bool
+        True if running under a Python/IPython interactive shell.
+    """
+    from pandas import get_option
+
+    def check_main():
+        try:
+            import __main__ as main
+        except ModuleNotFoundError:
+            return get_option("mode.sim_interactive")
+        return not hasattr(main, "__file__") or get_option("mode.sim_interactive")
+
+    try:
+        # error: Name '__IPYTHON__' is not defined
+        return __IPYTHON__ or check_main()  # type: ignore[name-defined]
+    except NameError:
+        return check_main()
+
+
+def in_ipython_frontend() -> bool:
+    """
+    Check if we're inside an IPython zmq frontend.
+
+    Returns
+    -------
+    bool
+    """
+    try:
+        # error: Name 'get_ipython' is not defined
+        ip = get_ipython()  # type: ignore[name-defined]
+        return "zmq" in str(type(ip)).lower()
+    except NameError:
+        pass
+
+    return False
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/css.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/css.py
new file mode 100644
index 0000000000000000000000000000000000000000..ccce60c00a9e02bf3bb7f21c5ec799b7123e8eed
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/css.py
@@ -0,0 +1,421 @@
+"""
+Utilities for interpreting CSS from Stylers for formatting non-HTML outputs.
+"""
+from __future__ import annotations
+
+import re
+from typing import (
+    TYPE_CHECKING,
+    Callable,
+)
+import warnings
+
+from pandas.errors import CSSWarning
+from pandas.util._exceptions import find_stack_level
+
+if TYPE_CHECKING:
+    from collections.abc import (
+        Generator,
+        Iterable,
+        Iterator,
+    )
+
+
+def _side_expander(prop_fmt: str) -> Callable:
+    """
+    Wrapper to expand a shorthand property into top, right, bottom and left
+    properties.
+
+    Parameters
+    ----------
+    prop_fmt : str
+        Format string used to build the expanded property names, with the
+        side name substituted (e.g. "margin-{:s}").
+
+    Returns
+    -------
+    function
+        Function to call when a shorthand declaration such as
+        'margin: {value}' is encountered.
+    """
+
+    def expand(self, prop, value: str) -> Generator[tuple[str, str], None, None]:
+        """
+        Expand a shorthand property into side-specific properties
+        (top, right, bottom, left).
+
+        Parameters
+        ----------
+        prop : str
+            CSS property name.
+        value : str
+            String token for the property.
+
+        Yields
+        ------
+        Tuple (str, str)
+            Expanded property, value.
+        """
+        tokens = value.split()
+        try:
+            mapping = self.SIDE_SHORTHANDS[len(tokens)]
+        except KeyError:
+            warnings.warn(
+                f'Could not expand "{prop}: {value}"',
+                CSSWarning,
+                stacklevel=find_stack_level(),
+            )
+            return
+        for key, idx in zip(self.SIDES, mapping):
+            yield prop_fmt.format(key), tokens[idx]
+
+    return expand
+
+
+def _border_expander(side: str = "") -> Callable:
+    """
+    Wrapper to expand the 'border' property into border color, style, and
+    width properties.
+
+    Parameters
+    ----------
+    side : str
+        The border side to expand into properties.
+
+    Returns
+    -------
+    function
+        Function to call when a 'border(-{side}): {value}' string is
+        encountered.
+    """
+    if side != "":
+        side = f"-{side}"
+
+    def expand(self, prop, value: str) -> Generator[tuple[str, str], None, None]:
+        """
+        Expand border into color, style, and width tuples.
+
+        Parameters
+        ----------
+        prop : str
+            CSS property name passed to styler.
+        value : str
+            Value passed to styler for property.
+
+        Yields
+        ------
+        Tuple (str, str)
+            Expanded property, value.
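+
+        For example, ``border: 1pt solid red`` expands (with the defaults
+        used below) to ``border-color: red``, ``border-style: solid`` and
+        ``border-width: 1pt``.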
+ """ + tokens = value.split() + if len(tokens) == 0 or len(tokens) > 3: + warnings.warn( + f'Too many tokens provided to "{prop}" (expected 1-3)', + CSSWarning, + stacklevel=find_stack_level(), + ) + + # TODO: Can we use current color as initial value to comply with CSS standards? + border_declarations = { + f"border{side}-color": "black", + f"border{side}-style": "none", + f"border{side}-width": "medium", + } + for token in tokens: + if token.lower() in self.BORDER_STYLES: + border_declarations[f"border{side}-style"] = token + elif any(ratio in token.lower() for ratio in self.BORDER_WIDTH_RATIOS): + border_declarations[f"border{side}-width"] = token + else: + border_declarations[f"border{side}-color"] = token + # TODO: Warn user if item entered more than once (e.g. "border: red green") + + # Per CSS, "border" will reset previous "border-*" definitions + yield from self.atomize(border_declarations.items()) + + return expand + + +class CSSResolver: + """ + A callable for parsing and resolving CSS to atomic properties. + """ + + UNIT_RATIOS = { + "pt": ("pt", 1), + "em": ("em", 1), + "rem": ("pt", 12), + "ex": ("em", 0.5), + # 'ch': + "px": ("pt", 0.75), + "pc": ("pt", 12), + "in": ("pt", 72), + "cm": ("in", 1 / 2.54), + "mm": ("in", 1 / 25.4), + "q": ("mm", 0.25), + "!!default": ("em", 0), + } + + FONT_SIZE_RATIOS = UNIT_RATIOS.copy() + FONT_SIZE_RATIOS.update( + { + "%": ("em", 0.01), + "xx-small": ("rem", 0.5), + "x-small": ("rem", 0.625), + "small": ("rem", 0.8), + "medium": ("rem", 1), + "large": ("rem", 1.125), + "x-large": ("rem", 1.5), + "xx-large": ("rem", 2), + "smaller": ("em", 1 / 1.2), + "larger": ("em", 1.2), + "!!default": ("em", 1), + } + ) + + MARGIN_RATIOS = UNIT_RATIOS.copy() + MARGIN_RATIOS.update({"none": ("pt", 0)}) + + BORDER_WIDTH_RATIOS = UNIT_RATIOS.copy() + BORDER_WIDTH_RATIOS.update( + { + "none": ("pt", 0), + "thick": ("px", 4), + "medium": ("px", 2), + "thin": ("px", 1), + # Default: medium only if solid + } + ) + + BORDER_STYLES = [ + "none", + "hidden", + "dotted", + "dashed", + "solid", + "double", + "groove", + "ridge", + "inset", + "outset", + "mediumdashdot", + "dashdotdot", + "hair", + "mediumdashdotdot", + "dashdot", + "slantdashdot", + "mediumdashed", + ] + + SIDE_SHORTHANDS = { + 1: [0, 0, 0, 0], + 2: [0, 1, 0, 1], + 3: [0, 1, 2, 1], + 4: [0, 1, 2, 3], + } + + SIDES = ("top", "right", "bottom", "left") + + CSS_EXPANSIONS = { + **{ + (f"border-{prop}" if prop else "border"): _border_expander(prop) + for prop in ["", "top", "right", "bottom", "left"] + }, + **{ + f"border-{prop}": _side_expander(f"border-{{:s}}-{prop}") + for prop in ["color", "style", "width"] + }, + "margin": _side_expander("margin-{:s}"), + "padding": _side_expander("padding-{:s}"), + } + + def __call__( + self, + declarations: str | Iterable[tuple[str, str]], + inherited: dict[str, str] | None = None, + ) -> dict[str, str]: + """ + The given declarations to atomic properties. + + Parameters + ---------- + declarations_str : str | Iterable[tuple[str, str]] + A CSS string or set of CSS declaration tuples + e.g. "font-weight: bold; background: blue" or + {("font-weight", "bold"), ("background", "blue")} + inherited : dict, optional + Atomic properties indicating the inherited style context in which + declarations_str is to be resolved. ``inherited`` should already + be resolved, i.e. valid output of this method. + + Returns + ------- + dict + Atomic CSS 2.2 properties. 
+ + Examples + -------- + >>> resolve = CSSResolver() + >>> inherited = {'font-family': 'serif', 'font-weight': 'bold'} + >>> out = resolve(''' + ... border-color: BLUE RED; + ... font-size: 1em; + ... font-size: 2em; + ... font-weight: normal; + ... font-weight: inherit; + ... ''', inherited) + >>> sorted(out.items()) # doctest: +NORMALIZE_WHITESPACE + [('border-bottom-color', 'blue'), + ('border-left-color', 'red'), + ('border-right-color', 'red'), + ('border-top-color', 'blue'), + ('font-family', 'serif'), + ('font-size', '24pt'), + ('font-weight', 'bold')] + """ + if isinstance(declarations, str): + declarations = self.parse(declarations) + props = dict(self.atomize(declarations)) + if inherited is None: + inherited = {} + + props = self._update_initial(props, inherited) + props = self._update_font_size(props, inherited) + return self._update_other_units(props) + + def _update_initial( + self, + props: dict[str, str], + inherited: dict[str, str], + ) -> dict[str, str]: + # 1. resolve inherited, initial + for prop, val in inherited.items(): + if prop not in props: + props[prop] = val + + new_props = props.copy() + for prop, val in props.items(): + if val == "inherit": + val = inherited.get(prop, "initial") + + if val in ("initial", None): + # we do not define a complete initial stylesheet + del new_props[prop] + else: + new_props[prop] = val + return new_props + + def _update_font_size( + self, + props: dict[str, str], + inherited: dict[str, str], + ) -> dict[str, str]: + # 2. resolve relative font size + if props.get("font-size"): + props["font-size"] = self.size_to_pt( + props["font-size"], + self._get_font_size(inherited), + conversions=self.FONT_SIZE_RATIOS, + ) + return props + + def _get_font_size(self, props: dict[str, str]) -> float | None: + if props.get("font-size"): + font_size_string = props["font-size"] + return self._get_float_font_size_from_pt(font_size_string) + return None + + def _get_float_font_size_from_pt(self, font_size_string: str) -> float: + assert font_size_string.endswith("pt") + return float(font_size_string.rstrip("pt")) + + def _update_other_units(self, props: dict[str, str]) -> dict[str, str]: + font_size = self._get_font_size(props) + # 3. TODO: resolve other font-relative units + for side in self.SIDES: + prop = f"border-{side}-width" + if prop in props: + props[prop] = self.size_to_pt( + props[prop], + em_pt=font_size, + conversions=self.BORDER_WIDTH_RATIOS, + ) + + for prop in [f"margin-{side}", f"padding-{side}"]: + if prop in props: + # TODO: support % + props[prop] = self.size_to_pt( + props[prop], + em_pt=font_size, + conversions=self.MARGIN_RATIOS, + ) + return props + + def size_to_pt(self, in_val, em_pt=None, conversions=UNIT_RATIOS) -> str: + def _error(): + warnings.warn( + f"Unhandled size: {repr(in_val)}", + CSSWarning, + stacklevel=find_stack_level(), + ) + return self.size_to_pt("1!!default", conversions=conversions) + + match = re.match(r"^(\S*?)([a-zA-Z%!].*)", in_val) + if match is None: + return _error() + + val, unit = match.groups() + if val == "": + # hack for 'large' etc. 
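+            # Keyword sizes such as "medium" or "large" match the regex with
+            # an empty numeric part; treat them as 1 unit and let the
+            # conversion table supply the actual ratio.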
+ val = 1 + else: + try: + val = float(val) + except ValueError: + return _error() + + while unit != "pt": + if unit == "em": + if em_pt is None: + unit = "rem" + else: + val *= em_pt + unit = "pt" + continue + + try: + unit, mul = conversions[unit] + except KeyError: + return _error() + val *= mul + + val = round(val, 5) + if int(val) == val: + size_fmt = f"{int(val):d}pt" + else: + size_fmt = f"{val:f}pt" + return size_fmt + + def atomize(self, declarations: Iterable) -> Generator[tuple[str, str], None, None]: + for prop, value in declarations: + prop = prop.lower() + value = value.lower() + if prop in self.CSS_EXPANSIONS: + expand = self.CSS_EXPANSIONS[prop] + yield from expand(self, prop, value) + else: + yield prop, value + + def parse(self, declarations_str: str) -> Iterator[tuple[str, str]]: + """ + Generates (prop, value) pairs from declarations. + + In a future version may generate parsed tokens from tinycss/tinycss2 + + Parameters + ---------- + declarations_str : str + """ + for decl in declarations_str.split(";"): + if not decl.strip(): + continue + prop, sep, val = decl.partition(":") + prop = prop.strip().lower() + # TODO: don't lowercase case sensitive parts of values (strings) + val = val.strip().lower() + if sep: + yield prop, val + else: + warnings.warn( + f"Ill-formatted attribute: expected a colon in {repr(decl)}", + CSSWarning, + stacklevel=find_stack_level(), + ) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/excel.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/excel.py new file mode 100644 index 0000000000000000000000000000000000000000..5fd23cd7d918ad0efddb1088d79fd78f6079cca7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/excel.py @@ -0,0 +1,962 @@ +""" +Utilities for conversion to writer-agnostic Excel representation. 
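+
+Roughly: ``ExcelFormatter`` turns a DataFrame (or Styler) into a stream of
+``ExcelCell`` objects, converting any CSS styling via ``CSSToExcelConverter``;
+an ``ExcelWriter`` engine then writes those cells out via ``_write_cells``.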
+""" +from __future__ import annotations + +from collections.abc import ( + Hashable, + Iterable, + Mapping, + Sequence, +) +import functools +import itertools +import re +from typing import ( + TYPE_CHECKING, + Any, + Callable, + cast, +) +import warnings + +import numpy as np + +from pandas._libs.lib import is_list_like +from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes import missing +from pandas.core.dtypes.common import ( + is_float, + is_scalar, +) + +from pandas import ( + DataFrame, + Index, + MultiIndex, + PeriodIndex, +) +import pandas.core.common as com +from pandas.core.shared_docs import _shared_docs + +from pandas.io.formats._color_data import CSS4_COLORS +from pandas.io.formats.css import ( + CSSResolver, + CSSWarning, +) +from pandas.io.formats.format import get_level_lengths +from pandas.io.formats.printing import pprint_thing + +if TYPE_CHECKING: + from pandas._typing import ( + FilePath, + IndexLabel, + StorageOptions, + WriteExcelBuffer, + ) + + from pandas import ExcelWriter + + +class ExcelCell: + __fields__ = ("row", "col", "val", "style", "mergestart", "mergeend") + __slots__ = __fields__ + + def __init__( + self, + row: int, + col: int, + val, + style=None, + mergestart: int | None = None, + mergeend: int | None = None, + ) -> None: + self.row = row + self.col = col + self.val = val + self.style = style + self.mergestart = mergestart + self.mergeend = mergeend + + +class CssExcelCell(ExcelCell): + def __init__( + self, + row: int, + col: int, + val, + style: dict | None, + css_styles: dict[tuple[int, int], list[tuple[str, Any]]] | None, + css_row: int, + css_col: int, + css_converter: Callable | None, + **kwargs, + ) -> None: + if css_styles and css_converter: + # Use dict to get only one (case-insensitive) declaration per property + declaration_dict = { + prop.lower(): val for prop, val in css_styles[css_row, css_col] + } + # Convert to frozenset for order-invariant caching + unique_declarations = frozenset(declaration_dict.items()) + style = css_converter(unique_declarations) + + super().__init__(row=row, col=col, val=val, style=style, **kwargs) + + +class CSSToExcelConverter: + """ + A callable for converting CSS declarations to ExcelWriter styles + + Supports parts of CSS 2.2, with minimal CSS 3.0 support (e.g. text-shadow), + focusing on font styling, backgrounds, borders and alignment. + + Operates by first computing CSS styles in a fairly generic + way (see :meth:`compute_css`) then determining Excel style + properties from CSS properties (see :meth:`build_xlstyle`). + + Parameters + ---------- + inherited : str, optional + CSS declarations understood to be the containing scope for the + CSS processed by :meth:`__call__`. 
+ """ + + NAMED_COLORS = CSS4_COLORS + + VERTICAL_MAP = { + "top": "top", + "text-top": "top", + "middle": "center", + "baseline": "bottom", + "bottom": "bottom", + "text-bottom": "bottom", + # OpenXML also has 'justify', 'distributed' + } + + BOLD_MAP = { + "bold": True, + "bolder": True, + "600": True, + "700": True, + "800": True, + "900": True, + "normal": False, + "lighter": False, + "100": False, + "200": False, + "300": False, + "400": False, + "500": False, + } + + ITALIC_MAP = { + "normal": False, + "italic": True, + "oblique": True, + } + + FAMILY_MAP = { + "serif": 1, # roman + "sans-serif": 2, # swiss + "cursive": 4, # script + "fantasy": 5, # decorative + } + + BORDER_STYLE_MAP = { + style.lower(): style + for style in [ + "dashed", + "mediumDashDot", + "dashDotDot", + "hair", + "dotted", + "mediumDashDotDot", + "double", + "dashDot", + "slantDashDot", + "mediumDashed", + ] + } + + # NB: Most of the methods here could be classmethods, as only __init__ + # and __call__ make use of instance attributes. We leave them as + # instancemethods so that users can easily experiment with extensions + # without monkey-patching. + inherited: dict[str, str] | None + + def __init__(self, inherited: str | None = None) -> None: + if inherited is not None: + self.inherited = self.compute_css(inherited) + else: + self.inherited = None + # We should avoid cache on the __call__ method. + # Otherwise once the method __call__ has been called + # garbage collection no longer deletes the instance. + self._call_cached = functools.cache(self._call_uncached) + + compute_css = CSSResolver() + + def __call__( + self, declarations: str | frozenset[tuple[str, str]] + ) -> dict[str, dict[str, str]]: + """ + Convert CSS declarations to ExcelWriter style. + + Parameters + ---------- + declarations : str | frozenset[tuple[str, str]] + CSS string or set of CSS declaration tuples. + e.g. "font-weight: bold; background: blue" or + {("font-weight", "bold"), ("background", "blue")} + + Returns + ------- + xlstyle : dict + A style as interpreted by ExcelWriter when found in + ExcelCell.style. 
+ """ + return self._call_cached(declarations) + + def _call_uncached( + self, declarations: str | frozenset[tuple[str, str]] + ) -> dict[str, dict[str, str]]: + properties = self.compute_css(declarations, self.inherited) + return self.build_xlstyle(properties) + + def build_xlstyle(self, props: Mapping[str, str]) -> dict[str, dict[str, str]]: + out = { + "alignment": self.build_alignment(props), + "border": self.build_border(props), + "fill": self.build_fill(props), + "font": self.build_font(props), + "number_format": self.build_number_format(props), + } + + # TODO: handle cell width and height: needs support in pandas.io.excel + + def remove_none(d: dict[str, str | None]) -> None: + """Remove key where value is None, through nested dicts""" + for k, v in list(d.items()): + if v is None: + del d[k] + elif isinstance(v, dict): + remove_none(v) + if not v: + del d[k] + + remove_none(out) + return out + + def build_alignment(self, props: Mapping[str, str]) -> dict[str, bool | str | None]: + # TODO: text-indent, padding-left -> alignment.indent + return { + "horizontal": props.get("text-align"), + "vertical": self._get_vertical_alignment(props), + "wrap_text": self._get_is_wrap_text(props), + } + + def _get_vertical_alignment(self, props: Mapping[str, str]) -> str | None: + vertical_align = props.get("vertical-align") + if vertical_align: + return self.VERTICAL_MAP.get(vertical_align) + return None + + def _get_is_wrap_text(self, props: Mapping[str, str]) -> bool | None: + if props.get("white-space") is None: + return None + return bool(props["white-space"] not in ("nowrap", "pre", "pre-line")) + + def build_border( + self, props: Mapping[str, str] + ) -> dict[str, dict[str, str | None]]: + return { + side: { + "style": self._border_style( + props.get(f"border-{side}-style"), + props.get(f"border-{side}-width"), + self.color_to_excel(props.get(f"border-{side}-color")), + ), + "color": self.color_to_excel(props.get(f"border-{side}-color")), + } + for side in ["top", "right", "bottom", "left"] + } + + def _border_style(self, style: str | None, width: str | None, color: str | None): + # convert styles and widths to openxml, one of: + # 'dashDot' + # 'dashDotDot' + # 'dashed' + # 'dotted' + # 'double' + # 'hair' + # 'medium' + # 'mediumDashDot' + # 'mediumDashDotDot' + # 'mediumDashed' + # 'slantDashDot' + # 'thick' + # 'thin' + if width is None and style is None and color is None: + # Return None will remove "border" from style dictionary + return None + + if width is None and style is None: + # Return "none" will keep "border" in style dictionary + return "none" + + if style in ("none", "hidden"): + return "none" + + width_name = self._get_width_name(width) + if width_name is None: + return "none" + + if style in (None, "groove", "ridge", "inset", "outset", "solid"): + # not handled + return width_name + + if style == "double": + return "double" + if style == "dotted": + if width_name in ("hair", "thin"): + return "dotted" + return "mediumDashDotDot" + if style == "dashed": + if width_name in ("hair", "thin"): + return "dashed" + return "mediumDashed" + elif style in self.BORDER_STYLE_MAP: + # Excel-specific styles + return self.BORDER_STYLE_MAP[style] + else: + warnings.warn( + f"Unhandled border style format: {repr(style)}", + CSSWarning, + stacklevel=find_stack_level(), + ) + return "none" + + def _get_width_name(self, width_input: str | None) -> str | None: + width = self._width_to_float(width_input) + if width < 1e-5: + return None + elif width < 1.3: + return "thin" + elif width < 2.8: + 
return "medium" + return "thick" + + def _width_to_float(self, width: str | None) -> float: + if width is None: + width = "2pt" + return self._pt_to_float(width) + + def _pt_to_float(self, pt_string: str) -> float: + assert pt_string.endswith("pt") + return float(pt_string.rstrip("pt")) + + def build_fill(self, props: Mapping[str, str]): + # TODO: perhaps allow for special properties + # -excel-pattern-bgcolor and -excel-pattern-type + fill_color = props.get("background-color") + if fill_color not in (None, "transparent", "none"): + return {"fgColor": self.color_to_excel(fill_color), "patternType": "solid"} + + def build_number_format(self, props: Mapping[str, str]) -> dict[str, str | None]: + fc = props.get("number-format") + fc = fc.replace("§", ";") if isinstance(fc, str) else fc + return {"format_code": fc} + + def build_font( + self, props: Mapping[str, str] + ) -> dict[str, bool | float | str | None]: + font_names = self._get_font_names(props) + decoration = self._get_decoration(props) + return { + "name": font_names[0] if font_names else None, + "family": self._select_font_family(font_names), + "size": self._get_font_size(props), + "bold": self._get_is_bold(props), + "italic": self._get_is_italic(props), + "underline": ("single" if "underline" in decoration else None), + "strike": ("line-through" in decoration) or None, + "color": self.color_to_excel(props.get("color")), + # shadow if nonzero digit before shadow color + "shadow": self._get_shadow(props), + } + + def _get_is_bold(self, props: Mapping[str, str]) -> bool | None: + weight = props.get("font-weight") + if weight: + return self.BOLD_MAP.get(weight) + return None + + def _get_is_italic(self, props: Mapping[str, str]) -> bool | None: + font_style = props.get("font-style") + if font_style: + return self.ITALIC_MAP.get(font_style) + return None + + def _get_decoration(self, props: Mapping[str, str]) -> Sequence[str]: + decoration = props.get("text-decoration") + if decoration is not None: + return decoration.split() + else: + return () + + def _get_underline(self, decoration: Sequence[str]) -> str | None: + if "underline" in decoration: + return "single" + return None + + def _get_shadow(self, props: Mapping[str, str]) -> bool | None: + if "text-shadow" in props: + return bool(re.search("^[^#(]*[1-9]", props["text-shadow"])) + return None + + def _get_font_names(self, props: Mapping[str, str]) -> Sequence[str]: + font_names_tmp = re.findall( + r"""(?x) + ( + "(?:[^"]|\\")+" + | + '(?:[^']|\\')+' + | + [^'",]+ + )(?=,|\s*$) + """, + props.get("font-family", ""), + ) + + font_names = [] + for name in font_names_tmp: + if name[:1] == '"': + name = name[1:-1].replace('\\"', '"') + elif name[:1] == "'": + name = name[1:-1].replace("\\'", "'") + else: + name = name.strip() + if name: + font_names.append(name) + return font_names + + def _get_font_size(self, props: Mapping[str, str]) -> float | None: + size = props.get("font-size") + if size is None: + return size + return self._pt_to_float(size) + + def _select_font_family(self, font_names: Sequence[str]) -> int | None: + family = None + for name in font_names: + family = self.FAMILY_MAP.get(name) + if family: + break + + return family + + def color_to_excel(self, val: str | None) -> str | None: + if val is None: + return None + + if self._is_hex_color(val): + return self._convert_hex_to_excel(val) + + try: + return self.NAMED_COLORS[val] + except KeyError: + warnings.warn( + f"Unhandled color format: {repr(val)}", + CSSWarning, + stacklevel=find_stack_level(), + ) + return None + + 
def _is_hex_color(self, color_string: str) -> bool:
+        return bool(color_string.startswith("#"))
+
+    def _convert_hex_to_excel(self, color_string: str) -> str:
+        code = color_string.lstrip("#")
+        if self._is_shorthand_color(color_string):
+            return (code[0] * 2 + code[1] * 2 + code[2] * 2).upper()
+        else:
+            return code.upper()
+
+    def _is_shorthand_color(self, color_string: str) -> bool:
+        """Check if color code is shorthand.
+
+        #FFF is a shorthand as opposed to full #FFFFFF.
+        """
+        code = color_string.lstrip("#")
+        if len(code) == 3:
+            return True
+        elif len(code) == 6:
+            return False
+        else:
+            raise ValueError(f"Unexpected color {color_string}")
+
+
+class ExcelFormatter:
+    """
+    Class for formatting a DataFrame to a list of ExcelCells.
+
+    Parameters
+    ----------
+    df : DataFrame or Styler
+    na_rep : str
+        NA representation.
+    float_format : str, default None
+        Format string for floating point numbers.
+    cols : sequence, optional
+        Columns to write.
+    header : bool or sequence of str, default True
+        Write out column names. If a list of strings is given, it is
+        assumed to be aliases for the column names.
+    index : bool, default True
+        Output row names (index).
+    index_label : str or sequence, default None
+        Column label for index column(s) if desired. If None is given, and
+        `header` and `index` are True, then the index names are used. A
+        sequence should be given if the DataFrame uses MultiIndex.
+    merge_cells : bool, default False
+        Format MultiIndex and Hierarchical Rows as merged cells.
+    inf_rep : str, default `'inf'`
+        Representation for np.inf values (which aren't representable in
+        Excel). A `'-'` sign will be added in front of -inf.
+    style_converter : callable, optional
+        This translates Styler styles (CSS) into ExcelWriter styles.
+        Defaults to ``CSSToExcelConverter()``.
+        It should have signature css_declarations string -> excel style.
+        This is only called for body cells.
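+
+    A minimal sketch of typical use (illustrative only):
+
+    >>> from pandas import DataFrame
+    >>> formatter = ExcelFormatter(DataFrame({"a": [1, 2]}))
+    >>> cells = list(formatter.get_formatted_cells())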
+ """ + + max_rows = 2**20 + max_cols = 2**14 + + def __init__( + self, + df, + na_rep: str = "", + float_format: str | None = None, + cols: Sequence[Hashable] | None = None, + header: Sequence[Hashable] | bool = True, + index: bool = True, + index_label: IndexLabel | None = None, + merge_cells: bool = False, + inf_rep: str = "inf", + style_converter: Callable | None = None, + ) -> None: + self.rowcounter = 0 + self.na_rep = na_rep + if not isinstance(df, DataFrame): + self.styler = df + self.styler._compute() # calculate applied styles + df = df.data + if style_converter is None: + style_converter = CSSToExcelConverter() + self.style_converter: Callable | None = style_converter + else: + self.styler = None + self.style_converter = None + self.df = df + if cols is not None: + # all missing, raise + if not len(Index(cols).intersection(df.columns)): + raise KeyError("passes columns are not ALL present dataframe") + + if len(Index(cols).intersection(df.columns)) != len(set(cols)): + # Deprecated in GH#17295, enforced in 1.0.0 + raise KeyError("Not all names specified in 'columns' are found") + + self.df = df.reindex(columns=cols) + + self.columns = self.df.columns + self.float_format = float_format + self.index = index + self.index_label = index_label + self.header = header + self.merge_cells = merge_cells + self.inf_rep = inf_rep + + @property + def header_style(self) -> dict[str, dict[str, str | bool]]: + return { + "font": {"bold": True}, + "borders": { + "top": "thin", + "right": "thin", + "bottom": "thin", + "left": "thin", + }, + "alignment": {"horizontal": "center", "vertical": "top"}, + } + + def _format_value(self, val): + if is_scalar(val) and missing.isna(val): + val = self.na_rep + elif is_float(val): + if missing.isposinf_scalar(val): + val = self.inf_rep + elif missing.isneginf_scalar(val): + val = f"-{self.inf_rep}" + elif self.float_format is not None: + val = float(self.float_format % val) + if getattr(val, "tzinfo", None) is not None: + raise ValueError( + "Excel does not support datetimes with " + "timezones. Please ensure that datetimes " + "are timezone unaware before writing to Excel." + ) + return val + + def _format_header_mi(self) -> Iterable[ExcelCell]: + if self.columns.nlevels > 1: + if not self.index: + raise NotImplementedError( + "Writing to Excel with MultiIndex columns and no " + "index ('index'=False) is not yet implemented." + ) + + if not (self._has_aliases or self.header): + return + + columns = self.columns + level_strs = columns._format_multi( + sparsify=self.merge_cells, include_names=False + ) + level_lengths = get_level_lengths(level_strs) + coloffset = 0 + lnum = 0 + + if self.index and isinstance(self.df.index, MultiIndex): + coloffset = len(self.df.index[0]) - 1 + + if self.merge_cells: + # Format multi-index as a merged cells. 
+ for lnum, name in enumerate(columns.names): + yield ExcelCell( + row=lnum, + col=coloffset, + val=name, + style=self.header_style, + ) + + for lnum, (spans, levels, level_codes) in enumerate( + zip(level_lengths, columns.levels, columns.codes) + ): + values = levels.take(level_codes) + for i, span_val in spans.items(): + mergestart, mergeend = None, None + if span_val > 1: + mergestart, mergeend = lnum, coloffset + i + span_val + yield CssExcelCell( + row=lnum, + col=coloffset + i + 1, + val=values[i], + style=self.header_style, + css_styles=getattr(self.styler, "ctx_columns", None), + css_row=lnum, + css_col=i, + css_converter=self.style_converter, + mergestart=mergestart, + mergeend=mergeend, + ) + else: + # Format in legacy format with dots to indicate levels. + for i, values in enumerate(zip(*level_strs)): + v = ".".join(map(pprint_thing, values)) + yield CssExcelCell( + row=lnum, + col=coloffset + i + 1, + val=v, + style=self.header_style, + css_styles=getattr(self.styler, "ctx_columns", None), + css_row=lnum, + css_col=i, + css_converter=self.style_converter, + ) + + self.rowcounter = lnum + + def _format_header_regular(self) -> Iterable[ExcelCell]: + if self._has_aliases or self.header: + coloffset = 0 + + if self.index: + coloffset = 1 + if isinstance(self.df.index, MultiIndex): + coloffset = len(self.df.index.names) + + colnames = self.columns + if self._has_aliases: + self.header = cast(Sequence, self.header) + if len(self.header) != len(self.columns): + raise ValueError( + f"Writing {len(self.columns)} cols " + f"but got {len(self.header)} aliases" + ) + colnames = self.header + + for colindex, colname in enumerate(colnames): + yield CssExcelCell( + row=self.rowcounter, + col=colindex + coloffset, + val=colname, + style=self.header_style, + css_styles=getattr(self.styler, "ctx_columns", None), + css_row=0, + css_col=colindex, + css_converter=self.style_converter, + ) + + def _format_header(self) -> Iterable[ExcelCell]: + gen: Iterable[ExcelCell] + + if isinstance(self.columns, MultiIndex): + gen = self._format_header_mi() + else: + gen = self._format_header_regular() + + gen2: Iterable[ExcelCell] = () + + if self.df.index.names: + row = [x if x is not None else "" for x in self.df.index.names] + [ + "" + ] * len(self.columns) + if functools.reduce(lambda x, y: x and y, (x != "" for x in row)): + gen2 = ( + ExcelCell(self.rowcounter, colindex, val, self.header_style) + for colindex, val in enumerate(row) + ) + self.rowcounter += 1 + return itertools.chain(gen, gen2) + + def _format_body(self) -> Iterable[ExcelCell]: + if isinstance(self.df.index, MultiIndex): + return self._format_hierarchical_rows() + else: + return self._format_regular_rows() + + def _format_regular_rows(self) -> Iterable[ExcelCell]: + if self._has_aliases or self.header: + self.rowcounter += 1 + + # output index and index_label? 
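+        # Resolution order for the index label: an explicit ``index_label``
+        # argument wins (first element if list-like); otherwise fall back
+        # to the index's own name.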
+ if self.index: + # check aliases + # if list only take first as this is not a MultiIndex + if self.index_label and isinstance( + self.index_label, (list, tuple, np.ndarray, Index) + ): + index_label = self.index_label[0] + # if string good to go + elif self.index_label and isinstance(self.index_label, str): + index_label = self.index_label + else: + index_label = self.df.index.names[0] + + if isinstance(self.columns, MultiIndex): + self.rowcounter += 1 + + if index_label and self.header is not False: + yield ExcelCell(self.rowcounter - 1, 0, index_label, self.header_style) + + # write index_values + index_values = self.df.index + if isinstance(self.df.index, PeriodIndex): + index_values = self.df.index.to_timestamp() + + for idx, idxval in enumerate(index_values): + yield CssExcelCell( + row=self.rowcounter + idx, + col=0, + val=idxval, + style=self.header_style, + css_styles=getattr(self.styler, "ctx_index", None), + css_row=idx, + css_col=0, + css_converter=self.style_converter, + ) + coloffset = 1 + else: + coloffset = 0 + + yield from self._generate_body(coloffset) + + def _format_hierarchical_rows(self) -> Iterable[ExcelCell]: + if self._has_aliases or self.header: + self.rowcounter += 1 + + gcolidx = 0 + + if self.index: + index_labels = self.df.index.names + # check for aliases + if self.index_label and isinstance( + self.index_label, (list, tuple, np.ndarray, Index) + ): + index_labels = self.index_label + + # MultiIndex columns require an extra row + # with index names (blank if None) for + # unambiguous round-trip, unless not merging, + # in which case the names all go on one row Issue #11328 + if isinstance(self.columns, MultiIndex) and self.merge_cells: + self.rowcounter += 1 + + # if index labels are not empty go ahead and dump + if com.any_not_none(*index_labels) and self.header is not False: + for cidx, name in enumerate(index_labels): + yield ExcelCell(self.rowcounter - 1, cidx, name, self.header_style) + + if self.merge_cells: + # Format hierarchical rows as merged cells. + level_strs = self.df.index._format_multi( + sparsify=True, include_names=False + ) + level_lengths = get_level_lengths(level_strs) + + for spans, levels, level_codes in zip( + level_lengths, self.df.index.levels, self.df.index.codes + ): + values = levels.take( + level_codes, + allow_fill=levels._can_hold_na, + fill_value=levels._na_value, + ) + + for i, span_val in spans.items(): + mergestart, mergeend = None, None + if span_val > 1: + mergestart = self.rowcounter + i + span_val - 1 + mergeend = gcolidx + yield CssExcelCell( + row=self.rowcounter + i, + col=gcolidx, + val=values[i], + style=self.header_style, + css_styles=getattr(self.styler, "ctx_index", None), + css_row=i, + css_col=gcolidx, + css_converter=self.style_converter, + mergestart=mergestart, + mergeend=mergeend, + ) + gcolidx += 1 + + else: + # Format hierarchical rows with non-merged values. 
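+            # Each MultiIndex entry is emitted as one cell per level, so
+            # repeated keys are written out in full rather than merged.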
+ for indexcolvals in zip(*self.df.index): + for idx, indexcolval in enumerate(indexcolvals): + yield CssExcelCell( + row=self.rowcounter + idx, + col=gcolidx, + val=indexcolval, + style=self.header_style, + css_styles=getattr(self.styler, "ctx_index", None), + css_row=idx, + css_col=gcolidx, + css_converter=self.style_converter, + ) + gcolidx += 1 + + yield from self._generate_body(gcolidx) + + @property + def _has_aliases(self) -> bool: + """Whether the aliases for column names are present.""" + return is_list_like(self.header) + + def _generate_body(self, coloffset: int) -> Iterable[ExcelCell]: + # Write the body of the frame data series by series. + for colidx in range(len(self.columns)): + series = self.df.iloc[:, colidx] + for i, val in enumerate(series): + yield CssExcelCell( + row=self.rowcounter + i, + col=colidx + coloffset, + val=val, + style=None, + css_styles=getattr(self.styler, "ctx", None), + css_row=i, + css_col=colidx, + css_converter=self.style_converter, + ) + + def get_formatted_cells(self) -> Iterable[ExcelCell]: + for cell in itertools.chain(self._format_header(), self._format_body()): + cell.val = self._format_value(cell.val) + yield cell + + @doc(storage_options=_shared_docs["storage_options"]) + def write( + self, + writer: FilePath | WriteExcelBuffer | ExcelWriter, + sheet_name: str = "Sheet1", + startrow: int = 0, + startcol: int = 0, + freeze_panes: tuple[int, int] | None = None, + engine: str | None = None, + storage_options: StorageOptions | None = None, + engine_kwargs: dict | None = None, + ) -> None: + """ + writer : path-like, file-like, or ExcelWriter object + File path or existing ExcelWriter + sheet_name : str, default 'Sheet1' + Name of sheet which will contain DataFrame + startrow : + upper left cell row to dump data frame + startcol : + upper left cell column to dump data frame + freeze_panes : tuple of integer (length 2), default None + Specifies the one-based bottommost row and rightmost column that + is to be frozen + engine : string, default None + write engine to use if writer is a path - you can also set this + via the options ``io.excel.xlsx.writer``, + or ``io.excel.xlsm.writer``. + + {storage_options} + + engine_kwargs: dict, optional + Arbitrary keyword arguments passed to excel engine. + """ + from pandas.io.excel import ExcelWriter + + num_rows, num_cols = self.df.shape + if num_rows > self.max_rows or num_cols > self.max_cols: + raise ValueError( + f"This sheet is too large! 
Your sheet size is: {num_rows}, {num_cols} " + f"Max sheet size is: {self.max_rows}, {self.max_cols}" + ) + + if engine_kwargs is None: + engine_kwargs = {} + + formatted_cells = self.get_formatted_cells() + if isinstance(writer, ExcelWriter): + need_save = False + else: + writer = ExcelWriter( + writer, + engine=engine, + storage_options=storage_options, + engine_kwargs=engine_kwargs, + ) + need_save = True + + try: + writer._write_cells( + formatted_cells, + sheet_name, + startrow=startrow, + startcol=startcol, + freeze_panes=freeze_panes, + ) + finally: + # make sure to close opened file handles + if need_save: + writer.close() diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/format.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/format.py new file mode 100644 index 0000000000000000000000000000000000000000..00c7526edfa4894fab655cb5bbfdf2aa93c4e96d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/format.py @@ -0,0 +1,2058 @@ +""" +Internal module for formatting output data in csv, html, xml, +and latex files. This module also applies to display formatting. +""" +from __future__ import annotations + +from collections.abc import ( + Generator, + Hashable, + Mapping, + Sequence, +) +from contextlib import contextmanager +from csv import QUOTE_NONE +from decimal import Decimal +from functools import partial +from io import StringIO +import math +import re +from shutil import get_terminal_size +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Final, + cast, +) + +import numpy as np + +from pandas._config.config import ( + get_option, + set_option, +) + +from pandas._libs import lib +from pandas._libs.missing import NA +from pandas._libs.tslibs import ( + NaT, + Timedelta, + Timestamp, +) +from pandas._libs.tslibs.nattype import NaTType + +from pandas.core.dtypes.common import ( + is_complex_dtype, + is_float, + is_integer, + is_list_like, + is_numeric_dtype, + is_scalar, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, +) +from pandas.core.dtypes.missing import ( + isna, + notna, +) + +from pandas.core.arrays import ( + Categorical, + DatetimeArray, + ExtensionArray, + TimedeltaArray, +) +from pandas.core.arrays.string_ import StringDtype +from pandas.core.base import PandasObject +import pandas.core.common as com +from pandas.core.indexes.api import ( + Index, + MultiIndex, + PeriodIndex, + ensure_index, +) +from pandas.core.indexes.datetimes import DatetimeIndex +from pandas.core.indexes.timedeltas import TimedeltaIndex +from pandas.core.reshape.concat import concat + +from pandas.io.common import ( + check_parent_directory, + stringify_path, +) +from pandas.io.formats import printing + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + Axes, + ColspaceArgType, + ColspaceType, + CompressionOptions, + FilePath, + FloatFormatType, + FormattersType, + IndexLabel, + SequenceNotStr, + StorageOptions, + WriteBuffer, + ) + + from pandas import ( + DataFrame, + Series, + ) + + +common_docstring: Final = """ + Parameters + ---------- + buf : str, Path or StringIO-like, optional, default None + Buffer to write to. If None, the output is returned as a string. + columns : array-like, optional, default None + The subset of columns to write. Writes all columns by default. + col_space : %(col_space_type)s, optional + %(col_space)s. + header : %(header_type)s, optional + %(header)s. + index : bool, optional, default True + Whether to print index (row) labels. 
+ na_rep : str, optional, default 'NaN' + String representation of ``NaN`` to use. + formatters : list, tuple or dict of one-param. functions, optional + Formatter functions to apply to columns' elements by position or + name. + The result of each function must be a unicode string. + List/tuple must be of length equal to the number of columns. + float_format : one-parameter function, optional, default None + Formatter function to apply to columns' elements if they are + floats. This function must return a unicode string and will be + applied only to the non-``NaN`` elements, with ``NaN`` being + handled by ``na_rep``. + sparsify : bool, optional, default True + Set to False for a DataFrame with a hierarchical index to print + every multiindex key at each row. + index_names : bool, optional, default True + Prints the names of the indexes. + justify : str, default None + How to justify the column labels. If None uses the option from + the print configuration (controlled by set_option), 'right' out + of the box. Valid values are + + * left + * right + * center + * justify + * justify-all + * start + * end + * inherit + * match-parent + * initial + * unset. + max_rows : int, optional + Maximum number of rows to display in the console. + max_cols : int, optional + Maximum number of columns to display in the console. + show_dimensions : bool, default False + Display DataFrame dimensions (number of rows by number of columns). + decimal : str, default '.' + Character recognized as decimal separator, e.g. ',' in Europe. + """ + +VALID_JUSTIFY_PARAMETERS = ( + "left", + "right", + "center", + "justify", + "justify-all", + "start", + "end", + "inherit", + "match-parent", + "initial", + "unset", +) + +return_docstring: Final = """ + Returns + ------- + str or None + If buf is None, returns the result as a string. Otherwise returns + None. + """ + + +class SeriesFormatter: + """ + Implement the main logic of Series.to_string, which underlies + Series.__repr__. 
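+
+    A minimal illustration (internal API, shown for orientation):
+
+    >>> from pandas import Series
+    >>> text = SeriesFormatter(Series([1, 2, 3])).to_string()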
+ """ + + def __init__( + self, + series: Series, + *, + length: bool | str = True, + header: bool = True, + index: bool = True, + na_rep: str = "NaN", + name: bool = False, + float_format: str | None = None, + dtype: bool = True, + max_rows: int | None = None, + min_rows: int | None = None, + ) -> None: + self.series = series + self.buf = StringIO() + self.name = name + self.na_rep = na_rep + self.header = header + self.length = length + self.index = index + self.max_rows = max_rows + self.min_rows = min_rows + + if float_format is None: + float_format = get_option("display.float_format") + self.float_format = float_format + self.dtype = dtype + self.adj = printing.get_adjustment() + + self._chk_truncate() + + def _chk_truncate(self) -> None: + self.tr_row_num: int | None + + min_rows = self.min_rows + max_rows = self.max_rows + # truncation determined by max_rows, actual truncated number of rows + # used below by min_rows + is_truncated_vertically = max_rows and (len(self.series) > max_rows) + series = self.series + if is_truncated_vertically: + max_rows = cast(int, max_rows) + if min_rows: + # if min_rows is set (not None or 0), set max_rows to minimum + # of both + max_rows = min(min_rows, max_rows) + if max_rows == 1: + row_num = max_rows + series = series.iloc[:max_rows] + else: + row_num = max_rows // 2 + series = concat((series.iloc[:row_num], series.iloc[-row_num:])) + self.tr_row_num = row_num + else: + self.tr_row_num = None + self.tr_series = series + self.is_truncated_vertically = is_truncated_vertically + + def _get_footer(self) -> str: + name = self.series.name + footer = "" + + index = self.series.index + if ( + isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)) + and index.freq is not None + ): + footer += f"Freq: {index.freqstr}" + + if self.name is not False and name is not None: + if footer: + footer += ", " + + series_name = printing.pprint_thing(name, escape_chars=("\t", "\r", "\n")) + footer += f"Name: {series_name}" + + if self.length is True or ( + self.length == "truncate" and self.is_truncated_vertically + ): + if footer: + footer += ", " + footer += f"Length: {len(self.series)}" + + if self.dtype is not False and self.dtype is not None: + dtype_name = getattr(self.tr_series.dtype, "name", None) + if dtype_name: + if footer: + footer += ", " + footer += f"dtype: {printing.pprint_thing(dtype_name)}" + + # level infos are added to the end and in a new line, like it is done + # for Categoricals + if isinstance(self.tr_series.dtype, CategoricalDtype): + level_info = self.tr_series._values._get_repr_footer() + if footer: + footer += "\n" + footer += level_info + + return str(footer) + + def _get_formatted_values(self) -> list[str]: + return format_array( + self.tr_series._values, + None, + float_format=self.float_format, + na_rep=self.na_rep, + leading_space=self.index, + ) + + def to_string(self) -> str: + series = self.tr_series + footer = self._get_footer() + + if len(series) == 0: + return f"{type(self.series).__name__}([], {footer})" + + index = series.index + have_header = _has_names(index) + if isinstance(index, MultiIndex): + fmt_index = index._format_multi(include_names=True, sparsify=None) + adj = printing.get_adjustment() + fmt_index = adj.adjoin(2, *fmt_index).split("\n") + else: + fmt_index = index._format_flat(include_name=True) + fmt_values = self._get_formatted_values() + + if self.is_truncated_vertically: + n_header_rows = 0 + row_num = self.tr_row_num + row_num = cast(int, row_num) + width = self.adj.len(fmt_values[row_num - 1]) + if 
width > 3: + dot_str = "..." + else: + dot_str = ".." + # Series uses mode=center because it has single value columns + # DataFrame uses mode=left + dot_str = self.adj.justify([dot_str], width, mode="center")[0] + fmt_values.insert(row_num + n_header_rows, dot_str) + fmt_index.insert(row_num + 1, "") + + if self.index: + result = self.adj.adjoin(3, *[fmt_index[1:], fmt_values]) + else: + result = self.adj.adjoin(3, fmt_values) + + if self.header and have_header: + result = fmt_index[0] + "\n" + result + + if footer: + result += "\n" + footer + + return str("".join(result)) + + +def get_dataframe_repr_params() -> dict[str, Any]: + """Get the parameters used to repr(dataFrame) calls using DataFrame.to_string. + + Supplying these parameters to DataFrame.to_string is equivalent to calling + ``repr(DataFrame)``. This is useful if you want to adjust the repr output. + + .. versionadded:: 1.4.0 + + Example + ------- + >>> import pandas as pd + >>> + >>> df = pd.DataFrame([[1, 2], [3, 4]]) + >>> repr_params = pd.io.formats.format.get_dataframe_repr_params() + >>> repr(df) == df.to_string(**repr_params) + True + """ + from pandas.io.formats import console + + if get_option("display.expand_frame_repr"): + line_width, _ = console.get_console_size() + else: + line_width = None + return { + "max_rows": get_option("display.max_rows"), + "min_rows": get_option("display.min_rows"), + "max_cols": get_option("display.max_columns"), + "max_colwidth": get_option("display.max_colwidth"), + "show_dimensions": get_option("display.show_dimensions"), + "line_width": line_width, + } + + +def get_series_repr_params() -> dict[str, Any]: + """Get the parameters used to repr(Series) calls using Series.to_string. + + Supplying these parameters to Series.to_string is equivalent to calling + ``repr(series)``. This is useful if you want to adjust the series repr output. + + .. versionadded:: 1.4.0 + + Example + ------- + >>> import pandas as pd + >>> + >>> ser = pd.Series([1, 2, 3, 4]) + >>> repr_params = pd.io.formats.format.get_series_repr_params() + >>> repr(ser) == ser.to_string(**repr_params) + True + """ + width, height = get_terminal_size() + max_rows_opt = get_option("display.max_rows") + max_rows = height if max_rows_opt == 0 else max_rows_opt + min_rows = height if max_rows_opt == 0 else get_option("display.min_rows") + + return { + "name": True, + "dtype": True, + "min_rows": min_rows, + "max_rows": max_rows, + "length": get_option("display.show_dimensions"), + } + + +class DataFrameFormatter: + """ + Class for processing dataframe formatting options and data. + + Used by DataFrame.to_string, which backs DataFrame.__repr__. 
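+
+    A minimal illustration (internal API, shown for orientation):
+
+    >>> from pandas import DataFrame
+    >>> fmt = DataFrameFormatter(DataFrame({"a": [1, 2]}))
+    >>> strcols = fmt.get_strcols()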
+ """ + + __doc__ = __doc__ if __doc__ else "" + __doc__ += common_docstring + return_docstring + + def __init__( + self, + frame: DataFrame, + columns: Axes | None = None, + col_space: ColspaceArgType | None = None, + header: bool | SequenceNotStr[str] = True, + index: bool = True, + na_rep: str = "NaN", + formatters: FormattersType | None = None, + justify: str | None = None, + float_format: FloatFormatType | None = None, + sparsify: bool | None = None, + index_names: bool = True, + max_rows: int | None = None, + min_rows: int | None = None, + max_cols: int | None = None, + show_dimensions: bool | str = False, + decimal: str = ".", + bold_rows: bool = False, + escape: bool = True, + ) -> None: + self.frame = frame + self.columns = self._initialize_columns(columns) + self.col_space = self._initialize_colspace(col_space) + self.header = header + self.index = index + self.na_rep = na_rep + self.formatters = self._initialize_formatters(formatters) + self.justify = self._initialize_justify(justify) + self.float_format = float_format + self.sparsify = self._initialize_sparsify(sparsify) + self.show_index_names = index_names + self.decimal = decimal + self.bold_rows = bold_rows + self.escape = escape + self.max_rows = max_rows + self.min_rows = min_rows + self.max_cols = max_cols + self.show_dimensions = show_dimensions + + self.max_cols_fitted = self._calc_max_cols_fitted() + self.max_rows_fitted = self._calc_max_rows_fitted() + + self.tr_frame = self.frame + self.truncate() + self.adj = printing.get_adjustment() + + def get_strcols(self) -> list[list[str]]: + """ + Render a DataFrame to a list of columns (as lists of strings). + """ + strcols = self._get_strcols_without_index() + + if self.index: + str_index = self._get_formatted_index(self.tr_frame) + strcols.insert(0, str_index) + + return strcols + + @property + def should_show_dimensions(self) -> bool: + return self.show_dimensions is True or ( + self.show_dimensions == "truncate" and self.is_truncated + ) + + @property + def is_truncated(self) -> bool: + return bool(self.is_truncated_horizontally or self.is_truncated_vertically) + + @property + def is_truncated_horizontally(self) -> bool: + return bool(self.max_cols_fitted and (len(self.columns) > self.max_cols_fitted)) + + @property + def is_truncated_vertically(self) -> bool: + return bool(self.max_rows_fitted and (len(self.frame) > self.max_rows_fitted)) + + @property + def dimensions_info(self) -> str: + return f"\n\n[{len(self.frame)} rows x {len(self.frame.columns)} columns]" + + @property + def has_index_names(self) -> bool: + return _has_names(self.frame.index) + + @property + def has_column_names(self) -> bool: + return _has_names(self.frame.columns) + + @property + def show_row_idx_names(self) -> bool: + return all((self.has_index_names, self.index, self.show_index_names)) + + @property + def show_col_idx_names(self) -> bool: + return all((self.has_column_names, self.show_index_names, self.header)) + + @property + def max_rows_displayed(self) -> int: + return min(self.max_rows or len(self.frame), len(self.frame)) + + def _initialize_sparsify(self, sparsify: bool | None) -> bool: + if sparsify is None: + return get_option("display.multi_sparse") + return sparsify + + def _initialize_formatters( + self, formatters: FormattersType | None + ) -> FormattersType: + if formatters is None: + return {} + elif len(self.frame.columns) == len(formatters) or isinstance(formatters, dict): + return formatters + else: + raise ValueError( + f"Formatters length({len(formatters)}) should match " 
+ f"DataFrame number of columns({len(self.frame.columns)})" + ) + + def _initialize_justify(self, justify: str | None) -> str: + if justify is None: + return get_option("display.colheader_justify") + else: + return justify + + def _initialize_columns(self, columns: Axes | None) -> Index: + if columns is not None: + cols = ensure_index(columns) + self.frame = self.frame[cols] + return cols + else: + return self.frame.columns + + def _initialize_colspace(self, col_space: ColspaceArgType | None) -> ColspaceType: + result: ColspaceType + + if col_space is None: + result = {} + elif isinstance(col_space, (int, str)): + result = {"": col_space} + result.update({column: col_space for column in self.frame.columns}) + elif isinstance(col_space, Mapping): + for column in col_space.keys(): + if column not in self.frame.columns and column != "": + raise ValueError( + f"Col_space is defined for an unknown column: {column}" + ) + result = col_space + else: + if len(self.frame.columns) != len(col_space): + raise ValueError( + f"Col_space length({len(col_space)}) should match " + f"DataFrame number of columns({len(self.frame.columns)})" + ) + result = dict(zip(self.frame.columns, col_space)) + return result + + def _calc_max_cols_fitted(self) -> int | None: + """Number of columns fitting the screen.""" + if not self._is_in_terminal(): + return self.max_cols + + width, _ = get_terminal_size() + if self._is_screen_narrow(width): + return width + else: + return self.max_cols + + def _calc_max_rows_fitted(self) -> int | None: + """Number of rows with data fitting the screen.""" + max_rows: int | None + + if self._is_in_terminal(): + _, height = get_terminal_size() + if self.max_rows == 0: + # rows available to fill with actual data + return height - self._get_number_of_auxiliary_rows() + + if self._is_screen_short(height): + max_rows = height + else: + max_rows = self.max_rows + else: + max_rows = self.max_rows + + return self._adjust_max_rows(max_rows) + + def _adjust_max_rows(self, max_rows: int | None) -> int | None: + """Adjust max_rows using display logic. + + See description here: + https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options + + GH #37359 + """ + if max_rows: + if (len(self.frame) > max_rows) and self.min_rows: + # if truncated, set max_rows showed to min_rows + max_rows = min(self.min_rows, max_rows) + return max_rows + + def _is_in_terminal(self) -> bool: + """Check if the output is to be shown in terminal.""" + return bool(self.max_cols == 0 or self.max_rows == 0) + + def _is_screen_narrow(self, max_width) -> bool: + return bool(self.max_cols == 0 and len(self.frame.columns) > max_width) + + def _is_screen_short(self, max_height) -> bool: + return bool(self.max_rows == 0 and len(self.frame) > max_height) + + def _get_number_of_auxiliary_rows(self) -> int: + """Get number of rows occupied by prompt, dots and dimension info.""" + dot_row = 1 + prompt_row = 1 + num_rows = dot_row + prompt_row + + if self.show_dimensions: + num_rows += len(self.dimensions_info.splitlines()) + + if self.header: + num_rows += 1 + + return num_rows + + def truncate(self) -> None: + """ + Check whether the frame should be truncated. If so, slice the frame up. + """ + if self.is_truncated_horizontally: + self._truncate_horizontally() + + if self.is_truncated_vertically: + self._truncate_vertically() + + def _truncate_horizontally(self) -> None: + """Remove columns, which are not to be displayed and adjust formatters. 
+ + Attributes affected: + - tr_frame + - formatters + - tr_col_num + """ + assert self.max_cols_fitted is not None + col_num = self.max_cols_fitted // 2 + if col_num >= 1: + left = self.tr_frame.iloc[:, :col_num] + right = self.tr_frame.iloc[:, -col_num:] + self.tr_frame = concat((left, right), axis=1) + + # truncate formatter + if isinstance(self.formatters, (list, tuple)): + self.formatters = [ + *self.formatters[:col_num], + *self.formatters[-col_num:], + ] + else: + col_num = cast(int, self.max_cols) + self.tr_frame = self.tr_frame.iloc[:, :col_num] + self.tr_col_num = col_num + + def _truncate_vertically(self) -> None: + """Remove rows, which are not to be displayed. + + Attributes affected: + - tr_frame + - tr_row_num + """ + assert self.max_rows_fitted is not None + row_num = self.max_rows_fitted // 2 + if row_num >= 1: + _len = len(self.tr_frame) + _slice = np.hstack([np.arange(row_num), np.arange(_len - row_num, _len)]) + self.tr_frame = self.tr_frame.iloc[_slice] + else: + row_num = cast(int, self.max_rows) + self.tr_frame = self.tr_frame.iloc[:row_num, :] + self.tr_row_num = row_num + + def _get_strcols_without_index(self) -> list[list[str]]: + strcols: list[list[str]] = [] + + if not is_list_like(self.header) and not self.header: + for i, c in enumerate(self.tr_frame): + fmt_values = self.format_col(i) + fmt_values = _make_fixed_width( + strings=fmt_values, + justify=self.justify, + minimum=int(self.col_space.get(c, 0)), + adj=self.adj, + ) + strcols.append(fmt_values) + return strcols + + if is_list_like(self.header): + # cast here since can't be bool if is_list_like + self.header = cast(list[str], self.header) + if len(self.header) != len(self.columns): + raise ValueError( + f"Writing {len(self.columns)} cols " + f"but got {len(self.header)} aliases" + ) + str_columns = [[label] for label in self.header] + else: + str_columns = self._get_formatted_column_labels(self.tr_frame) + + if self.show_row_idx_names: + for x in str_columns: + x.append("") + + for i, c in enumerate(self.tr_frame): + cheader = str_columns[i] + header_colwidth = max( + int(self.col_space.get(c, 0)), *(self.adj.len(x) for x in cheader) + ) + fmt_values = self.format_col(i) + fmt_values = _make_fixed_width( + fmt_values, self.justify, minimum=header_colwidth, adj=self.adj + ) + + max_len = max(*(self.adj.len(x) for x in fmt_values), header_colwidth) + cheader = self.adj.justify(cheader, max_len, mode=self.justify) + strcols.append(cheader + fmt_values) + + return strcols + + def format_col(self, i: int) -> list[str]: + frame = self.tr_frame + formatter = self._get_formatter(i) + return format_array( + frame.iloc[:, i]._values, + formatter, + float_format=self.float_format, + na_rep=self.na_rep, + space=self.col_space.get(frame.columns[i]), + decimal=self.decimal, + leading_space=self.index, + ) + + def _get_formatter(self, i: str | int) -> Callable | None: + if isinstance(self.formatters, (list, tuple)): + if is_integer(i): + i = cast(int, i) + return self.formatters[i] + else: + return None + else: + if is_integer(i) and i not in self.columns: + i = self.columns[i] + return self.formatters.get(i, None) + + def _get_formatted_column_labels(self, frame: DataFrame) -> list[list[str]]: + from pandas.core.indexes.multi import sparsify_labels + + columns = frame.columns + + if isinstance(columns, MultiIndex): + fmt_columns = columns._format_multi(sparsify=False, include_names=False) + fmt_columns = list(zip(*fmt_columns)) + dtypes = self.frame.dtypes._values + + # if we have a Float level, they don't use 
leading space at all + restrict_formatting = any(level.is_floating for level in columns.levels) + need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes))) + + def space_format(x, y): + if ( + y not in self.formatters + and need_leadsp[x] + and not restrict_formatting + ): + return " " + y + return y + + str_columns_tuple = list( + zip(*([space_format(x, y) for y in x] for x in fmt_columns)) + ) + if self.sparsify and len(str_columns_tuple): + str_columns_tuple = sparsify_labels(str_columns_tuple) + + str_columns = [list(x) for x in zip(*str_columns_tuple)] + else: + fmt_columns = columns._format_flat(include_name=False) + dtypes = self.frame.dtypes + need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes))) + str_columns = [ + [" " + x if not self._get_formatter(i) and need_leadsp[x] else x] + for i, x in enumerate(fmt_columns) + ] + # self.str_columns = str_columns + return str_columns + + def _get_formatted_index(self, frame: DataFrame) -> list[str]: + # Note: this is only used by to_string() and to_latex(), not by + # to_html(). so safe to cast col_space here. + col_space = {k: cast(int, v) for k, v in self.col_space.items()} + index = frame.index + columns = frame.columns + fmt = self._get_formatter("__index__") + + if isinstance(index, MultiIndex): + fmt_index = index._format_multi( + sparsify=self.sparsify, + include_names=self.show_row_idx_names, + formatter=fmt, + ) + else: + fmt_index = [ + index._format_flat(include_name=self.show_row_idx_names, formatter=fmt) + ] + + fmt_index = [ + tuple( + _make_fixed_width( + list(x), justify="left", minimum=col_space.get("", 0), adj=self.adj + ) + ) + for x in fmt_index + ] + + adjoined = self.adj.adjoin(1, *fmt_index).split("\n") + + # empty space for columns + if self.show_col_idx_names: + col_header = [str(x) for x in self._get_column_name_list()] + else: + col_header = [""] * columns.nlevels + + if self.header: + return col_header + adjoined + else: + return adjoined + + def _get_column_name_list(self) -> list[Hashable]: + names: list[Hashable] = [] + columns = self.frame.columns + if isinstance(columns, MultiIndex): + names.extend("" if name is None else name for name in columns.names) + else: + names.append("" if columns.name is None else columns.name) + return names + + +class DataFrameRenderer: + """Class for creating dataframe output in multiple formats. + + Called in pandas.core.generic.NDFrame: + - to_csv + - to_latex + + Called in pandas.core.frame.DataFrame: + - to_html + - to_string + + Parameters + ---------- + fmt : DataFrameFormatter + Formatter with the formatting options. + """ + + def __init__(self, fmt: DataFrameFormatter) -> None: + self.fmt = fmt + + def to_html( + self, + buf: FilePath | WriteBuffer[str] | None = None, + encoding: str | None = None, + classes: str | list | tuple | None = None, + notebook: bool = False, + border: int | bool | None = None, + table_id: str | None = None, + render_links: bool = False, + ) -> str | None: + """ + Render a DataFrame to a html table. + + Parameters + ---------- + buf : str, path object, file-like object, or None, default None + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a string ``write()`` function. If None, the result is + returned as a string. + encoding : str, default “utf-8” + Set character encoding. + classes : str or list-like + classes to include in the `class` attribute of the opening + ```` tag, in addition to the default "dataframe". 
+ notebook : {True, False}, optional, default False + Whether the generated HTML is for IPython Notebook. + border : int + A ``border=border`` attribute is included in the opening + ``<table>`` tag. Default ``pd.options.display.html.border``. + table_id : str, optional + A css id is included in the opening `<table>
` tag if specified. + render_links : bool, default False + Convert URLs to HTML links. + """ + from pandas.io.formats.html import ( + HTMLFormatter, + NotebookFormatter, + ) + + Klass = NotebookFormatter if notebook else HTMLFormatter + + html_formatter = Klass( + self.fmt, + classes=classes, + border=border, + table_id=table_id, + render_links=render_links, + ) + string = html_formatter.to_string() + return save_to_buffer(string, buf=buf, encoding=encoding) + + def to_string( + self, + buf: FilePath | WriteBuffer[str] | None = None, + encoding: str | None = None, + line_width: int | None = None, + ) -> str | None: + """ + Render a DataFrame to a console-friendly tabular output. + + Parameters + ---------- + buf : str, path object, file-like object, or None, default None + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a string ``write()`` function. If None, the result is + returned as a string. + encoding: str, default “utf-8” + Set character encoding. + line_width : int, optional + Width to wrap a line in characters. + """ + from pandas.io.formats.string import StringFormatter + + string_formatter = StringFormatter(self.fmt, line_width=line_width) + string = string_formatter.to_string() + return save_to_buffer(string, buf=buf, encoding=encoding) + + def to_csv( + self, + path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, + encoding: str | None = None, + sep: str = ",", + columns: Sequence[Hashable] | None = None, + index_label: IndexLabel | None = None, + mode: str = "w", + compression: CompressionOptions = "infer", + quoting: int | None = None, + quotechar: str = '"', + lineterminator: str | None = None, + chunksize: int | None = None, + date_format: str | None = None, + doublequote: bool = True, + escapechar: str | None = None, + errors: str = "strict", + storage_options: StorageOptions | None = None, + ) -> str | None: + """ + Render dataframe as comma-separated file. + """ + from pandas.io.formats.csvs import CSVFormatter + + if path_or_buf is None: + created_buffer = True + path_or_buf = StringIO() + else: + created_buffer = False + + csv_formatter = CSVFormatter( + path_or_buf=path_or_buf, + lineterminator=lineterminator, + sep=sep, + encoding=encoding, + errors=errors, + compression=compression, + quoting=quoting, + cols=columns, + index_label=index_label, + mode=mode, + chunksize=chunksize, + quotechar=quotechar, + date_format=date_format, + doublequote=doublequote, + escapechar=escapechar, + storage_options=storage_options, + formatter=self.fmt, + ) + csv_formatter.save() + + if created_buffer: + assert isinstance(path_or_buf, StringIO) + content = path_or_buf.getvalue() + path_or_buf.close() + return content + + return None + + +def save_to_buffer( + string: str, + buf: FilePath | WriteBuffer[str] | None = None, + encoding: str | None = None, +) -> str | None: + """ + Perform serialization. Write to buf or return as string if buf is None. + """ + with _get_buffer(buf, encoding=encoding) as fd: + fd.write(string) + if buf is None: + # error: "WriteBuffer[str]" has no attribute "getvalue" + return fd.getvalue() # type: ignore[attr-defined] + return None + + +@contextmanager +def _get_buffer( + buf: FilePath | WriteBuffer[str] | None, encoding: str | None = None +) -> Generator[WriteBuffer[str], None, None] | Generator[StringIO, None, None]: + """ + Context manager to open, yield and close buffer for filenames or Path-like + objects, otherwise yield buf unchanged. 
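+ + A doctest-style sketch of the contract (this is a private helper, normally + reached via ``save_to_buffer``): + + >>> with _get_buffer(None) as fd: + ...     _ = fd.write("abc") + >>> fd.getvalue() + 'abc'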
+ """ + if buf is not None: + buf = stringify_path(buf) + else: + buf = StringIO() + + if encoding is None: + encoding = "utf-8" + elif not isinstance(buf, str): + raise ValueError("buf is not a file name and encoding is specified.") + + if hasattr(buf, "write"): + # Incompatible types in "yield" (actual type "Union[str, WriteBuffer[str], + # StringIO]", expected type "Union[WriteBuffer[str], StringIO]") + yield buf # type: ignore[misc] + elif isinstance(buf, str): + check_parent_directory(str(buf)) + with open(buf, "w", encoding=encoding, newline="") as f: + # GH#30034 open instead of codecs.open prevents a file leak + # if we have an invalid encoding argument. + # newline="" is needed to roundtrip correctly on + # windows test_to_latex_filename + yield f + else: + raise TypeError("buf is not a file name and it has no write method") + + +# ---------------------------------------------------------------------- +# Array formatters + + +def format_array( + values: ArrayLike, + formatter: Callable | None, + float_format: FloatFormatType | None = None, + na_rep: str = "NaN", + digits: int | None = None, + space: str | int | None = None, + justify: str = "right", + decimal: str = ".", + leading_space: bool | None = True, + quoting: int | None = None, + fallback_formatter: Callable | None = None, +) -> list[str]: + """ + Format an array for printing. + + Parameters + ---------- + values : np.ndarray or ExtensionArray + formatter + float_format + na_rep + digits + space + justify + decimal + leading_space : bool, optional, default True + Whether the array should be formatted with a leading space. + When an array as a column of a Series or DataFrame, we do want + the leading space to pad between columns. + + When formatting an Index subclass + (e.g. IntervalIndex._get_values_for_csv), we don't want the + leading space since it should be left-aligned. 
+ fallback_formatter + + Returns + ------- + List[str] + """ + fmt_klass: type[_GenericArrayFormatter] + if lib.is_np_dtype(values.dtype, "M"): + fmt_klass = _Datetime64Formatter + values = cast(DatetimeArray, values) + elif isinstance(values.dtype, DatetimeTZDtype): + fmt_klass = _Datetime64TZFormatter + values = cast(DatetimeArray, values) + elif lib.is_np_dtype(values.dtype, "m"): + fmt_klass = _Timedelta64Formatter + values = cast(TimedeltaArray, values) + elif isinstance(values.dtype, ExtensionDtype): + fmt_klass = _ExtensionArrayFormatter + elif lib.is_np_dtype(values.dtype, "fc"): + fmt_klass = FloatArrayFormatter + elif lib.is_np_dtype(values.dtype, "iu"): + fmt_klass = _IntArrayFormatter + else: + fmt_klass = _GenericArrayFormatter + + if space is None: + space = 12 + + if float_format is None: + float_format = get_option("display.float_format") + + if digits is None: + digits = get_option("display.precision") + + fmt_obj = fmt_klass( + values, + digits=digits, + na_rep=na_rep, + float_format=float_format, + formatter=formatter, + space=space, + justify=justify, + decimal=decimal, + leading_space=leading_space, + quoting=quoting, + fallback_formatter=fallback_formatter, + ) + + return fmt_obj.get_result() + + +class _GenericArrayFormatter: + def __init__( + self, + values: ArrayLike, + digits: int = 7, + formatter: Callable | None = None, + na_rep: str = "NaN", + space: str | int = 12, + float_format: FloatFormatType | None = None, + justify: str = "right", + decimal: str = ".", + quoting: int | None = None, + fixed_width: bool = True, + leading_space: bool | None = True, + fallback_formatter: Callable | None = None, + ) -> None: + self.values = values + self.digits = digits + self.na_rep = na_rep + self.space = space + self.formatter = formatter + self.float_format = float_format + self.justify = justify + self.decimal = decimal + self.quoting = quoting + self.fixed_width = fixed_width + self.leading_space = leading_space + self.fallback_formatter = fallback_formatter + + def get_result(self) -> list[str]: + fmt_values = self._format_strings() + return _make_fixed_width(fmt_values, self.justify) + + def _format_strings(self) -> list[str]: + if self.float_format is None: + float_format = get_option("display.float_format") + if float_format is None: + precision = get_option("display.precision") + float_format = lambda x: _trim_zeros_single_float( + f"{x: .{precision:d}f}" + ) + else: + float_format = self.float_format + + if self.formatter is not None: + formatter = self.formatter + elif self.fallback_formatter is not None: + formatter = self.fallback_formatter + else: + quote_strings = self.quoting is not None and self.quoting != QUOTE_NONE + formatter = partial( + printing.pprint_thing, + escape_chars=("\t", "\r", "\n"), + quote_strings=quote_strings, + ) + + def _format(x): + if self.na_rep is not None and is_scalar(x) and isna(x): + if x is None: + return "None" + elif x is NA: + return str(NA) + elif lib.is_float(x) and np.isinf(x): + # TODO(3.0): this will be unreachable when use_inf_as_na + # deprecation is enforced + return str(x) + elif x is NaT or isinstance(x, (np.datetime64, np.timedelta64)): + return "NaT" + return self.na_rep + elif isinstance(x, PandasObject): + return str(x) + elif isinstance(x, StringDtype): + return repr(x) + else: + # object dtype + return str(formatter(x)) + + vals = self.values + if not isinstance(vals, np.ndarray): + raise TypeError( + "ExtensionArray formatting should use _ExtensionArrayFormatter" + ) + inferred = lib.map_infer(vals, 
is_float) + is_float_type = ( + inferred + # vals may have 2 or more dimensions + & np.all(notna(vals), axis=tuple(range(1, len(vals.shape)))) + ) + leading_space = self.leading_space + if leading_space is None: + leading_space = is_float_type.any() + + fmt_values = [] + for i, v in enumerate(vals): + if (not is_float_type[i] or self.formatter is not None) and leading_space: + fmt_values.append(f" {_format(v)}") + elif is_float_type[i]: + fmt_values.append(float_format(v)) + else: + if leading_space is False: + # False specifically, so that the default is + # to include a space if we get here. + tpl = "{v}" + else: + tpl = " {v}" + fmt_values.append(tpl.format(v=_format(v))) + + return fmt_values + + +class FloatArrayFormatter(_GenericArrayFormatter): + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + # float_format is expected to be a string + # formatter should be used to pass a function + if self.float_format is not None and self.formatter is None: + # GH21625, GH22270 + self.fixed_width = False + if callable(self.float_format): + self.formatter = self.float_format + self.float_format = None + + def _value_formatter( + self, + float_format: FloatFormatType | None = None, + threshold: float | None = None, + ) -> Callable: + """Returns a function to be applied on each value to format it""" + # the float_format parameter supersedes self.float_format + if float_format is None: + float_format = self.float_format + + # we are going to compose different functions, to first convert to + # a string, then replace the decimal symbol, and finally chop according + # to the threshold + + # when there is no float_format, we use str instead of '%g' + # because str(0.0) = '0.0' while '%g' % 0.0 = '0' + if float_format: + + def base_formatter(v): + assert float_format is not None # for mypy + # error: "str" not callable + # error: Unexpected keyword argument "value" for "__call__" of + # "EngFormatter" + return ( + float_format(value=v) # type: ignore[operator,call-arg] + if notna(v) + else self.na_rep + ) + + else: + + def base_formatter(v): + return str(v) if notna(v) else self.na_rep + + if self.decimal != ".": + + def decimal_formatter(v): + return base_formatter(v).replace(".", self.decimal, 1) + + else: + decimal_formatter = base_formatter + + if threshold is None: + return decimal_formatter + + def formatter(value): + if notna(value): + if abs(value) > threshold: + return decimal_formatter(value) + else: + return decimal_formatter(0.0) + else: + return self.na_rep + + return formatter + + def get_result_as_array(self) -> np.ndarray: + """ + Returns the float values converted into strings using + the parameters given at initialisation, as a numpy array + """ + + def format_with_na_rep(values: ArrayLike, formatter: Callable, na_rep: str): + mask = isna(values) + formatted = np.array( + [ + formatter(val) if not m else na_rep + for val, m in zip(values.ravel(), mask.ravel()) + ] + ).reshape(values.shape) + return formatted + + def format_complex_with_na_rep( + values: ArrayLike, formatter: Callable, na_rep: str + ): + real_values = np.real(values).ravel() # type: ignore[arg-type] + imag_values = np.imag(values).ravel() # type: ignore[arg-type] + real_mask, imag_mask = isna(real_values), isna(imag_values) + formatted_lst = [] + for val, real_val, imag_val, re_isna, im_isna in zip( + values.ravel(), + real_values, + imag_values, + real_mask, + imag_mask, + ): + if not re_isna and not im_isna: + formatted_lst.append(formatter(val)) + elif not re_isna: # xxx+nanj + 
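# only the imaginary component is NA here, so keep the formatted real + # part and substitute na_rep for the imaginary part: +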
formatted_lst.append(f"{formatter(real_val)}+{na_rep}j") + elif not im_isna: # nan[+/-]xxxj + # The imaginary part may either start with a "-" or a space + imag_formatted = formatter(imag_val).strip() + if imag_formatted.startswith("-"): + formatted_lst.append(f"{na_rep}{imag_formatted}j") + else: + formatted_lst.append(f"{na_rep}+{imag_formatted}j") + else: # nan+nanj + formatted_lst.append(f"{na_rep}+{na_rep}j") + return np.array(formatted_lst).reshape(values.shape) + + if self.formatter is not None: + return format_with_na_rep(self.values, self.formatter, self.na_rep) + + if self.fixed_width: + threshold = get_option("display.chop_threshold") + else: + threshold = None + + # if we have a fixed_width, we'll need to try different float_format + def format_values_with(float_format): + formatter = self._value_formatter(float_format, threshold) + + # default formatter leaves a space to the left when formatting + # floats, must be consistent for left-justifying NaNs (GH #25061) + na_rep = " " + self.na_rep if self.justify == "left" else self.na_rep + + # different formatting strategies for complex and non-complex data + # need to distinguish complex and float NaNs (GH #53762) + values = self.values + is_complex = is_complex_dtype(values) + + # separate the wheat from the chaff + if is_complex: + values = format_complex_with_na_rep(values, formatter, na_rep) + else: + values = format_with_na_rep(values, formatter, na_rep) + + if self.fixed_width: + if is_complex: + result = _trim_zeros_complex(values, self.decimal) + else: + result = _trim_zeros_float(values, self.decimal) + return np.asarray(result, dtype="object") + + return values + + # There is a special default string when we are fixed-width + # The default is otherwise to use str instead of a formatting string + float_format: FloatFormatType | None + if self.float_format is None: + if self.fixed_width: + if self.leading_space is True: + fmt_str = "{value: .{digits:d}f}" + else: + fmt_str = "{value:.{digits:d}f}" + float_format = partial(fmt_str.format, digits=self.digits) + else: + float_format = self.float_format + else: + float_format = lambda value: self.float_format % value + + formatted_values = format_values_with(float_format) + + if not self.fixed_width: + return formatted_values + + # we need do convert to engineering format if some values are too small + # and would appear as 0, or if some values are too big and take too + # much space + + if len(formatted_values) > 0: + maxlen = max(len(x) for x in formatted_values) + too_long = maxlen > self.digits + 6 + else: + too_long = False + + abs_vals = np.abs(self.values) + # this is pretty arbitrary for now + # large values: more that 8 characters including decimal symbol + # and first digit, hence > 1e6 + has_large_values = (abs_vals > 1e6).any() + has_small_values = ((abs_vals < 10 ** (-self.digits)) & (abs_vals > 0)).any() + + if has_small_values or (too_long and has_large_values): + if self.leading_space is True: + fmt_str = "{value: .{digits:d}e}" + else: + fmt_str = "{value:.{digits:d}e}" + float_format = partial(fmt_str.format, digits=self.digits) + formatted_values = format_values_with(float_format) + + return formatted_values + + def _format_strings(self) -> list[str]: + return list(self.get_result_as_array()) + + +class _IntArrayFormatter(_GenericArrayFormatter): + def _format_strings(self) -> list[str]: + if self.leading_space is False: + formatter_str = lambda x: f"{x:d}".format(x=x) + else: + formatter_str = lambda x: f"{x: d}".format(x=x) + formatter = self.formatter or 
formatter_str + fmt_values = [formatter(x) for x in self.values] + return fmt_values + + +class _Datetime64Formatter(_GenericArrayFormatter): + values: DatetimeArray + + def __init__( + self, + values: DatetimeArray, + nat_rep: str = "NaT", + date_format: None = None, + **kwargs, + ) -> None: + super().__init__(values, **kwargs) + self.nat_rep = nat_rep + self.date_format = date_format + + def _format_strings(self) -> list[str]: + """we by definition have DO NOT have a TZ""" + values = self.values + + if self.formatter is not None: + return [self.formatter(x) for x in values] + + fmt_values = values._format_native_types( + na_rep=self.nat_rep, date_format=self.date_format + ) + return fmt_values.tolist() + + +class _ExtensionArrayFormatter(_GenericArrayFormatter): + values: ExtensionArray + + def _format_strings(self) -> list[str]: + values = self.values + + formatter = self.formatter + fallback_formatter = None + if formatter is None: + fallback_formatter = values._formatter(boxed=True) + + if isinstance(values, Categorical): + # Categorical is special for now, so that we can preserve tzinfo + array = values._internal_get_values() + else: + array = np.asarray(values, dtype=object) + + fmt_values = format_array( + array, + formatter, + float_format=self.float_format, + na_rep=self.na_rep, + digits=self.digits, + space=self.space, + justify=self.justify, + decimal=self.decimal, + leading_space=self.leading_space, + quoting=self.quoting, + fallback_formatter=fallback_formatter, + ) + return fmt_values + + +def format_percentiles( + percentiles: (np.ndarray | Sequence[float]), +) -> list[str]: + """ + Outputs rounded and formatted percentiles. + + Parameters + ---------- + percentiles : list-like, containing floats from interval [0,1] + + Returns + ------- + formatted : list of strings + + Notes + ----- + Rounding precision is chosen so that: (1) if any two elements of + ``percentiles`` differ, they remain different after rounding + (2) no entry is *rounded* to 0% or 100%. + Any non-integer is always rounded to at least 1 decimal place. + + Examples + -------- + Keeps all entries different after rounding: + + >>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999]) + ['1.999%', '2.001%', '50%', '66.667%', '99.99%'] + + No element is rounded to 0% or 100% (unless already equal to it). 
+ Duplicates are allowed: + + >>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999]) + ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%'] + """ + percentiles = np.asarray(percentiles) + + # It checks for np.nan as well + if ( + not is_numeric_dtype(percentiles) + or not np.all(percentiles >= 0) + or not np.all(percentiles <= 1) + ): + raise ValueError("percentiles should all be in the interval [0,1]") + + percentiles = 100 * percentiles + prec = get_precision(percentiles) + percentiles_round_type = percentiles.round(prec).astype(int) + + int_idx = np.isclose(percentiles_round_type, percentiles) + + if np.all(int_idx): + out = percentiles_round_type.astype(str) + return [i + "%" for i in out] + + unique_pcts = np.unique(percentiles) + prec = get_precision(unique_pcts) + out = np.empty_like(percentiles, dtype=object) + out[int_idx] = percentiles[int_idx].round().astype(int).astype(str) + + out[~int_idx] = percentiles[~int_idx].round(prec).astype(str) + return [i + "%" for i in out] + + +def get_precision(array: np.ndarray | Sequence[float]) -> int: + to_begin = array[0] if array[0] > 0 else None + to_end = 100 - array[-1] if array[-1] < 100 else None + diff = np.ediff1d(array, to_begin=to_begin, to_end=to_end) + diff = abs(diff) + prec = -np.floor(np.log10(np.min(diff))).astype(int) + prec = max(1, prec) + return prec + + +def _format_datetime64(x: NaTType | Timestamp, nat_rep: str = "NaT") -> str: + if x is NaT: + return nat_rep + + # Timestamp.__str__ falls back to datetime.datetime.__str__ = isoformat(sep=' ') + # so it already uses string formatting rather than strftime (faster). + return str(x) + + +def _format_datetime64_dateonly( + x: NaTType | Timestamp, + nat_rep: str = "NaT", + date_format: str | None = None, +) -> str: + if isinstance(x, NaTType): + return nat_rep + + if date_format: + return x.strftime(date_format) + else: + # Timestamp._date_repr relies on string formatting (faster than strftime) + return x._date_repr + + +def get_format_datetime64( + is_dates_only: bool, nat_rep: str = "NaT", date_format: str | None = None +) -> Callable: + """Return a formatter callable taking a datetime64 as input and providing + a string as output""" + + if is_dates_only: + return lambda x: _format_datetime64_dateonly( + x, nat_rep=nat_rep, date_format=date_format + ) + else: + return lambda x: _format_datetime64(x, nat_rep=nat_rep) + + +class _Datetime64TZFormatter(_Datetime64Formatter): + values: DatetimeArray + + def _format_strings(self) -> list[str]: + """we by definition have a TZ""" + ido = self.values._is_dates_only + values = self.values.astype(object) + formatter = self.formatter or get_format_datetime64( + ido, date_format=self.date_format + ) + fmt_values = [formatter(x) for x in values] + + return fmt_values + + +class _Timedelta64Formatter(_GenericArrayFormatter): + values: TimedeltaArray + + def __init__( + self, + values: TimedeltaArray, + nat_rep: str = "NaT", + **kwargs, + ) -> None: + # TODO: nat_rep is never passed, na_rep is. + super().__init__(values, **kwargs) + self.nat_rep = nat_rep + + def _format_strings(self) -> list[str]: + formatter = self.formatter or get_format_timedelta64( + self.values, nat_rep=self.nat_rep, box=False + ) + return [formatter(x) for x in self.values] + + +def get_format_timedelta64( + values: TimedeltaArray, + nat_rep: str | float = "NaT", + box: bool = False, +) -> Callable: + """ + Return a formatter function for a range of timedeltas. 
+ These will all have the same format argument + + If box, then show the return in quotes + """ + even_days = values._is_dates_only + + if even_days: + format = None + else: + format = "long" + + def _formatter(x): + if x is None or (is_scalar(x) and isna(x)): + return nat_rep + + if not isinstance(x, Timedelta): + x = Timedelta(x) + + # Timedelta._repr_base uses string formatting (faster than strftime) + result = x._repr_base(format=format) + if box: + result = f"'{result}'" + return result + + return _formatter + + +def _make_fixed_width( + strings: list[str], + justify: str = "right", + minimum: int | None = None, + adj: printing._TextAdjustment | None = None, +) -> list[str]: + if len(strings) == 0 or justify == "all": + return strings + + if adj is None: + adjustment = printing.get_adjustment() + else: + adjustment = adj + + max_len = max(adjustment.len(x) for x in strings) + + if minimum is not None: + max_len = max(minimum, max_len) + + conf_max = get_option("display.max_colwidth") + if conf_max is not None and max_len > conf_max: + max_len = conf_max + + def just(x: str) -> str: + if conf_max is not None: + if (conf_max > 3) & (adjustment.len(x) > max_len): + x = x[: max_len - 3] + "..." + return x + + strings = [just(x) for x in strings] + result = adjustment.justify(strings, max_len, mode=justify) + return result + + +def _trim_zeros_complex(str_complexes: ArrayLike, decimal: str = ".") -> list[str]: + """ + Separates the real and imaginary parts from the complex number, and + executes the _trim_zeros_float method on each of those. + """ + real_part, imag_part = [], [] + for x in str_complexes: + # Complex numbers are represented as "(-)xxx(+/-)xxxj" + # The split will give [{"", "-"}, "xxx", "+/-", "xxx", "j", ""] + # Therefore, the imaginary part is the 4th and 3rd last elements, + # and the real part is everything before the imaginary part + trimmed = re.split(r"([j+-])", x) + real_part.append("".join(trimmed[:-4])) + imag_part.append("".join(trimmed[-4:-2])) + + # We want to align the lengths of the real and imaginary parts of each complex + # number, as well as the lengths the real (resp. complex) parts of all numbers + # in the array + n = len(str_complexes) + padded_parts = _trim_zeros_float(real_part + imag_part, decimal) + if len(padded_parts) == 0: + return [] + padded_length = max(len(part) for part in padded_parts) - 1 + padded = [ + real_pt # real part, possibly NaN + + imag_pt[0] # +/- + + f"{imag_pt[1:]:>{padded_length}}" # complex part (no sign), possibly nan + + "j" + for real_pt, imag_pt in zip(padded_parts[:n], padded_parts[n:]) + ] + return padded + + +def _trim_zeros_single_float(str_float: str) -> str: + """ + Trims trailing zeros after a decimal point, + leaving just one if necessary. + """ + str_float = str_float.rstrip("0") + if str_float.endswith("."): + str_float += "0" + + return str_float + + +def _trim_zeros_float( + str_floats: ArrayLike | list[str], decimal: str = "." +) -> list[str]: + """ + Trims the maximum number of trailing zeros equally from + all numbers containing decimals, leaving just one if + necessary. + """ + trimmed = str_floats + number_regex = re.compile(rf"^\s*[\+-]?[0-9]+\{decimal}[0-9]*$") + + def is_number_with_decimal(x) -> bool: + return re.match(number_regex, x) is not None + + def should_trim(values: ArrayLike | list[str]) -> bool: + """ + Determine if an array of strings should be trimmed. 
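+ + For instance (sketch): ``["1.20", "3.50"]`` should be trimmed, since every + decimal string ends in "0"; ``["1.20", "3.51"]`` should not.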
+ + Returns True if all numbers containing decimals (defined by the + above regular expression) within the array end in a zero, otherwise + returns False. + """ + numbers = [x for x in values if is_number_with_decimal(x)] + return len(numbers) > 0 and all(x.endswith("0") for x in numbers) + + while should_trim(trimmed): + trimmed = [x[:-1] if is_number_with_decimal(x) else x for x in trimmed] + + # leave one 0 after the decimal points if need be. + result = [ + x + "0" if is_number_with_decimal(x) and x.endswith(decimal) else x + for x in trimmed + ] + return result + + +def _has_names(index: Index) -> bool: + if isinstance(index, MultiIndex): + return com.any_not_none(*index.names) + else: + return index.name is not None + + +class EngFormatter: + """ + Formats float values according to engineering format. + + Based on matplotlib.ticker.EngFormatter + """ + + # The SI engineering prefixes + ENG_PREFIXES = { + -24: "y", + -21: "z", + -18: "a", + -15: "f", + -12: "p", + -9: "n", + -6: "u", + -3: "m", + 0: "", + 3: "k", + 6: "M", + 9: "G", + 12: "T", + 15: "P", + 18: "E", + 21: "Z", + 24: "Y", + } + + def __init__( + self, accuracy: int | None = None, use_eng_prefix: bool = False + ) -> None: + self.accuracy = accuracy + self.use_eng_prefix = use_eng_prefix + + def __call__(self, num: float) -> str: + """ + Formats a number in engineering notation, appending a letter + representing the power of 1000 of the original number. Some examples: + >>> format_eng = EngFormatter(accuracy=0, use_eng_prefix=True) + >>> format_eng(0) + ' 0' + >>> format_eng = EngFormatter(accuracy=1, use_eng_prefix=True) + >>> format_eng(1_000_000) + ' 1.0M' + >>> format_eng = EngFormatter(accuracy=2, use_eng_prefix=False) + >>> format_eng("-1e-6") + '-1.00E-06' + + @param num: the value to represent + @type num: either a numeric value or a string that can be converted to + a numeric value (as per decimal.Decimal constructor) + + @return: engineering formatted string + """ + dnum = Decimal(str(num)) + + if Decimal.is_nan(dnum): + return "NaN" + + if Decimal.is_infinite(dnum): + return "inf" + + sign = 1 + + if dnum < 0: # pragma: no cover + sign = -1 + dnum = -dnum + + if dnum != 0: + pow10 = Decimal(int(math.floor(dnum.log10() / 3) * 3)) + else: + pow10 = Decimal(0) + + pow10 = pow10.min(max(self.ENG_PREFIXES.keys())) + pow10 = pow10.max(min(self.ENG_PREFIXES.keys())) + int_pow10 = int(pow10) + + if self.use_eng_prefix: + prefix = self.ENG_PREFIXES[int_pow10] + elif int_pow10 < 0: + prefix = f"E-{-int_pow10:02d}" + else: + prefix = f"E+{int_pow10:02d}" + + mant = sign * dnum / (10**pow10) + + if self.accuracy is None: # pragma: no cover + format_str = "{mant: g}{prefix}" + else: + format_str = f"{{mant: .{self.accuracy:d}f}}{{prefix}}" + + formatted = format_str.format(mant=mant, prefix=prefix) + + return formatted + + +def set_eng_float_format(accuracy: int = 3, use_eng_prefix: bool = False) -> None: + """ + Format float representation in DataFrame with SI notation. + + Parameters + ---------- + accuracy : int, default 3 + Number of decimal digits after the floating point. + use_eng_prefix : bool, default False + Whether to represent a value with SI prefixes. 
+ + Returns + ------- + None + + Examples + -------- + >>> df = pd.DataFrame([1e-9, 1e-3, 1, 1e3, 1e6]) + >>> df + 0 + 0 1.000000e-09 + 1 1.000000e-03 + 2 1.000000e+00 + 3 1.000000e+03 + 4 1.000000e+06 + + >>> pd.set_eng_float_format(accuracy=1) + >>> df + 0 + 0 1.0E-09 + 1 1.0E-03 + 2 1.0E+00 + 3 1.0E+03 + 4 1.0E+06 + + >>> pd.set_eng_float_format(use_eng_prefix=True) + >>> df + 0 + 0 1.000n + 1 1.000m + 2 1.000 + 3 1.000k + 4 1.000M + + >>> pd.set_eng_float_format(accuracy=1, use_eng_prefix=True) + >>> df + 0 + 0 1.0n + 1 1.0m + 2 1.0 + 3 1.0k + 4 1.0M + + >>> pd.set_option("display.float_format", None) # unset option + """ + set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix)) + + +def get_level_lengths( + levels: Any, sentinel: bool | object | str = "" +) -> list[dict[int, int]]: + """ + For each index in each level the function returns lengths of indexes. + + Parameters + ---------- + levels : list of lists + List of values on for level. + sentinel : string, optional + Value which states that no new index starts on there. + + Returns + ------- + Returns list of maps. For each level returns map of indexes (key is index + in row and value is length of index). + """ + if len(levels) == 0: + return [] + + control = [True] * len(levels[0]) + + result = [] + for level in levels: + last_index = 0 + + lengths = {} + for i, key in enumerate(level): + if control[i] and key == sentinel: + pass + else: + control[i] = False + lengths[last_index] = i - last_index + last_index = i + + lengths[last_index] = len(level) - last_index + + result.append(lengths) + + return result + + +def buffer_put_lines(buf: WriteBuffer[str], lines: list[str]) -> None: + """ + Appends lines to a buffer. + + Parameters + ---------- + buf + The buffer to write to + lines + The lines to append. + """ + if any(isinstance(x, str) for x in lines): + lines = [str(x) for x in lines] + buf.write("\n".join(lines)) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/html.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/html.py new file mode 100644 index 0000000000000000000000000000000000000000..794ce77b3b45ec38d9fa58a708939e53bb8ae629 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/html.py @@ -0,0 +1,646 @@ +""" +Module for formatting output data in HTML. +""" +from __future__ import annotations + +from textwrap import dedent +from typing import ( + TYPE_CHECKING, + Any, + Final, + cast, +) + +from pandas._config import get_option + +from pandas._libs import lib + +from pandas import ( + MultiIndex, + option_context, +) + +from pandas.io.common import is_url +from pandas.io.formats.format import ( + DataFrameFormatter, + get_level_lengths, +) +from pandas.io.formats.printing import pprint_thing + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterable, + Mapping, + ) + + +class HTMLFormatter: + """ + Internal class for formatting output data in html. + This class is intended for shared functionality between + DataFrame.to_html() and DataFrame._repr_html_(). + Any logic in common with other output formatting methods + should ideally be inherited from classes in format.py + and this class responsible for only producing html markup. + """ + + indent_delta: Final = 2 + + def __init__( + self, + formatter: DataFrameFormatter, + classes: str | list[str] | tuple[str, ...] 
| None = None, + border: int | bool | None = None, + table_id: str | None = None, + render_links: bool = False, + ) -> None: + self.fmt = formatter + self.classes = classes + + self.frame = self.fmt.frame + self.columns = self.fmt.tr_frame.columns + self.elements: list[str] = [] + self.bold_rows = self.fmt.bold_rows + self.escape = self.fmt.escape + self.show_dimensions = self.fmt.show_dimensions + if border is None or border is True: + border = cast(int, get_option("display.html.border")) + elif not border: + border = None + + self.border = border + self.table_id = table_id + self.render_links = render_links + + self.col_space = {} + is_multi_index = isinstance(self.columns, MultiIndex) + for column, value in self.fmt.col_space.items(): + col_space_value = f"{value}px" if isinstance(value, int) else value + self.col_space[column] = col_space_value + # GH 53885: Handling case where column is index + # Flatten the data in the multi index and add in the map + if is_multi_index and isinstance(column, tuple): + for column_index in column: + self.col_space[str(column_index)] = col_space_value + + def to_string(self) -> str: + lines = self.render() + if any(isinstance(x, str) for x in lines): + lines = [str(x) for x in lines] + return "\n".join(lines) + + def render(self) -> list[str]: + self._write_table() + + if self.should_show_dimensions: + by = chr(215) # × # noqa: RUF003 + self.write( + f"
<p>{len(self.frame)} rows {by} {len(self.frame.columns)} columns</p>
" + ) + + return self.elements + + @property + def should_show_dimensions(self) -> bool: + return self.fmt.should_show_dimensions + + @property + def show_row_idx_names(self) -> bool: + return self.fmt.show_row_idx_names + + @property + def show_col_idx_names(self) -> bool: + return self.fmt.show_col_idx_names + + @property + def row_levels(self) -> int: + if self.fmt.index: + # showing (row) index + return self.frame.index.nlevels + elif self.show_col_idx_names: + # see gh-22579 + # Column misalignment also occurs for + # a standard index when the columns index is named. + # If the row index is not displayed a column of + # blank cells need to be included before the DataFrame values. + return 1 + # not showing (row) index + return 0 + + def _get_columns_formatted_values(self) -> Iterable: + return self.columns + + @property + def is_truncated(self) -> bool: + return self.fmt.is_truncated + + @property + def ncols(self) -> int: + return len(self.fmt.tr_frame.columns) + + def write(self, s: Any, indent: int = 0) -> None: + rs = pprint_thing(s) + self.elements.append(" " * indent + rs) + + def write_th( + self, s: Any, header: bool = False, indent: int = 0, tags: str | None = None + ) -> None: + """ + Method for writing a formatted . This will + cause min-width to be set if there is one. + indent : int, default 0 + The indentation level of the cell. + tags : str, default None + Tags to include in the cell. + + Returns + ------- + A written ", indent) + else: + self.write(f'', indent) + indent += indent_delta + + for i, s in enumerate(line): + val_tag = tags.get(i, None) + if header or (self.bold_rows and i < nindex_levels): + self.write_th(s, indent=indent, header=header, tags=val_tag) + else: + self.write_td(s, indent, tags=val_tag) + + indent -= indent_delta + self.write("", indent) + + def _write_table(self, indent: int = 0) -> None: + _classes = ["dataframe"] # Default class. + use_mathjax = get_option("display.html.use_mathjax") + if not use_mathjax: + _classes.append("tex2jax_ignore") + if self.classes is not None: + if isinstance(self.classes, str): + self.classes = self.classes.split() + if not isinstance(self.classes, (list, tuple)): + raise TypeError( + "classes must be a string, list, " + f"or tuple, not {type(self.classes)}" + ) + _classes.extend(self.classes) + + if self.table_id is None: + id_section = "" + else: + id_section = f' id="{self.table_id}"' + + if self.border is None: + border_attr = "" + else: + border_attr = f' border="{self.border}"' + + self.write( + f'', + indent, + ) + + if self.fmt.header or self.show_row_idx_names: + self._write_header(indent + self.indent_delta) + + self._write_body(indent + self.indent_delta) + + self.write("
cell. + + If col_space is set on the formatter then that is used for + the value of min-width. + + Parameters + ---------- + s : object + The data to be written inside the cell. + header : bool, default False + Set to True if the is for use inside
cell. + """ + col_space = self.col_space.get(s, None) + + if header and col_space is not None: + tags = tags or "" + tags += f'style="min-width: {col_space};"' + + self._write_cell(s, kind="th", indent=indent, tags=tags) + + def write_td(self, s: Any, indent: int = 0, tags: str | None = None) -> None: + self._write_cell(s, kind="td", indent=indent, tags=tags) + + def _write_cell( + self, s: Any, kind: str = "td", indent: int = 0, tags: str | None = None + ) -> None: + if tags is not None: + start_tag = f"<{kind} {tags}>" + else: + start_tag = f"<{kind}>" + + if self.escape: + # escape & first to prevent double escaping of & + esc = {"&": r"&amp;", "<": r"&lt;", ">": r"&gt;"} + else: + esc = {} + + rs = pprint_thing(s, escape_chars=esc).strip() + + if self.render_links and is_url(rs): + rs_unescaped = pprint_thing(s, escape_chars={}).strip() + start_tag += f'<a href="{rs_unescaped}" target="_blank">' + end_a = "</a>" + else: + end_a = "" + + self.write(f"{start_tag}{rs}{end_a}", indent) + + def write_tr( + self, + line: Iterable, + indent: int = 0, + indent_delta: int = 0, + header: bool = False, + align: str | None = None, + tags: dict[int, str] | None = None, + nindex_levels: int = 0, + ) -> None: + if tags is None: + tags = {} + + if align is None: + self.write("<tr>", indent) + else: + self.write(f'<tr style="text-align: {align};">', indent) + indent += indent_delta + + for i, s in enumerate(line): + val_tag = tags.get(i, None) + if header or (self.bold_rows and i < nindex_levels): + self.write_th(s, indent=indent, header=header, tags=val_tag) + else: + self.write_td(s, indent, tags=val_tag) + + indent -= indent_delta + self.write("</tr>", indent) + + def _write_table(self, indent: int = 0) -> None: + _classes = ["dataframe"]  # Default class. + use_mathjax = get_option("display.html.use_mathjax") + if not use_mathjax: + _classes.append("tex2jax_ignore") + if self.classes is not None: + if isinstance(self.classes, str): + self.classes = self.classes.split() + if not isinstance(self.classes, (list, tuple)): + raise TypeError( + "classes must be a string, list, " + f"or tuple, not {type(self.classes)}" + ) + _classes.extend(self.classes) + + if self.table_id is None: + id_section = "" + else: + id_section = f' id="{self.table_id}"' + + if self.border is None: + border_attr = "" + else: + border_attr = f' border="{self.border}"' + + self.write( + f'<table{border_attr}{id_section} class="{" ".join(_classes)}">', + indent, + ) + + if self.fmt.header or self.show_row_idx_names: + self._write_header(indent + self.indent_delta) + + self._write_body(indent + self.indent_delta) + + self.write("</table>
+ if self.fmt.show_index_names: + row.append(self.columns.name or "") + else: + row.append("") + row.extend(self._get_columns_formatted_values()) + align = self.fmt.justify + + if is_truncated_horizontally: + ins_col = self.row_levels + self.fmt.tr_col_num + row.insert(ins_col, "...") + + self.write_tr(row, indent, self.indent_delta, header=True, align=align) + + def _write_row_header(self, indent: int) -> None: + is_truncated_horizontally = self.fmt.is_truncated_horizontally + row = [x if x is not None else "" for x in self.frame.index.names] + [""] * ( + self.ncols + (1 if is_truncated_horizontally else 0) + ) + self.write_tr(row, indent, self.indent_delta, header=True) + + def _write_header(self, indent: int) -> None: + self.write("", indent) + + if self.fmt.header: + self._write_col_header(indent + self.indent_delta) + + if self.show_row_idx_names: + self._write_row_header(indent + self.indent_delta) + + self.write("", indent) + + def _get_formatted_values(self) -> dict[int, list[str]]: + with option_context("display.max_colwidth", None): + fmt_values = {i: self.fmt.format_col(i) for i in range(self.ncols)} + return fmt_values + + def _write_body(self, indent: int) -> None: + self.write("", indent) + fmt_values = self._get_formatted_values() + + # write values + if self.fmt.index and isinstance(self.frame.index, MultiIndex): + self._write_hierarchical_rows(fmt_values, indent + self.indent_delta) + else: + self._write_regular_rows(fmt_values, indent + self.indent_delta) + + self.write("", indent) + + def _write_regular_rows( + self, fmt_values: Mapping[int, list[str]], indent: int + ) -> None: + is_truncated_horizontally = self.fmt.is_truncated_horizontally + is_truncated_vertically = self.fmt.is_truncated_vertically + + nrows = len(self.fmt.tr_frame) + + if self.fmt.index: + fmt = self.fmt._get_formatter("__index__") + if fmt is not None: + index_values = self.fmt.tr_frame.index.map(fmt) + else: + # only reached with non-Multi index + index_values = self.fmt.tr_frame.index._format_flat(include_name=False) + + row: list[str] = [] + for i in range(nrows): + if is_truncated_vertically and i == (self.fmt.tr_row_num): + str_sep_row = ["..."] * len(row) + self.write_tr( + str_sep_row, + indent, + self.indent_delta, + tags=None, + nindex_levels=self.row_levels, + ) + + row = [] + if self.fmt.index: + row.append(index_values[i]) + # see gh-22579 + # Column misalignment also occurs for + # a standard index when the columns index is named. + # Add blank cell before data cells. 
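+ # e.g. a columns index named "cols" renders "cols" in the cell above + # the row index; an unnamed columns index renders an empty cell.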
+ elif self.show_col_idx_names: + row.append("") + row.extend(fmt_values[j][i] for j in range(self.ncols)) + + if is_truncated_horizontally: + dot_col_ix = self.fmt.tr_col_num + self.row_levels + row.insert(dot_col_ix, "...") + self.write_tr( + row, indent, self.indent_delta, tags=None, nindex_levels=self.row_levels + ) + + def _write_hierarchical_rows( + self, fmt_values: Mapping[int, list[str]], indent: int + ) -> None: + template = 'rowspan="{span}" valign="top"' + + is_truncated_horizontally = self.fmt.is_truncated_horizontally + is_truncated_vertically = self.fmt.is_truncated_vertically + frame = self.fmt.tr_frame + nrows = len(frame) + + assert isinstance(frame.index, MultiIndex) + idx_values = frame.index._format_multi(sparsify=False, include_names=False) + idx_values = list(zip(*idx_values)) + + if self.fmt.sparsify: + # GH3547 + sentinel = lib.no_default + levels = frame.index._format_multi(sparsify=sentinel, include_names=False) + + level_lengths = get_level_lengths(levels, sentinel) + inner_lvl = len(level_lengths) - 1 + if is_truncated_vertically: + # Insert ... row and adjust idx_values and + # level_lengths to take this into account. + ins_row = self.fmt.tr_row_num + inserted = False + for lnum, records in enumerate(level_lengths): + rec_new = {} + for tag, span in list(records.items()): + if tag >= ins_row: + rec_new[tag + 1] = span + elif tag + span > ins_row: + rec_new[tag] = span + 1 + + # GH 14882 - Make sure insertion done once + if not inserted: + dot_row = list(idx_values[ins_row - 1]) + dot_row[-1] = "..." + idx_values.insert(ins_row, tuple(dot_row)) + inserted = True + else: + dot_row = list(idx_values[ins_row]) + dot_row[inner_lvl - lnum] = "..." + idx_values[ins_row] = tuple(dot_row) + else: + rec_new[tag] = span + # If ins_row lies between tags, all cols idx cols + # receive ... + if tag + span == ins_row: + rec_new[ins_row] = 1 + if lnum == 0: + idx_values.insert( + ins_row, tuple(["..."] * len(level_lengths)) + ) + + # GH 14882 - Place ... in correct level + elif inserted: + dot_row = list(idx_values[ins_row]) + dot_row[inner_lvl - lnum] = "..." + idx_values[ins_row] = tuple(dot_row) + level_lengths[lnum] = rec_new + + level_lengths[inner_lvl][ins_row] = 1 + for ix_col in fmt_values: + fmt_values[ix_col].insert(ins_row, "...") + nrows += 1 + + for i in range(nrows): + row = [] + tags = {} + + sparse_offset = 0 + j = 0 + for records, v in zip(level_lengths, idx_values[i]): + if i in records: + if records[i] > 1: + tags[j] = template.format(span=records[i]) + else: + sparse_offset += 1 + continue + + j += 1 + row.append(v) + + row.extend(fmt_values[j][i] for j in range(self.ncols)) + if is_truncated_horizontally: + row.insert( + self.row_levels - sparse_offset + self.fmt.tr_col_num, "..." 
+                )
+            self.write_tr(
+                row,
+                indent,
+                self.indent_delta,
+                tags=tags,
+                nindex_levels=len(levels) - sparse_offset,
+            )
+        else:
+            row = []
+            for i in range(len(frame)):
+                if is_truncated_vertically and i == (self.fmt.tr_row_num):
+                    str_sep_row = ["..."] * len(row)
+                    self.write_tr(
+                        str_sep_row,
+                        indent,
+                        self.indent_delta,
+                        tags=None,
+                        nindex_levels=self.row_levels,
+                    )
+
+                idx_values = list(
+                    zip(*frame.index._format_multi(sparsify=False, include_names=False))
+                )
+                row = []
+                row.extend(idx_values[i])
+                row.extend(fmt_values[j][i] for j in range(self.ncols))
+                if is_truncated_horizontally:
+                    row.insert(self.row_levels + self.fmt.tr_col_num, "...")
+                self.write_tr(
+                    row,
+                    indent,
+                    self.indent_delta,
+                    tags=None,
+                    nindex_levels=frame.index.nlevels,
+                )
+
+
+class NotebookFormatter(HTMLFormatter):
+    """
+    Internal class for formatting output data in html for display in Jupyter
+    Notebooks. This class is intended for functionality specific to
+    DataFrame._repr_html_() and DataFrame.to_html(notebook=True)
+    """
+
+    def _get_formatted_values(self) -> dict[int, list[str]]:
+        return {i: self.fmt.format_col(i) for i in range(self.ncols)}
+
+    def _get_columns_formatted_values(self) -> list[str]:
+        # only reached with non-Multi Index
+        return self.columns._format_flat(include_name=False)
+
+    def write_style(self) -> None:
+        # We use the "scoped" attribute here so that the desired
+        # style properties for the data frame are not then applied
+        # throughout the entire notebook.
+        template_first = """\
+            <style scoped>"""
+        template_last = """\
+            </style>"""
+        template_select = """\
+            .dataframe %s {
+                %s: %s;
+            }"""
+        element_props = [
+            ("tbody tr th:only-of-type", "vertical-align", "middle"),
+            ("tbody tr th", "vertical-align", "top"),
+        ]
+        if isinstance(self.columns, MultiIndex):
+            element_props.append(("thead tr th", "text-align", "left"))
+            if self.show_row_idx_names:
+                element_props.append(
+                    ("thead tr:last-of-type th", "text-align", "right")
+                )
+        else:
+            element_props.append(("thead th", "text-align", "right"))
+        template_mid = "\n\n".join(template_select % t for t in element_props)
+        template = dedent(f"{template_first}\n{template_mid}\n{template_last}")
+        self.write(template)
+
+    def render(self) -> list[str]:
+        self.write("<div>")
") + self.write_style() + super().render() + self.write("
") + return self.elements diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/info.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/info.py new file mode 100644 index 0000000000000000000000000000000000000000..552affbd053f2bed3f4d5f678ddf8eb293f65b01 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/info.py @@ -0,0 +1,1101 @@ +from __future__ import annotations + +from abc import ( + ABC, + abstractmethod, +) +import sys +from textwrap import dedent +from typing import TYPE_CHECKING + +from pandas._config import get_option + +from pandas.io.formats import format as fmt +from pandas.io.formats.printing import pprint_thing + +if TYPE_CHECKING: + from collections.abc import ( + Iterable, + Iterator, + Mapping, + Sequence, + ) + + from pandas._typing import ( + Dtype, + WriteBuffer, + ) + + from pandas import ( + DataFrame, + Index, + Series, + ) + + +frame_max_cols_sub = dedent( + """\ + max_cols : int, optional + When to switch from the verbose to the truncated output. If the + DataFrame has more than `max_cols` columns, the truncated output + is used. By default, the setting in + ``pandas.options.display.max_info_columns`` is used.""" +) + + +show_counts_sub = dedent( + """\ + show_counts : bool, optional + Whether to show the non-null counts. By default, this is shown + only if the DataFrame is smaller than + ``pandas.options.display.max_info_rows`` and + ``pandas.options.display.max_info_columns``. A value of True always + shows the counts, and False never shows the counts.""" +) + + +frame_examples_sub = dedent( + """\ + >>> int_values = [1, 2, 3, 4, 5] + >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon'] + >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0] + >>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values, + ... "float_col": float_values}) + >>> df + int_col text_col float_col + 0 1 alpha 0.00 + 1 2 beta 0.25 + 2 3 gamma 0.50 + 3 4 delta 0.75 + 4 5 epsilon 1.00 + + Prints information of all columns: + + >>> df.info(verbose=True) + + RangeIndex: 5 entries, 0 to 4 + Data columns (total 3 columns): + # Column Non-Null Count Dtype + --- ------ -------------- ----- + 0 int_col 5 non-null int64 + 1 text_col 5 non-null object + 2 float_col 5 non-null float64 + dtypes: float64(1), int64(1), object(1) + memory usage: 248.0+ bytes + + Prints a summary of columns count and its dtypes but not per column + information: + + >>> df.info(verbose=False) + + RangeIndex: 5 entries, 0 to 4 + Columns: 3 entries, int_col to float_col + dtypes: float64(1), int64(1), object(1) + memory usage: 248.0+ bytes + + Pipe output of DataFrame.info to buffer instead of sys.stdout, get + buffer content and writes to a text file: + + >>> import io + >>> buffer = io.StringIO() + >>> df.info(buf=buffer) + >>> s = buffer.getvalue() + >>> with open("df_info.txt", "w", + ... encoding="utf-8") as f: # doctest: +SKIP + ... f.write(s) + 260 + + The `memory_usage` parameter allows deep introspection mode, specially + useful for big DataFrames and fine-tune memory optimization: + + >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6) + >>> df = pd.DataFrame({ + ... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6), + ... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6), + ... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6) + ... 
}) + >>> df.info() + + RangeIndex: 1000000 entries, 0 to 999999 + Data columns (total 3 columns): + # Column Non-Null Count Dtype + --- ------ -------------- ----- + 0 column_1 1000000 non-null object + 1 column_2 1000000 non-null object + 2 column_3 1000000 non-null object + dtypes: object(3) + memory usage: 22.9+ MB + + >>> df.info(memory_usage='deep') + + RangeIndex: 1000000 entries, 0 to 999999 + Data columns (total 3 columns): + # Column Non-Null Count Dtype + --- ------ -------------- ----- + 0 column_1 1000000 non-null object + 1 column_2 1000000 non-null object + 2 column_3 1000000 non-null object + dtypes: object(3) + memory usage: 165.9 MB""" +) + + +frame_see_also_sub = dedent( + """\ + DataFrame.describe: Generate descriptive statistics of DataFrame + columns. + DataFrame.memory_usage: Memory usage of DataFrame columns.""" +) + + +frame_sub_kwargs = { + "klass": "DataFrame", + "type_sub": " and columns", + "max_cols_sub": frame_max_cols_sub, + "show_counts_sub": show_counts_sub, + "examples_sub": frame_examples_sub, + "see_also_sub": frame_see_also_sub, + "version_added_sub": "", +} + + +series_examples_sub = dedent( + """\ + >>> int_values = [1, 2, 3, 4, 5] + >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon'] + >>> s = pd.Series(text_values, index=int_values) + >>> s.info() + + Index: 5 entries, 1 to 5 + Series name: None + Non-Null Count Dtype + -------------- ----- + 5 non-null object + dtypes: object(1) + memory usage: 80.0+ bytes + + Prints a summary excluding information about its values: + + >>> s.info(verbose=False) + + Index: 5 entries, 1 to 5 + dtypes: object(1) + memory usage: 80.0+ bytes + + Pipe output of Series.info to buffer instead of sys.stdout, get + buffer content and writes to a text file: + + >>> import io + >>> buffer = io.StringIO() + >>> s.info(buf=buffer) + >>> s = buffer.getvalue() + >>> with open("df_info.txt", "w", + ... encoding="utf-8") as f: # doctest: +SKIP + ... f.write(s) + 260 + + The `memory_usage` parameter allows deep introspection mode, specially + useful for big Series and fine-tune memory optimization: + + >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6) + >>> s = pd.Series(np.random.choice(['a', 'b', 'c'], 10 ** 6)) + >>> s.info() + + RangeIndex: 1000000 entries, 0 to 999999 + Series name: None + Non-Null Count Dtype + -------------- ----- + 1000000 non-null object + dtypes: object(1) + memory usage: 7.6+ MB + + >>> s.info(memory_usage='deep') + + RangeIndex: 1000000 entries, 0 to 999999 + Series name: None + Non-Null Count Dtype + -------------- ----- + 1000000 non-null object + dtypes: object(1) + memory usage: 55.3 MB""" +) + + +series_see_also_sub = dedent( + """\ + Series.describe: Generate descriptive statistics of Series. + Series.memory_usage: Memory usage of Series.""" +) + + +series_sub_kwargs = { + "klass": "Series", + "type_sub": "", + "max_cols_sub": "", + "show_counts_sub": show_counts_sub, + "examples_sub": series_examples_sub, + "see_also_sub": series_see_also_sub, + "version_added_sub": "\n.. versionadded:: 1.4.0\n", +} + + +INFO_DOCSTRING = dedent( + """ + Print a concise summary of a {klass}. + + This method prints information about a {klass} including + the index dtype{type_sub}, non-null values and memory usage. + {version_added_sub}\ + + Parameters + ---------- + verbose : bool, optional + Whether to print the full summary. By default, the setting in + ``pandas.options.display.max_info_columns`` is followed. 
+ buf : writable buffer, defaults to sys.stdout + Where to send the output. By default, the output is printed to + sys.stdout. Pass a writable buffer if you need to further process + the output. + {max_cols_sub} + memory_usage : bool, str, optional + Specifies whether total memory usage of the {klass} + elements (including the index) should be displayed. By default, + this follows the ``pandas.options.display.memory_usage`` setting. + + True always show memory usage. False never shows memory usage. + A value of 'deep' is equivalent to "True with deep introspection". + Memory usage is shown in human-readable units (base-2 + representation). Without deep introspection a memory estimation is + made based in column dtype and number of rows assuming values + consume the same memory amount for corresponding dtypes. With deep + memory introspection, a real memory usage calculation is performed + at the cost of computational resources. See the + :ref:`Frequently Asked Questions ` for more + details. + {show_counts_sub} + + Returns + ------- + None + This method prints a summary of a {klass} and returns None. + + See Also + -------- + {see_also_sub} + + Examples + -------- + {examples_sub} + """ +) + + +def _put_str(s: str | Dtype, space: int) -> str: + """ + Make string of specified length, padding to the right if necessary. + + Parameters + ---------- + s : Union[str, Dtype] + String to be formatted. + space : int + Length to force string to be of. + + Returns + ------- + str + String coerced to given length. + + Examples + -------- + >>> pd.io.formats.info._put_str("panda", 6) + 'panda ' + >>> pd.io.formats.info._put_str("panda", 4) + 'pand' + """ + return str(s)[:space].ljust(space) + + +def _sizeof_fmt(num: float, size_qualifier: str) -> str: + """ + Return size in human readable format. + + Parameters + ---------- + num : int + Size in bytes. + size_qualifier : str + Either empty, or '+' (if lower bound). + + Returns + ------- + str + Size in human readable format. + + Examples + -------- + >>> _sizeof_fmt(23028, '') + '22.5 KB' + + >>> _sizeof_fmt(23028, '+') + '22.5+ KB' + """ + for x in ["bytes", "KB", "MB", "GB", "TB"]: + if num < 1024.0: + return f"{num:3.1f}{size_qualifier} {x}" + num /= 1024.0 + return f"{num:3.1f}{size_qualifier} PB" + + +def _initialize_memory_usage( + memory_usage: bool | str | None = None, +) -> bool | str: + """Get memory usage based on inputs and display options.""" + if memory_usage is None: + memory_usage = get_option("display.memory_usage") + return memory_usage + + +class _BaseInfo(ABC): + """ + Base class for DataFrameInfo and SeriesInfo. + + Parameters + ---------- + data : DataFrame or Series + Either dataframe or series. + memory_usage : bool or str, optional + If "deep", introspect the data deeply by interrogating object dtypes + for system-level memory consumption, and include it in the returned + values. + """ + + data: DataFrame | Series + memory_usage: bool | str + + @property + @abstractmethod + def dtypes(self) -> Iterable[Dtype]: + """ + Dtypes. + + Returns + ------- + dtypes : sequence + Dtype of each of the DataFrame's columns (or one series column). + """ + + @property + @abstractmethod + def dtype_counts(self) -> Mapping[str, int]: + """Mapping dtype - number of counts.""" + + @property + @abstractmethod + def non_null_counts(self) -> Sequence[int]: + """Sequence of non-null counts for all columns or column (if series).""" + + @property + @abstractmethod + def memory_usage_bytes(self) -> int: + """ + Memory usage in bytes. 
+ + Returns + ------- + memory_usage_bytes : int + Object's total memory usage in bytes. + """ + + @property + def memory_usage_string(self) -> str: + """Memory usage in a form of human readable string.""" + return f"{_sizeof_fmt(self.memory_usage_bytes, self.size_qualifier)}\n" + + @property + def size_qualifier(self) -> str: + size_qualifier = "" + if self.memory_usage: + if self.memory_usage != "deep": + # size_qualifier is just a best effort; not guaranteed to catch + # all cases (e.g., it misses categorical data even with object + # categories) + if ( + "object" in self.dtype_counts + or self.data.index._is_memory_usage_qualified() + ): + size_qualifier = "+" + return size_qualifier + + @abstractmethod + def render( + self, + *, + buf: WriteBuffer[str] | None, + max_cols: int | None, + verbose: bool | None, + show_counts: bool | None, + ) -> None: + pass + + +class DataFrameInfo(_BaseInfo): + """ + Class storing dataframe-specific info. + """ + + def __init__( + self, + data: DataFrame, + memory_usage: bool | str | None = None, + ) -> None: + self.data: DataFrame = data + self.memory_usage = _initialize_memory_usage(memory_usage) + + @property + def dtype_counts(self) -> Mapping[str, int]: + return _get_dataframe_dtype_counts(self.data) + + @property + def dtypes(self) -> Iterable[Dtype]: + """ + Dtypes. + + Returns + ------- + dtypes + Dtype of each of the DataFrame's columns. + """ + return self.data.dtypes + + @property + def ids(self) -> Index: + """ + Column names. + + Returns + ------- + ids : Index + DataFrame's column names. + """ + return self.data.columns + + @property + def col_count(self) -> int: + """Number of columns to be summarized.""" + return len(self.ids) + + @property + def non_null_counts(self) -> Sequence[int]: + """Sequence of non-null counts for all columns or column (if series).""" + return self.data.count() + + @property + def memory_usage_bytes(self) -> int: + deep = self.memory_usage == "deep" + return self.data.memory_usage(index=True, deep=deep).sum() + + def render( + self, + *, + buf: WriteBuffer[str] | None, + max_cols: int | None, + verbose: bool | None, + show_counts: bool | None, + ) -> None: + printer = _DataFrameInfoPrinter( + info=self, + max_cols=max_cols, + verbose=verbose, + show_counts=show_counts, + ) + printer.to_buffer(buf) + + +class SeriesInfo(_BaseInfo): + """ + Class storing series-specific info. + """ + + def __init__( + self, + data: Series, + memory_usage: bool | str | None = None, + ) -> None: + self.data: Series = data + self.memory_usage = _initialize_memory_usage(memory_usage) + + def render( + self, + *, + buf: WriteBuffer[str] | None = None, + max_cols: int | None = None, + verbose: bool | None = None, + show_counts: bool | None = None, + ) -> None: + if max_cols is not None: + raise ValueError( + "Argument `max_cols` can only be passed " + "in DataFrame.info, not Series.info" + ) + printer = _SeriesInfoPrinter( + info=self, + verbose=verbose, + show_counts=show_counts, + ) + printer.to_buffer(buf) + + @property + def non_null_counts(self) -> Sequence[int]: + return [self.data.count()] + + @property + def dtypes(self) -> Iterable[Dtype]: + return [self.data.dtypes] + + @property + def dtype_counts(self) -> Mapping[str, int]: + from pandas.core.frame import DataFrame + + return _get_dataframe_dtype_counts(DataFrame(self.data)) + + @property + def memory_usage_bytes(self) -> int: + """Memory usage in bytes. + + Returns + ------- + memory_usage_bytes : int + Object's total memory usage in bytes. 
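+
+        Examples
+        --------
+        Illustrative sketch only (an editorial addition, not part of the
+        original docstring); the exact byte count varies by platform and
+        pandas version:
+
+        >>> si = SeriesInfo(pd.Series(["a", "b", "c"]), memory_usage="deep")
+        >>> si.memory_usage_bytes  # doctest: +SKIP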
+ """ + deep = self.memory_usage == "deep" + return self.data.memory_usage(index=True, deep=deep) + + +class _InfoPrinterAbstract: + """ + Class for printing dataframe or series info. + """ + + def to_buffer(self, buf: WriteBuffer[str] | None = None) -> None: + """Save dataframe info into buffer.""" + table_builder = self._create_table_builder() + lines = table_builder.get_lines() + if buf is None: # pragma: no cover + buf = sys.stdout + fmt.buffer_put_lines(buf, lines) + + @abstractmethod + def _create_table_builder(self) -> _TableBuilderAbstract: + """Create instance of table builder.""" + + +class _DataFrameInfoPrinter(_InfoPrinterAbstract): + """ + Class for printing dataframe info. + + Parameters + ---------- + info : DataFrameInfo + Instance of DataFrameInfo. + max_cols : int, optional + When to switch from the verbose to the truncated output. + verbose : bool, optional + Whether to print the full summary. + show_counts : bool, optional + Whether to show the non-null counts. + """ + + def __init__( + self, + info: DataFrameInfo, + max_cols: int | None = None, + verbose: bool | None = None, + show_counts: bool | None = None, + ) -> None: + self.info = info + self.data = info.data + self.verbose = verbose + self.max_cols = self._initialize_max_cols(max_cols) + self.show_counts = self._initialize_show_counts(show_counts) + + @property + def max_rows(self) -> int: + """Maximum info rows to be displayed.""" + return get_option("display.max_info_rows", len(self.data) + 1) + + @property + def exceeds_info_cols(self) -> bool: + """Check if number of columns to be summarized does not exceed maximum.""" + return bool(self.col_count > self.max_cols) + + @property + def exceeds_info_rows(self) -> bool: + """Check if number of rows to be summarized does not exceed maximum.""" + return bool(len(self.data) > self.max_rows) + + @property + def col_count(self) -> int: + """Number of columns to be summarized.""" + return self.info.col_count + + def _initialize_max_cols(self, max_cols: int | None) -> int: + if max_cols is None: + return get_option("display.max_info_columns", self.col_count + 1) + return max_cols + + def _initialize_show_counts(self, show_counts: bool | None) -> bool: + if show_counts is None: + return bool(not self.exceeds_info_cols and not self.exceeds_info_rows) + else: + return show_counts + + def _create_table_builder(self) -> _DataFrameTableBuilder: + """ + Create instance of table builder based on verbosity and display settings. + """ + if self.verbose: + return _DataFrameTableBuilderVerbose( + info=self.info, + with_counts=self.show_counts, + ) + elif self.verbose is False: # specifically set to False, not necessarily None + return _DataFrameTableBuilderNonVerbose(info=self.info) + elif self.exceeds_info_cols: + return _DataFrameTableBuilderNonVerbose(info=self.info) + else: + return _DataFrameTableBuilderVerbose( + info=self.info, + with_counts=self.show_counts, + ) + + +class _SeriesInfoPrinter(_InfoPrinterAbstract): + """Class for printing series info. + + Parameters + ---------- + info : SeriesInfo + Instance of SeriesInfo. + verbose : bool, optional + Whether to print the full summary. + show_counts : bool, optional + Whether to show the non-null counts. 
+ """ + + def __init__( + self, + info: SeriesInfo, + verbose: bool | None = None, + show_counts: bool | None = None, + ) -> None: + self.info = info + self.data = info.data + self.verbose = verbose + self.show_counts = self._initialize_show_counts(show_counts) + + def _create_table_builder(self) -> _SeriesTableBuilder: + """ + Create instance of table builder based on verbosity. + """ + if self.verbose or self.verbose is None: + return _SeriesTableBuilderVerbose( + info=self.info, + with_counts=self.show_counts, + ) + else: + return _SeriesTableBuilderNonVerbose(info=self.info) + + def _initialize_show_counts(self, show_counts: bool | None) -> bool: + if show_counts is None: + return True + else: + return show_counts + + +class _TableBuilderAbstract(ABC): + """ + Abstract builder for info table. + """ + + _lines: list[str] + info: _BaseInfo + + @abstractmethod + def get_lines(self) -> list[str]: + """Product in a form of list of lines (strings).""" + + @property + def data(self) -> DataFrame | Series: + return self.info.data + + @property + def dtypes(self) -> Iterable[Dtype]: + """Dtypes of each of the DataFrame's columns.""" + return self.info.dtypes + + @property + def dtype_counts(self) -> Mapping[str, int]: + """Mapping dtype - number of counts.""" + return self.info.dtype_counts + + @property + def display_memory_usage(self) -> bool: + """Whether to display memory usage.""" + return bool(self.info.memory_usage) + + @property + def memory_usage_string(self) -> str: + """Memory usage string with proper size qualifier.""" + return self.info.memory_usage_string + + @property + def non_null_counts(self) -> Sequence[int]: + return self.info.non_null_counts + + def add_object_type_line(self) -> None: + """Add line with string representation of dataframe to the table.""" + self._lines.append(str(type(self.data))) + + def add_index_range_line(self) -> None: + """Add line with range of indices to the table.""" + self._lines.append(self.data.index._summary()) + + def add_dtypes_line(self) -> None: + """Add summary line with dtypes present in dataframe.""" + collected_dtypes = [ + f"{key}({val:d})" for key, val in sorted(self.dtype_counts.items()) + ] + self._lines.append(f"dtypes: {', '.join(collected_dtypes)}") + + +class _DataFrameTableBuilder(_TableBuilderAbstract): + """ + Abstract builder for dataframe info table. + + Parameters + ---------- + info : DataFrameInfo. + Instance of DataFrameInfo. 
+ """ + + def __init__(self, *, info: DataFrameInfo) -> None: + self.info: DataFrameInfo = info + + def get_lines(self) -> list[str]: + self._lines = [] + if self.col_count == 0: + self._fill_empty_info() + else: + self._fill_non_empty_info() + return self._lines + + def _fill_empty_info(self) -> None: + """Add lines to the info table, pertaining to empty dataframe.""" + self.add_object_type_line() + self.add_index_range_line() + self._lines.append(f"Empty {type(self.data).__name__}\n") + + @abstractmethod + def _fill_non_empty_info(self) -> None: + """Add lines to the info table, pertaining to non-empty dataframe.""" + + @property + def data(self) -> DataFrame: + """DataFrame.""" + return self.info.data + + @property + def ids(self) -> Index: + """Dataframe columns.""" + return self.info.ids + + @property + def col_count(self) -> int: + """Number of dataframe columns to be summarized.""" + return self.info.col_count + + def add_memory_usage_line(self) -> None: + """Add line containing memory usage.""" + self._lines.append(f"memory usage: {self.memory_usage_string}") + + +class _DataFrameTableBuilderNonVerbose(_DataFrameTableBuilder): + """ + Dataframe info table builder for non-verbose output. + """ + + def _fill_non_empty_info(self) -> None: + """Add lines to the info table, pertaining to non-empty dataframe.""" + self.add_object_type_line() + self.add_index_range_line() + self.add_columns_summary_line() + self.add_dtypes_line() + if self.display_memory_usage: + self.add_memory_usage_line() + + def add_columns_summary_line(self) -> None: + self._lines.append(self.ids._summary(name="Columns")) + + +class _TableBuilderVerboseMixin(_TableBuilderAbstract): + """ + Mixin for verbose info output. + """ + + SPACING: str = " " * 2 + strrows: Sequence[Sequence[str]] + gross_column_widths: Sequence[int] + with_counts: bool + + @property + @abstractmethod + def headers(self) -> Sequence[str]: + """Headers names of the columns in verbose table.""" + + @property + def header_column_widths(self) -> Sequence[int]: + """Widths of header columns (only titles).""" + return [len(col) for col in self.headers] + + def _get_gross_column_widths(self) -> Sequence[int]: + """Get widths of columns containing both headers and actual content.""" + body_column_widths = self._get_body_column_widths() + return [ + max(*widths) + for widths in zip(self.header_column_widths, body_column_widths) + ] + + def _get_body_column_widths(self) -> Sequence[int]: + """Get widths of table content columns.""" + strcols: Sequence[Sequence[str]] = list(zip(*self.strrows)) + return [max(len(x) for x in col) for col in strcols] + + def _gen_rows(self) -> Iterator[Sequence[str]]: + """ + Generator function yielding rows content. + + Each element represents a row comprising a sequence of strings. 
+ """ + if self.with_counts: + return self._gen_rows_with_counts() + else: + return self._gen_rows_without_counts() + + @abstractmethod + def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]: + """Iterator with string representation of body data with counts.""" + + @abstractmethod + def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]: + """Iterator with string representation of body data without counts.""" + + def add_header_line(self) -> None: + header_line = self.SPACING.join( + [ + _put_str(header, col_width) + for header, col_width in zip(self.headers, self.gross_column_widths) + ] + ) + self._lines.append(header_line) + + def add_separator_line(self) -> None: + separator_line = self.SPACING.join( + [ + _put_str("-" * header_colwidth, gross_colwidth) + for header_colwidth, gross_colwidth in zip( + self.header_column_widths, self.gross_column_widths + ) + ] + ) + self._lines.append(separator_line) + + def add_body_lines(self) -> None: + for row in self.strrows: + body_line = self.SPACING.join( + [ + _put_str(col, gross_colwidth) + for col, gross_colwidth in zip(row, self.gross_column_widths) + ] + ) + self._lines.append(body_line) + + def _gen_non_null_counts(self) -> Iterator[str]: + """Iterator with string representation of non-null counts.""" + for count in self.non_null_counts: + yield f"{count} non-null" + + def _gen_dtypes(self) -> Iterator[str]: + """Iterator with string representation of column dtypes.""" + for dtype in self.dtypes: + yield pprint_thing(dtype) + + +class _DataFrameTableBuilderVerbose(_DataFrameTableBuilder, _TableBuilderVerboseMixin): + """ + Dataframe info table builder for verbose output. + """ + + def __init__( + self, + *, + info: DataFrameInfo, + with_counts: bool, + ) -> None: + self.info = info + self.with_counts = with_counts + self.strrows: Sequence[Sequence[str]] = list(self._gen_rows()) + self.gross_column_widths: Sequence[int] = self._get_gross_column_widths() + + def _fill_non_empty_info(self) -> None: + """Add lines to the info table, pertaining to non-empty dataframe.""" + self.add_object_type_line() + self.add_index_range_line() + self.add_columns_summary_line() + self.add_header_line() + self.add_separator_line() + self.add_body_lines() + self.add_dtypes_line() + if self.display_memory_usage: + self.add_memory_usage_line() + + @property + def headers(self) -> Sequence[str]: + """Headers names of the columns in verbose table.""" + if self.with_counts: + return [" # ", "Column", "Non-Null Count", "Dtype"] + return [" # ", "Column", "Dtype"] + + def add_columns_summary_line(self) -> None: + self._lines.append(f"Data columns (total {self.col_count} columns):") + + def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]: + """Iterator with string representation of body data without counts.""" + yield from zip( + self._gen_line_numbers(), + self._gen_columns(), + self._gen_dtypes(), + ) + + def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]: + """Iterator with string representation of body data with counts.""" + yield from zip( + self._gen_line_numbers(), + self._gen_columns(), + self._gen_non_null_counts(), + self._gen_dtypes(), + ) + + def _gen_line_numbers(self) -> Iterator[str]: + """Iterator with string representation of column numbers.""" + for i, _ in enumerate(self.ids): + yield f" {i}" + + def _gen_columns(self) -> Iterator[str]: + """Iterator with string representation of column names.""" + for col in self.ids: + yield pprint_thing(col) + + +class _SeriesTableBuilder(_TableBuilderAbstract): + """ + Abstract 
builder for series info table. + + Parameters + ---------- + info : SeriesInfo. + Instance of SeriesInfo. + """ + + def __init__(self, *, info: SeriesInfo) -> None: + self.info: SeriesInfo = info + + def get_lines(self) -> list[str]: + self._lines = [] + self._fill_non_empty_info() + return self._lines + + @property + def data(self) -> Series: + """Series.""" + return self.info.data + + def add_memory_usage_line(self) -> None: + """Add line containing memory usage.""" + self._lines.append(f"memory usage: {self.memory_usage_string}") + + @abstractmethod + def _fill_non_empty_info(self) -> None: + """Add lines to the info table, pertaining to non-empty series.""" + + +class _SeriesTableBuilderNonVerbose(_SeriesTableBuilder): + """ + Series info table builder for non-verbose output. + """ + + def _fill_non_empty_info(self) -> None: + """Add lines to the info table, pertaining to non-empty series.""" + self.add_object_type_line() + self.add_index_range_line() + self.add_dtypes_line() + if self.display_memory_usage: + self.add_memory_usage_line() + + +class _SeriesTableBuilderVerbose(_SeriesTableBuilder, _TableBuilderVerboseMixin): + """ + Series info table builder for verbose output. + """ + + def __init__( + self, + *, + info: SeriesInfo, + with_counts: bool, + ) -> None: + self.info = info + self.with_counts = with_counts + self.strrows: Sequence[Sequence[str]] = list(self._gen_rows()) + self.gross_column_widths: Sequence[int] = self._get_gross_column_widths() + + def _fill_non_empty_info(self) -> None: + """Add lines to the info table, pertaining to non-empty series.""" + self.add_object_type_line() + self.add_index_range_line() + self.add_series_name_line() + self.add_header_line() + self.add_separator_line() + self.add_body_lines() + self.add_dtypes_line() + if self.display_memory_usage: + self.add_memory_usage_line() + + def add_series_name_line(self) -> None: + self._lines.append(f"Series name: {self.data.name}") + + @property + def headers(self) -> Sequence[str]: + """Headers names of the columns in verbose table.""" + if self.with_counts: + return ["Non-Null Count", "Dtype"] + return ["Dtype"] + + def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]: + """Iterator with string representation of body data without counts.""" + yield from self._gen_dtypes() + + def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]: + """Iterator with string representation of body data with counts.""" + yield from zip( + self._gen_non_null_counts(), + self._gen_dtypes(), + ) + + +def _get_dataframe_dtype_counts(df: DataFrame) -> Mapping[str, int]: + """ + Create mapping between datatypes and their number of occurrences. + """ + # groupby dtype.name to collect e.g. Categorical columns + return df.dtypes.value_counts().groupby(lambda x: x.name).sum() diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/printing.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/printing.py new file mode 100644 index 0000000000000000000000000000000000000000..2cc9368f8846a6423655040673df283d111efeda --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/printing.py @@ -0,0 +1,572 @@ +""" +Printing tools. 
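+
+For example (an illustrative sketch, not part of the original module
+docstring):
+
+>>> from pandas.io.formats.printing import pprint_thing
+>>> pprint_thing([1, 2, 3])
+'[1, 2, 3]'
+>>> pprint_thing({"a": [1, 2]})
+"{'a': [1, 2]}"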
+""" +from __future__ import annotations + +from collections.abc import ( + Iterable, + Mapping, + Sequence, +) +import sys +from typing import ( + Any, + Callable, + TypeVar, + Union, +) +from unicodedata import east_asian_width + +from pandas._config import get_option + +from pandas.core.dtypes.inference import is_sequence + +from pandas.io.formats.console import get_console_size + +EscapeChars = Union[Mapping[str, str], Iterable[str]] +_KT = TypeVar("_KT") +_VT = TypeVar("_VT") + + +def adjoin(space: int, *lists: list[str], **kwargs) -> str: + """ + Glues together two sets of strings using the amount of space requested. + The idea is to prettify. + + ---------- + space : int + number of spaces for padding + lists : str + list of str which being joined + strlen : callable + function used to calculate the length of each str. Needed for unicode + handling. + justfunc : callable + function used to justify str. Needed for unicode handling. + """ + strlen = kwargs.pop("strlen", len) + justfunc = kwargs.pop("justfunc", _adj_justify) + + newLists = [] + lengths = [max(map(strlen, x)) + space for x in lists[:-1]] + # not the last one + lengths.append(max(map(len, lists[-1]))) + maxLen = max(map(len, lists)) + for i, lst in enumerate(lists): + nl = justfunc(lst, lengths[i], mode="left") + nl = ([" " * lengths[i]] * (maxLen - len(lst))) + nl + newLists.append(nl) + toJoin = zip(*newLists) + return "\n".join("".join(lines) for lines in toJoin) + + +def _adj_justify(texts: Iterable[str], max_len: int, mode: str = "right") -> list[str]: + """ + Perform ljust, center, rjust against string or list-like + """ + if mode == "left": + return [x.ljust(max_len) for x in texts] + elif mode == "center": + return [x.center(max_len) for x in texts] + else: + return [x.rjust(max_len) for x in texts] + + +# Unicode consolidation +# --------------------- +# +# pprinting utility functions for generating Unicode text or +# bytes(3.x)/str(2.x) representations of objects. +# Try to use these as much as possible rather than rolling your own. +# +# When to use +# ----------- +# +# 1) If you're writing code internal to pandas (no I/O directly involved), +# use pprint_thing(). +# +# It will always return unicode text which can handled by other +# parts of the package without breakage. +# +# 2) if you need to write something out to file, use +# pprint_thing_encoded(encoding). +# +# If no encoding is specified, it defaults to utf-8. Since encoding pure +# ascii with utf-8 is a no-op you can safely use the default utf-8 if you're +# working with straight ascii. + + +def _pprint_seq( + seq: Sequence, _nest_lvl: int = 0, max_seq_items: int | None = None, **kwds +) -> str: + """ + internal. pprinter for iterables. you should probably use pprint_thing() + rather than calling this directly. + + bounds length of printed sequence, depending on options + """ + if isinstance(seq, set): + fmt = "{{{body}}}" + else: + fmt = "[{body}]" if hasattr(seq, "__setitem__") else "({body})" + + if max_seq_items is False: + nitems = len(seq) + else: + nitems = max_seq_items or get_option("max_seq_items") or len(seq) + + s = iter(seq) + # handle sets, no slicing + r = [ + pprint_thing(next(s), _nest_lvl + 1, max_seq_items=max_seq_items, **kwds) + for i in range(min(nitems, len(seq))) + ] + body = ", ".join(r) + + if nitems < len(seq): + body += ", ..." 
+ elif isinstance(seq, tuple) and len(seq) == 1: + body += "," + + return fmt.format(body=body) + + +def _pprint_dict( + seq: Mapping, _nest_lvl: int = 0, max_seq_items: int | None = None, **kwds +) -> str: + """ + internal. pprinter for iterables. you should probably use pprint_thing() + rather than calling this directly. + """ + fmt = "{{{things}}}" + pairs = [] + + pfmt = "{key}: {val}" + + if max_seq_items is False: + nitems = len(seq) + else: + nitems = max_seq_items or get_option("max_seq_items") or len(seq) + + for k, v in list(seq.items())[:nitems]: + pairs.append( + pfmt.format( + key=pprint_thing(k, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds), + val=pprint_thing(v, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds), + ) + ) + + if nitems < len(seq): + return fmt.format(things=", ".join(pairs) + ", ...") + else: + return fmt.format(things=", ".join(pairs)) + + +def pprint_thing( + thing: Any, + _nest_lvl: int = 0, + escape_chars: EscapeChars | None = None, + default_escapes: bool = False, + quote_strings: bool = False, + max_seq_items: int | None = None, +) -> str: + """ + This function is the sanctioned way of converting objects + to a string representation and properly handles nested sequences. + + Parameters + ---------- + thing : anything to be formatted + _nest_lvl : internal use only. pprint_thing() is mutually-recursive + with pprint_sequence, this argument is used to keep track of the + current nesting level, and limit it. + escape_chars : list or dict, optional + Characters to escape. If a dict is passed the values are the + replacements + default_escapes : bool, default False + Whether the input escape characters replaces or adds to the defaults + max_seq_items : int or None, default None + Pass through to other pretty printers to limit sequence printing + + Returns + ------- + str + """ + + def as_escaped_string( + thing: Any, escape_chars: EscapeChars | None = escape_chars + ) -> str: + translate = {"\t": r"\t", "\n": r"\n", "\r": r"\r"} + if isinstance(escape_chars, dict): + if default_escapes: + translate.update(escape_chars) + else: + translate = escape_chars + escape_chars = list(escape_chars.keys()) + else: + escape_chars = escape_chars or () + + result = str(thing) + for c in escape_chars: + result = result.replace(c, translate[c]) + return result + + if hasattr(thing, "__next__"): + return str(thing) + elif isinstance(thing, dict) and _nest_lvl < get_option( + "display.pprint_nest_depth" + ): + result = _pprint_dict( + thing, _nest_lvl, quote_strings=True, max_seq_items=max_seq_items + ) + elif is_sequence(thing) and _nest_lvl < get_option("display.pprint_nest_depth"): + result = _pprint_seq( + thing, + _nest_lvl, + escape_chars=escape_chars, + quote_strings=quote_strings, + max_seq_items=max_seq_items, + ) + elif isinstance(thing, str) and quote_strings: + result = f"'{as_escaped_string(thing)}'" + else: + result = as_escaped_string(thing) + + return result + + +def pprint_thing_encoded( + object, encoding: str = "utf-8", errors: str = "replace" +) -> bytes: + value = pprint_thing(object) # get unicode representation of object + return value.encode(encoding, errors) + + +def enable_data_resource_formatter(enable: bool) -> None: + if "IPython" not in sys.modules: + # definitely not in IPython + return + from IPython import get_ipython + + ip = get_ipython() + if ip is None: + # still not in IPython + return + + formatters = ip.display_formatter.formatters + mimetype = "application/vnd.dataresource+json" + + if enable: + if mimetype not in formatters: + 
# define tableschema formatter + from IPython.core.formatters import BaseFormatter + from traitlets import ObjectName + + class TableSchemaFormatter(BaseFormatter): + print_method = ObjectName("_repr_data_resource_") + _return_type = (dict,) + + # register it: + formatters[mimetype] = TableSchemaFormatter() + # enable it if it's been disabled: + formatters[mimetype].enabled = True + # unregister tableschema mime-type + elif mimetype in formatters: + formatters[mimetype].enabled = False + + +def default_pprint(thing: Any, max_seq_items: int | None = None) -> str: + return pprint_thing( + thing, + escape_chars=("\t", "\r", "\n"), + quote_strings=True, + max_seq_items=max_seq_items, + ) + + +def format_object_summary( + obj, + formatter: Callable, + is_justify: bool = True, + name: str | None = None, + indent_for_name: bool = True, + line_break_each_value: bool = False, +) -> str: + """ + Return the formatted obj as a unicode string + + Parameters + ---------- + obj : object + must be iterable and support __getitem__ + formatter : callable + string formatter for an element + is_justify : bool + should justify the display + name : name, optional + defaults to the class name of the obj + indent_for_name : bool, default True + Whether subsequent lines should be indented to + align with the name. + line_break_each_value : bool, default False + If True, inserts a line break for each value of ``obj``. + If False, only break lines when the a line of values gets wider + than the display width. + + Returns + ------- + summary string + """ + display_width, _ = get_console_size() + if display_width is None: + display_width = get_option("display.width") or 80 + if name is None: + name = type(obj).__name__ + + if indent_for_name: + name_len = len(name) + space1 = f'\n{(" " * (name_len + 1))}' + space2 = f'\n{(" " * (name_len + 2))}' + else: + space1 = "\n" + space2 = "\n " # space for the opening '[' + + n = len(obj) + if line_break_each_value: + # If we want to vertically align on each value of obj, we need to + # separate values by a line break and indent the values + sep = ",\n " + " " * len(name) + else: + sep = "," + max_seq_items = get_option("display.max_seq_items") or n + + # are we a truncated display + is_truncated = n > max_seq_items + + # adj can optionally handle unicode eastern asian width + adj = get_adjustment() + + def _extend_line( + s: str, line: str, value: str, display_width: int, next_line_prefix: str + ) -> tuple[str, str]: + if adj.len(line.rstrip()) + adj.len(value.rstrip()) >= display_width: + s += line.rstrip() + line = next_line_prefix + line += value + return s, line + + def best_len(values: list[str]) -> int: + if values: + return max(adj.len(x) for x in values) + else: + return 0 + + close = ", " + + if n == 0: + summary = f"[]{close}" + elif n == 1 and not line_break_each_value: + first = formatter(obj[0]) + summary = f"[{first}]{close}" + elif n == 2 and not line_break_each_value: + first = formatter(obj[0]) + last = formatter(obj[-1]) + summary = f"[{first}, {last}]{close}" + else: + if max_seq_items == 1: + # If max_seq_items=1 show only last element + head = [] + tail = [formatter(x) for x in obj[-1:]] + elif n > max_seq_items: + n = min(max_seq_items // 2, 10) + head = [formatter(x) for x in obj[:n]] + tail = [formatter(x) for x in obj[-n:]] + else: + head = [] + tail = [formatter(x) for x in obj] + + # adjust all values to max length if needed + if is_justify: + if line_break_each_value: + # Justify each string in the values of head and tail, so the + # strings will 
right align when head and tail are stacked + # vertically. + head, tail = _justify(head, tail) + elif is_truncated or not ( + len(", ".join(head)) < display_width + and len(", ".join(tail)) < display_width + ): + # Each string in head and tail should align with each other + max_length = max(best_len(head), best_len(tail)) + head = [x.rjust(max_length) for x in head] + tail = [x.rjust(max_length) for x in tail] + # If we are not truncated and we are only a single + # line, then don't justify + + if line_break_each_value: + # Now head and tail are of type List[Tuple[str]]. Below we + # convert them into List[str], so there will be one string per + # value. Also truncate items horizontally if wider than + # max_space + max_space = display_width - len(space2) + value = tail[0] + max_items = 1 + for num_items in reversed(range(1, len(value) + 1)): + pprinted_seq = _pprint_seq(value, max_seq_items=num_items) + if len(pprinted_seq) < max_space: + max_items = num_items + break + head = [_pprint_seq(x, max_seq_items=max_items) for x in head] + tail = [_pprint_seq(x, max_seq_items=max_items) for x in tail] + + summary = "" + line = space2 + + for head_value in head: + word = head_value + sep + " " + summary, line = _extend_line(summary, line, word, display_width, space2) + + if is_truncated: + # remove trailing space of last line + summary += line.rstrip() + space2 + "..." + line = space2 + + for tail_item in tail[:-1]: + word = tail_item + sep + " " + summary, line = _extend_line(summary, line, word, display_width, space2) + + # last value: no sep added + 1 space of width used for trailing ',' + summary, line = _extend_line(summary, line, tail[-1], display_width - 2, space2) + summary += line + + # right now close is either '' or ', ' + # Now we want to include the ']', but not the maybe space. + close = "]" + close.rstrip(" ") + summary += close + + if len(summary) > (display_width) or line_break_each_value: + summary += space1 + else: # one row + summary += " " + + # remove initial space + summary = "[" + summary[len(space2) :] + + return summary + + +def _justify( + head: list[Sequence[str]], tail: list[Sequence[str]] +) -> tuple[list[tuple[str, ...]], list[tuple[str, ...]]]: + """ + Justify items in head and tail, so they are right-aligned when stacked. + + Parameters + ---------- + head : list-like of list-likes of strings + tail : list-like of list-likes of strings + + Returns + ------- + tuple of list of tuples of strings + Same as head and tail, but items are right aligned when stacked + vertically. + + Examples + -------- + >>> _justify([['a', 'b']], [['abc', 'abcd']]) + ([(' a', ' b')], [('abc', 'abcd')]) + """ + combined = head + tail + + # For each position for the sequences in ``combined``, + # find the length of the largest string. 
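+    # e.g. (illustrative) head=[("a", "bb")], tail=[("ccc", "d")]
+    # gives max_length == [3, 2]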
+ max_length = [0] * len(combined[0]) + for inner_seq in combined: + length = [len(item) for item in inner_seq] + max_length = [max(x, y) for x, y in zip(max_length, length)] + + # justify each item in each list-like in head and tail using max_length + head_tuples = [ + tuple(x.rjust(max_len) for x, max_len in zip(seq, max_length)) for seq in head + ] + tail_tuples = [ + tuple(x.rjust(max_len) for x, max_len in zip(seq, max_length)) for seq in tail + ] + return head_tuples, tail_tuples + + +class PrettyDict(dict[_KT, _VT]): + """Dict extension to support abbreviated __repr__""" + + def __repr__(self) -> str: + return pprint_thing(self) + + +class _TextAdjustment: + def __init__(self) -> None: + self.encoding = get_option("display.encoding") + + def len(self, text: str) -> int: + return len(text) + + def justify(self, texts: Any, max_len: int, mode: str = "right") -> list[str]: + """ + Perform ljust, center, rjust against string or list-like + """ + if mode == "left": + return [x.ljust(max_len) for x in texts] + elif mode == "center": + return [x.center(max_len) for x in texts] + else: + return [x.rjust(max_len) for x in texts] + + def adjoin(self, space: int, *lists, **kwargs) -> str: + return adjoin(space, *lists, strlen=self.len, justfunc=self.justify, **kwargs) + + +class _EastAsianTextAdjustment(_TextAdjustment): + def __init__(self) -> None: + super().__init__() + if get_option("display.unicode.ambiguous_as_wide"): + self.ambiguous_width = 2 + else: + self.ambiguous_width = 1 + + # Definition of East Asian Width + # https://unicode.org/reports/tr11/ + # Ambiguous width can be changed by option + self._EAW_MAP = {"Na": 1, "N": 1, "W": 2, "F": 2, "H": 1} + + def len(self, text: str) -> int: + """ + Calculate display width considering unicode East Asian Width + """ + if not isinstance(text, str): + return len(text) + + return sum( + self._EAW_MAP.get(east_asian_width(c), self.ambiguous_width) for c in text + ) + + def justify( + self, texts: Iterable[str], max_len: int, mode: str = "right" + ) -> list[str]: + # re-calculate padding space per str considering East Asian Width + def _get_pad(t): + return max_len - self.len(t) + len(t) + + if mode == "left": + return [x.ljust(_get_pad(x)) for x in texts] + elif mode == "center": + return [x.center(_get_pad(x)) for x in texts] + else: + return [x.rjust(_get_pad(x)) for x in texts] + + +def get_adjustment() -> _TextAdjustment: + use_east_asian_width = get_option("display.unicode.east_asian_width") + if use_east_asian_width: + return _EastAsianTextAdjustment() + else: + return _TextAdjustment() diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/string.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/string.py new file mode 100644 index 0000000000000000000000000000000000000000..cdad388592717dff79fde61bf35a12c0635034c1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/string.py @@ -0,0 +1,206 @@ +""" +Module for formatting output data in console (to string). 
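+
+For example (illustrative; StringFormatter is the engine behind
+DataFrame.to_string):
+
+>>> pd.DataFrame({"a": [1, 2]}).to_string()  # doctest: +SKIP
+'   a\n0  1\n1  2'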
+""" +from __future__ import annotations + +from shutil import get_terminal_size +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.io.formats.printing import pprint_thing + +if TYPE_CHECKING: + from collections.abc import Iterable + + from pandas.io.formats.format import DataFrameFormatter + + +class StringFormatter: + """Formatter for string representation of a dataframe.""" + + def __init__(self, fmt: DataFrameFormatter, line_width: int | None = None) -> None: + self.fmt = fmt + self.adj = fmt.adj + self.frame = fmt.frame + self.line_width = line_width + + def to_string(self) -> str: + text = self._get_string_representation() + if self.fmt.should_show_dimensions: + text = f"{text}{self.fmt.dimensions_info}" + return text + + def _get_strcols(self) -> list[list[str]]: + strcols = self.fmt.get_strcols() + if self.fmt.is_truncated: + strcols = self._insert_dot_separators(strcols) + return strcols + + def _get_string_representation(self) -> str: + if self.fmt.frame.empty: + return self._empty_info_line + + strcols = self._get_strcols() + + if self.line_width is None: + # no need to wrap around just print the whole frame + return self.adj.adjoin(1, *strcols) + + if self._need_to_wrap_around: + return self._join_multiline(strcols) + + return self._fit_strcols_to_terminal_width(strcols) + + @property + def _empty_info_line(self) -> str: + return ( + f"Empty {type(self.frame).__name__}\n" + f"Columns: {pprint_thing(self.frame.columns)}\n" + f"Index: {pprint_thing(self.frame.index)}" + ) + + @property + def _need_to_wrap_around(self) -> bool: + return bool(self.fmt.max_cols is None or self.fmt.max_cols > 0) + + def _insert_dot_separators(self, strcols: list[list[str]]) -> list[list[str]]: + str_index = self.fmt._get_formatted_index(self.fmt.tr_frame) + index_length = len(str_index) + + if self.fmt.is_truncated_horizontally: + strcols = self._insert_dot_separator_horizontal(strcols, index_length) + + if self.fmt.is_truncated_vertically: + strcols = self._insert_dot_separator_vertical(strcols, index_length) + + return strcols + + @property + def _adjusted_tr_col_num(self) -> int: + return self.fmt.tr_col_num + 1 if self.fmt.index else self.fmt.tr_col_num + + def _insert_dot_separator_horizontal( + self, strcols: list[list[str]], index_length: int + ) -> list[list[str]]: + strcols.insert(self._adjusted_tr_col_num, [" ..."] * index_length) + return strcols + + def _insert_dot_separator_vertical( + self, strcols: list[list[str]], index_length: int + ) -> list[list[str]]: + n_header_rows = index_length - len(self.fmt.tr_frame) + row_num = self.fmt.tr_row_num + for ix, col in enumerate(strcols): + cwidth = self.adj.len(col[row_num]) + + if self.fmt.is_truncated_horizontally: + is_dot_col = ix == self._adjusted_tr_col_num + else: + is_dot_col = False + + if cwidth > 3 or is_dot_col: + dots = "..." + else: + dots = ".." 
+ + if ix == 0 and self.fmt.index: + dot_mode = "left" + elif is_dot_col: + cwidth = 4 + dot_mode = "right" + else: + dot_mode = "right" + + dot_str = self.adj.justify([dots], cwidth, mode=dot_mode)[0] + col.insert(row_num + n_header_rows, dot_str) + return strcols + + def _join_multiline(self, strcols_input: Iterable[list[str]]) -> str: + lwidth = self.line_width + adjoin_width = 1 + strcols = list(strcols_input) + + if self.fmt.index: + idx = strcols.pop(0) + lwidth -= np.array([self.adj.len(x) for x in idx]).max() + adjoin_width + + col_widths = [ + np.array([self.adj.len(x) for x in col]).max() if len(col) > 0 else 0 + for col in strcols + ] + + assert lwidth is not None + col_bins = _binify(col_widths, lwidth) + nbins = len(col_bins) + + str_lst = [] + start = 0 + for i, end in enumerate(col_bins): + row = strcols[start:end] + if self.fmt.index: + row.insert(0, idx) + if nbins > 1: + nrows = len(row[-1]) + if end <= len(strcols) and i < nbins - 1: + row.append([" \\"] + [" "] * (nrows - 1)) + else: + row.append([" "] * nrows) + str_lst.append(self.adj.adjoin(adjoin_width, *row)) + start = end + return "\n\n".join(str_lst) + + def _fit_strcols_to_terminal_width(self, strcols: list[list[str]]) -> str: + from pandas import Series + + lines = self.adj.adjoin(1, *strcols).split("\n") + max_len = Series(lines).str.len().max() + # plus truncate dot col + width, _ = get_terminal_size() + dif = max_len - width + # '+ 1' to avoid too wide repr (GH PR #17023) + adj_dif = dif + 1 + col_lens = Series([Series(ele).str.len().max() for ele in strcols]) + n_cols = len(col_lens) + counter = 0 + while adj_dif > 0 and n_cols > 1: + counter += 1 + mid = round(n_cols / 2) + mid_ix = col_lens.index[mid] + col_len = col_lens[mid_ix] + # adjoin adds one + adj_dif -= col_len + 1 + col_lens = col_lens.drop(mid_ix) + n_cols = len(col_lens) + + # subtract index column + max_cols_fitted = n_cols - self.fmt.index + # GH-21180. Ensure that we print at least two. + max_cols_fitted = max(max_cols_fitted, 2) + self.fmt.max_cols_fitted = max_cols_fitted + + # Call again _truncate to cut frame appropriately + # and then generate string representation + self.fmt.truncate() + strcols = self._get_strcols() + return self.adj.adjoin(1, *strcols) + + +def _binify(cols: list[int], line_width: int) -> list[int]: + adjoin_width = 1 + bins = [] + curr_width = 0 + i_last_column = len(cols) - 1 + for i, w in enumerate(cols): + w_adjoined = w + adjoin_width + curr_width += w_adjoined + if i_last_column == i: + wrap = curr_width + 1 > line_width and i > 0 + else: + wrap = curr_width + 2 > line_width and i > 0 + if wrap: + bins.append(i) + curr_width = w_adjoined + + bins.append(len(cols)) + return bins diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/style.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/style.py new file mode 100644 index 0000000000000000000000000000000000000000..b62f7581ac2205c8c1821b6030b56f13a77c6379 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/style.py @@ -0,0 +1,4136 @@ +""" +Module for applying conditional formatting to DataFrames and Series. 
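+
+For example (an illustrative sketch; styling requires the optional jinja2
+dependency):
+
+>>> df = pd.DataFrame({"x": [1, -1]})
+>>> html = df.style.highlight_max().to_html()  # doctest: +SKIP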
+""" +from __future__ import annotations + +from contextlib import contextmanager +import copy +from functools import partial +import operator +from typing import ( + TYPE_CHECKING, + Any, + Callable, + overload, +) +import warnings + +import numpy as np + +from pandas._config import get_option + +from pandas.compat._optional import import_optional_dependency +from pandas.util._decorators import ( + Substitution, + doc, +) +from pandas.util._exceptions import find_stack_level + +import pandas as pd +from pandas import ( + IndexSlice, + RangeIndex, +) +import pandas.core.common as com +from pandas.core.frame import ( + DataFrame, + Series, +) +from pandas.core.generic import NDFrame +from pandas.core.shared_docs import _shared_docs + +from pandas.io.formats.format import save_to_buffer + +jinja2 = import_optional_dependency("jinja2", extra="DataFrame.style requires jinja2.") + +from pandas.io.formats.style_render import ( + CSSProperties, + CSSStyles, + ExtFormatter, + StylerRenderer, + Subset, + Tooltips, + format_table_styles, + maybe_convert_css_to_tuples, + non_reducing_slice, + refactor_levels, +) + +if TYPE_CHECKING: + from collections.abc import ( + Generator, + Hashable, + Sequence, + ) + + from matplotlib.colors import Colormap + + from pandas._typing import ( + Axis, + AxisInt, + FilePath, + IndexLabel, + IntervalClosedType, + Level, + QuantileInterpolation, + Scalar, + StorageOptions, + WriteBuffer, + WriteExcelBuffer, + ) + + from pandas import ExcelWriter + +try: + import matplotlib as mpl + import matplotlib.pyplot as plt + + has_mpl = True +except ImportError: + has_mpl = False + + +@contextmanager +def _mpl(func: Callable) -> Generator[tuple[Any, Any], None, None]: + if has_mpl: + yield plt, mpl + else: + raise ImportError(f"{func.__name__} requires matplotlib.") + + +#### +# Shared Doc Strings + +subset_args = """subset : label, array-like, IndexSlice, optional + A valid 2d input to `DataFrame.loc[]`, or, in the case of a 1d input + or single key, to `DataFrame.loc[:, ]` where the columns are + prioritised, to limit ``data`` to *before* applying the function.""" + +properties_args = """props : str, default None + CSS properties to use for highlighting. If ``props`` is given, ``color`` + is not used.""" + +coloring_args = """color : str, default '{default}' + Background color to use for highlighting.""" + +buffering_args = """buf : str, path object, file-like object, optional + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a string ``write()`` function. If ``None``, the result is + returned as a string.""" + +encoding_args = """encoding : str, optional + Character encoding setting for file output (and meta tags if available). + Defaults to ``pandas.options.styler.render.encoding`` value of "utf-8".""" + +# +### + + +class Styler(StylerRenderer): + r""" + Helps style a DataFrame or Series according to the data with HTML and CSS. + + Parameters + ---------- + data : Series or DataFrame + Data to be styled - either a Series or DataFrame. + precision : int, optional + Precision to round floats to. If not given defaults to + ``pandas.options.styler.format.precision``. + + .. versionchanged:: 1.4.0 + table_styles : list-like, default None + List of {selector: (attr, value)} dicts; see Notes. + uuid : str, default None + A unique identifier to avoid CSS collisions; generated automatically. + caption : str, tuple, default None + String caption to attach to the table. Tuple only used for LaTeX dual captions. 
+    table_attributes : str, default None
+        Items that show up in the opening ``<table>`` tag
+        in addition to automatic (by default) id.
+    cell_ids : bool, default True
+        If True, each cell will have an ``id`` attribute in their HTML tag.
+        The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
+        where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row
+        number and ``<num_col>`` is the column number.
+    na_rep : str, optional
+        Representation for missing values.
+        If ``na_rep`` is None, no special formatting is applied, and falls back to
+        ``pandas.options.styler.format.na_rep``.
+
+    uuid_len : int, default 5
+        If ``uuid`` is not specified, the length of the ``uuid`` to randomly generate
+        expressed in hex characters, in range [0, 32].
+    decimal : str, optional
+        Character used as decimal separator for floats, complex and integers. If not
+        given uses ``pandas.options.styler.format.decimal``.
+
+        .. versionadded:: 1.3.0
+
+    thousands : str, optional, default None
+        Character used as thousands separator for floats, complex and integers. If not
+        given uses ``pandas.options.styler.format.thousands``.
+
+        .. versionadded:: 1.3.0
+
+    escape : str, optional
+        Use 'html' to replace the characters ``&``, ``<``, ``>``, ``'``, and ``"``
+        in cell display string with HTML-safe sequences.
+        Use 'latex' to replace the characters ``&``, ``%``, ``$``, ``#``, ``_``,
+        ``{``, ``}``, ``~``, ``^``, and ``\`` in the cell display string with
+        LaTeX-safe sequences. Use 'latex-math' to replace the characters
+        the same way as in 'latex' mode, except for math substrings,
+        which either are surrounded by two characters ``$`` or start with
+        the character ``\(`` and end with ``\)``.
+        If not given uses ``pandas.options.styler.format.escape``.
+
+        .. versionadded:: 1.3.0
+    formatter : str, callable, dict, optional
+        Object to define how values are displayed. See ``Styler.format``. If not given
+        uses ``pandas.options.styler.format.formatter``.
+
+        .. versionadded:: 1.4.0
+
+    Attributes
+    ----------
+    env : Jinja2 jinja2.Environment
+    template_html : Jinja2 Template
+    template_html_table : Jinja2 Template
+    template_html_style : Jinja2 Template
+    template_latex : Jinja2 Template
+    loader : Jinja2 Loader
+
+    See Also
+    --------
+    DataFrame.style : Return a Styler object containing methods for building
+        a styled HTML representation for the DataFrame.
+
+    Notes
+    -----
+    Most styling will be done by passing style functions into
+    ``Styler.apply`` or ``Styler.map``. Style functions should
+    return values with strings containing CSS ``'attr: value'`` that will
+    be applied to the indicated cells.
+
+    If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
+    to automatically render itself. Otherwise call Styler.to_html to get
+    the generated HTML.
+
+    CSS classes are attached to the generated HTML
+
+    * Index and Column names include ``index_name`` and ``level<k>``
+      where `k` is its level in a MultiIndex
+    * Index label cells include
+
+      * ``row_heading``
+      * ``row<n>`` where `n` is the numeric position of the row
+      * ``level<k>`` where `k` is the level in a MultiIndex
+
+    * Column label cells include
+
+      * ``col_heading``
+      * ``col<n>`` where `n` is the numeric position of the column
+      * ``level<k>`` where `k` is the level in a MultiIndex
+
+    * Blank cells include ``blank``
+    * Data cells include ``data``
+    * Trimmed cells include ``col_trim`` or ``row_trim``.
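A minimal way to see these generated class names (a sketch, assuming default
settings; the surrounding HTML output is abbreviated):

    >>> html = pd.DataFrame([[1]]).style.to_html()   # doctest: +SKIP
    >>> 'class="data row0 col0"' in html             # doctest: +SKIP
    True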
+
+    Any, or all, of these classes can be renamed by using the ``css_class_names``
+    argument in ``Styler.set_table_styles``, giving a value such as
+    *{"row": "MY_ROW_CLASS", "col_trim": "", "row_trim": ""}*.
+
+    Examples
+    --------
+    >>> df = pd.DataFrame([[1.0, 2.0, 3.0], [4, 5, 6]], index=['a', 'b'],
+    ...                   columns=['A', 'B', 'C'])
+    >>> pd.io.formats.style.Styler(df, precision=2,
+    ...                            caption="My table")  # doctest: +SKIP
+
+    Please see:
+    `Table Visualization <../../user_guide/style.ipynb>`_ for more examples.
+    """
+
+    def __init__(
+        self,
+        data: DataFrame | Series,
+        precision: int | None = None,
+        table_styles: CSSStyles | None = None,
+        uuid: str | None = None,
+        caption: str | tuple | list | None = None,
+        table_attributes: str | None = None,
+        cell_ids: bool = True,
+        na_rep: str | None = None,
+        uuid_len: int = 5,
+        decimal: str | None = None,
+        thousands: str | None = None,
+        escape: str | None = None,
+        formatter: ExtFormatter | None = None,
+    ) -> None:
+        super().__init__(
+            data=data,
+            uuid=uuid,
+            uuid_len=uuid_len,
+            table_styles=table_styles,
+            table_attributes=table_attributes,
+            caption=caption,
+            cell_ids=cell_ids,
+            precision=precision,
+        )
+
+        # validate ordered args
+        thousands = thousands or get_option("styler.format.thousands")
+        decimal = decimal or get_option("styler.format.decimal")
+        na_rep = na_rep or get_option("styler.format.na_rep")
+        escape = escape or get_option("styler.format.escape")
+        formatter = formatter or get_option("styler.format.formatter")
+        # precision is handled by superclass as default for performance
+
+        self.format(
+            formatter=formatter,
+            precision=precision,
+            na_rep=na_rep,
+            escape=escape,
+            decimal=decimal,
+            thousands=thousands,
+        )
+
+    def concat(self, other: Styler) -> Styler:
+        """
+        Append another Styler to combine the output into a single table.
+
+        .. versionadded:: 1.5.0
+
+        Parameters
+        ----------
+        other : Styler
+            The other Styler object which has already been styled and formatted. The
+            data for this Styler must have the same columns as the original, and the
+            number of index levels must also be the same to render correctly.
+
+        Returns
+        -------
+        Styler
+
+        Notes
+        -----
+        The purpose of this method is to extend existing styled dataframes with other
+        metrics that may be useful but may not conform to the original's structure.
+        For example adding a sub total row, or displaying metrics such as means,
+        variance or counts.
+
+        Styles that are applied using the ``apply``, ``map``, ``apply_index``
+        and ``map_index``, and formatting applied with ``format`` and
+        ``format_index`` will be preserved.
+
+        .. warning::
+            Only the output methods ``to_html``, ``to_string`` and ``to_latex``
+            currently work with concatenated Stylers.
+
+            Other output methods, including ``to_excel``, **do not** work with
+            concatenated Stylers.
+
+        The following should be noted:
+
+        - ``table_styles``, ``table_attributes``, ``caption`` and ``uuid`` are all
+          inherited from the original Styler and not ``other``.
+        - hidden columns and hidden index levels will be inherited from the
+          original Styler.
+        - ``css`` will be inherited from the original Styler, and the value of
+          keys ``data``, ``row_heading`` and ``row`` will be prepended with
+          ``foot0_``. If more concats are chained, their styles will be prepended
+          with ``foot1_``, ``foot2_``, etc., and if a concatenated style has
+          another concatenated style, the second style will be prepended with
+          ``foot{parent}_foot{child}_``.
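The chained prefixing in the last bullet can be sketched as follows (a minimal,
unverified sketch; ``df`` is any DataFrame whose columns match the footers):

    >>> base = df.style.concat(df.agg(["sum"]).style)   # footer keys prefixed ``foot0_``  # doctest: +SKIP
    >>> base = base.concat(df.agg(["mean"]).style)      # second footer prefixed ``foot1_``  # doctest: +SKIP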
+ + A common use case is to concatenate user defined functions with + ``DataFrame.agg`` or with described statistics via ``DataFrame.describe``. + See examples. + + Examples + -------- + A common use case is adding totals rows, or otherwise, via methods calculated + in ``DataFrame.agg``. + + >>> df = pd.DataFrame([[4, 6], [1, 9], [3, 4], [5, 5], [9, 6]], + ... columns=["Mike", "Jim"], + ... index=["Mon", "Tue", "Wed", "Thurs", "Fri"]) + >>> styler = df.style.concat(df.agg(["sum"]).style) # doctest: +SKIP + + .. figure:: ../../_static/style/footer_simple.png + + Since the concatenated object is a Styler the existing functionality can be + used to conditionally format it as well as the original. + + >>> descriptors = df.agg(["sum", "mean", lambda s: s.dtype]) + >>> descriptors.index = ["Total", "Average", "dtype"] + >>> other = (descriptors.style + ... .highlight_max(axis=1, subset=(["Total", "Average"], slice(None))) + ... .format(subset=("Average", slice(None)), precision=2, decimal=",") + ... .map(lambda v: "font-weight: bold;")) + >>> styler = (df.style + ... .highlight_max(color="salmon") + ... .set_table_styles([{"selector": ".foot_row0", + ... "props": "border-top: 1px solid black;"}])) + >>> styler.concat(other) # doctest: +SKIP + + .. figure:: ../../_static/style/footer_extended.png + + When ``other`` has fewer index levels than the original Styler it is possible + to extend the index in ``other``, with placeholder levels. + + >>> df = pd.DataFrame([[1], [2]], + ... index=pd.MultiIndex.from_product([[0], [1, 2]])) + >>> descriptors = df.agg(["sum"]) + >>> descriptors.index = pd.MultiIndex.from_product([[""], descriptors.index]) + >>> df.style.concat(descriptors.style) # doctest: +SKIP + """ + if not isinstance(other, Styler): + raise TypeError("`other` must be of type `Styler`") + if not self.data.columns.equals(other.data.columns): + raise ValueError("`other.data` must have same columns as `Styler.data`") + if not self.data.index.nlevels == other.data.index.nlevels: + raise ValueError( + "number of index levels must be same in `other` " + "as in `Styler`. See documentation for suggestions." + ) + self.concatenated.append(other) + return self + + def _repr_html_(self) -> str | None: + """ + Hooks into Jupyter notebook rich display system, which calls _repr_html_ by + default if an object is returned at the end of a cell. + """ + if get_option("styler.render.repr") == "html": + return self.to_html() + return None + + def _repr_latex_(self) -> str | None: + if get_option("styler.render.repr") == "latex": + return self.to_latex() + return None + + def set_tooltips( + self, + ttips: DataFrame, + props: CSSProperties | None = None, + css_class: str | None = None, + ) -> Styler: + """ + Set the DataFrame of strings on ``Styler`` generating ``:hover`` tooltips. + + These string based tooltips are only applicable to ``
+<td>`` HTML elements,
+        and cannot be used for column or index headers.
+
+        .. versionadded:: 1.3.0
+
+        Parameters
+        ----------
+        ttips : DataFrame
+            DataFrame containing strings that will be translated to tooltips, mapped
+            by identical column and index values that must exist on the underlying
+            Styler data. None, NaN values, and empty strings will be ignored and
+            not affect the rendered HTML.
+        props : list-like or str, optional
+            List of (attr, value) tuples or a valid CSS string. If ``None`` adopts
+            the internal default values described in notes.
+        css_class : str, optional
+            Name of the tooltip class used in CSS, should conform to HTML standards.
+            Only useful if integrating tooltips with external CSS. If ``None`` uses the
+            internal default value 'pd-t'.
+
+        Returns
+        -------
+        Styler
+
+        Notes
+        -----
+        Tooltips are created by adding ``<span class="pd-t"></span>`` to each data cell
+        and then manipulating the table level CSS to attach pseudo hover and pseudo
+        after selectors to produce the required results.
+
+        The default properties for the tooltip CSS class are:
+
+        - visibility: hidden
+        - position: absolute
+        - z-index: 1
+        - background-color: black
+        - color: white
+        - transform: translate(-20px, -20px)
+
+        The property 'visibility: hidden;' is a key prerequisite to the hover
+        functionality, and should always be included in any manual properties
+        specification, using the ``props`` argument.
+
+        Tooltips are not designed to be efficient, and can add large amounts of
+        additional HTML for larger tables, since they also require that ``cell_ids``
+        is forced to `True`.
+
+        Examples
+        --------
+        Basic application
+
+        >>> df = pd.DataFrame(data=[[0, 1], [2, 3]])
+        >>> ttips = pd.DataFrame(
+        ...     data=[["Min", ""], [np.nan, "Max"]], columns=df.columns, index=df.index
+        ... )
+        >>> s = df.style.set_tooltips(ttips).to_html()
+
+        Optionally controlling the tooltip visual display
+
+        >>> df.style.set_tooltips(ttips, css_class='tt-add', props=[
+        ...     ('visibility', 'hidden'),
+        ...     ('position', 'absolute'),
+        ...     ('z-index', 1)])  # doctest: +SKIP
+        >>> df.style.set_tooltips(ttips, css_class='tt-add',
+        ...     props='visibility:hidden; position:absolute; z-index:1;')
+        ...     # doctest: +SKIP
+        """
+        if not self.cell_ids:
+            # tooltips not optimised for individual cell check. requires reasonable
+            # redesign and more extensive code for a feature that might be rarely used.
+            raise NotImplementedError(
+                "Tooltips can only render when 'cell_ids' is True."
+            )
+        if not ttips.index.is_unique or not ttips.columns.is_unique:
+            raise KeyError(
+                "Tooltips render only if `ttips` has unique index and columns."
+ ) + if self.tooltips is None: # create a default instance if necessary + self.tooltips = Tooltips() + self.tooltips.tt_data = ttips + if props: + self.tooltips.class_properties = props + if css_class: + self.tooltips.class_name = css_class + + return self + + @doc( + NDFrame.to_excel, + klass="Styler", + storage_options=_shared_docs["storage_options"], + storage_options_versionadded="1.5.0", + ) + def to_excel( + self, + excel_writer: FilePath | WriteExcelBuffer | ExcelWriter, + sheet_name: str = "Sheet1", + na_rep: str = "", + float_format: str | None = None, + columns: Sequence[Hashable] | None = None, + header: Sequence[Hashable] | bool = True, + index: bool = True, + index_label: IndexLabel | None = None, + startrow: int = 0, + startcol: int = 0, + engine: str | None = None, + merge_cells: bool = True, + encoding: str | None = None, + inf_rep: str = "inf", + verbose: bool = True, + freeze_panes: tuple[int, int] | None = None, + storage_options: StorageOptions | None = None, + ) -> None: + from pandas.io.formats.excel import ExcelFormatter + + formatter = ExcelFormatter( + self, + na_rep=na_rep, + cols=columns, + header=header, + float_format=float_format, + index=index, + index_label=index_label, + merge_cells=merge_cells, + inf_rep=inf_rep, + ) + formatter.write( + excel_writer, + sheet_name=sheet_name, + startrow=startrow, + startcol=startcol, + freeze_panes=freeze_panes, + engine=engine, + storage_options=storage_options, + ) + + @overload + def to_latex( + self, + buf: FilePath | WriteBuffer[str], + *, + column_format: str | None = ..., + position: str | None = ..., + position_float: str | None = ..., + hrules: bool | None = ..., + clines: str | None = ..., + label: str | None = ..., + caption: str | tuple | None = ..., + sparse_index: bool | None = ..., + sparse_columns: bool | None = ..., + multirow_align: str | None = ..., + multicol_align: str | None = ..., + siunitx: bool = ..., + environment: str | None = ..., + encoding: str | None = ..., + convert_css: bool = ..., + ) -> None: + ... + + @overload + def to_latex( + self, + buf: None = ..., + *, + column_format: str | None = ..., + position: str | None = ..., + position_float: str | None = ..., + hrules: bool | None = ..., + clines: str | None = ..., + label: str | None = ..., + caption: str | tuple | None = ..., + sparse_index: bool | None = ..., + sparse_columns: bool | None = ..., + multirow_align: str | None = ..., + multicol_align: str | None = ..., + siunitx: bool = ..., + environment: str | None = ..., + encoding: str | None = ..., + convert_css: bool = ..., + ) -> str: + ... + + def to_latex( + self, + buf: FilePath | WriteBuffer[str] | None = None, + *, + column_format: str | None = None, + position: str | None = None, + position_float: str | None = None, + hrules: bool | None = None, + clines: str | None = None, + label: str | None = None, + caption: str | tuple | None = None, + sparse_index: bool | None = None, + sparse_columns: bool | None = None, + multirow_align: str | None = None, + multicol_align: str | None = None, + siunitx: bool = False, + environment: str | None = None, + encoding: str | None = None, + convert_css: bool = False, + ) -> str | None: + r""" + Write Styler to a file, buffer or string in LaTeX format. + + .. versionadded:: 1.3.0 + + Parameters + ---------- + buf : str, path object, file-like object, or None, default None + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a string ``write()`` function. If None, the result is + returned as a string. 
+        column_format : str, optional
+            The LaTeX column specification placed in location:
+
+            \\begin{tabular}{<column_format>}
+
+            Defaults to 'l' for index and
+            non-numeric data columns, and, for numeric data columns,
+            to 'r' by default, or 'S' if ``siunitx`` is ``True``.
+        position : str, optional
+            The LaTeX positional argument (e.g. 'h!') for tables, placed in location:
+
+            ``\\begin{table}[<position>]``.
+        position_float : {"centering", "raggedleft", "raggedright"}, optional
+            The LaTeX float command placed in location:
+
+            \\begin{table}[<position>]
+
+            \\<position_float>
+
+            Cannot be used if ``environment`` is "longtable".
+        hrules : bool
+            Set to `True` to add \\toprule, \\midrule and \\bottomrule from the
+            {booktabs} LaTeX package.
+            Defaults to ``pandas.options.styler.latex.hrules``, which is `False`.
+
+            .. versionchanged:: 1.4.0
+        clines : str, optional
+            Use to control adding \\cline commands for the index labels separation.
+            Possible values are:
+
+            - `None`: no cline commands are added (default).
+            - `"all;data"`: a cline is added for every index value extending the
+              width of the table, including data entries.
+            - `"all;index"`: as above with lines extending only the width of the
+              index entries.
+            - `"skip-last;data"`: a cline is added for each index value except the
+              last level (which is never sparsified), extending the width of the
+              table.
+            - `"skip-last;index"`: as above with lines extending only the width of the
+              index entries.
+
+            .. versionadded:: 1.4.0
+        label : str, optional
+            The LaTeX label included as: \\label{<label>}.
+        caption : str, tuple, optional
+            If tuple, i.e. ("full caption", "short caption"), the caption is included
+            as: \\caption[<short_caption>]{<caption>}.
+        sparse_index : bool, optional
+            Whether to sparsify the display of a hierarchical index. Setting to False
+            will display each explicit level element in a hierarchical key for each
+            row. Defaults to ``pandas.options.styler.sparse.index``, which is `True`.
+        sparse_columns : bool, optional
+            Whether to sparsify the display of hierarchical columns. Setting to False
+            will display each explicit level element in a hierarchical key for each
+            column. Defaults to ``pandas.options.styler.sparse.columns``, which
+            is `True`.
+        multirow_align : {"c", "t", "b", "naive"}, optional
+            If sparsifying hierarchical MultiIndexes whether to align text centrally,
+            at the top or bottom using the multirow package. If not given defaults to
+            ``pandas.options.styler.latex.multirow_align``, which is `"c"`.
+            If "naive" is given renders without multirow.
+
+            .. versionchanged:: 1.4.0
+        multicol_align : {"r", "c", "l", "naive-l", "naive-r"}, optional
+            If sparsifying hierarchical MultiIndex columns whether to align text at
+            the left, centrally, or at the right. If not given defaults to
+            ``pandas.options.styler.latex.multicol_align``, which is "r".
+            If a naive option is given renders without multicol.
+            Pipe decorators can also be added to non-naive values to draw vertical
+            rules, e.g. "\|r" will draw a rule on the left side of right aligned merged
+            cells.
+
+            .. versionchanged:: 1.4.0
+        siunitx : bool, default False
+            Set to ``True`` to structure LaTeX compatible with the {siunitx} package.
+        environment : str, optional
+            If given, the environment that will replace 'table' in ``\\begin{table}``.
+            If 'longtable' is specified then a more suitable template is
+            rendered. If not given defaults to
+            ``pandas.options.styler.latex.environment``, which is `None`.
+
+            .. versionadded:: 1.4.0
+        encoding : str, optional
+            Character encoding setting. Defaults
+            to ``pandas.options.styler.render.encoding``, which is "utf-8".
+        convert_css : bool, default False
+            Convert simple cell-styles from CSS to LaTeX format. Any CSS not found in
+            conversion table is dropped. A style can be forced by adding option
+            `--latex`. See notes.
+
+        Returns
+        -------
+        str or None
+            If `buf` is None, returns the result as a string. Otherwise returns `None`.
+
+        See Also
+        --------
+        Styler.format: Format the text display value of cells.
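Putting the positional arguments above together (a sketch; the values passed
are illustrative, not prescribed):

    >>> styler.to_latex(
    ...     column_format="lrr", position="h!", position_float="centering",
    ...     hrules=True, label="tab:results",
    ...     caption=("A full caption.", "A short caption."),
    ... )  # doctest: +SKIP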
+
+        Notes
+        -----
+        **Latex Packages**
+
+        For the following features we recommend the following LaTeX inclusions:
+
+        ===================== ==========================================================
+        Feature               Inclusion
+        ===================== ==========================================================
+        sparse columns        none: included within default {tabular} environment
+        sparse rows           \\usepackage{multirow}
+        hrules                \\usepackage{booktabs}
+        colors                \\usepackage[table]{xcolor}
+        siunitx               \\usepackage{siunitx}
+        bold (with siunitx)   | \\usepackage{etoolbox}
+                              | \\robustify\\bfseries
+                              | \\sisetup{detect-all = true}  *(within {document})*
+        italic (with siunitx) | \\usepackage{etoolbox}
+                              | \\robustify\\itshape
+                              | \\sisetup{detect-all = true}  *(within {document})*
+        environment           \\usepackage{longtable} if arg is "longtable"
+                              | or any other relevant environment package
+        hyperlinks            \\usepackage{hyperref}
+        ===================== ==========================================================
+
+        **Cell Styles**
+
+        LaTeX styling can only be rendered if the accompanying styling functions have
+        been constructed with appropriate LaTeX commands. All styling
+        functionality is built around the concept of a CSS ``(<attribute>, <value>)``
+        pair (see `Table Visualization <../../user_guide/style.ipynb>`_), and this
+        should be replaced by a LaTeX
+        ``(<command>, <options>)`` approach. Each cell will be styled individually
+        using nested LaTeX commands with their accompanied options.
+
+        For example the following code will highlight and bold a cell in HTML-CSS:
+
+        >>> df = pd.DataFrame([[1, 2], [3, 4]])
+        >>> s = df.style.highlight_max(axis=None,
+        ...                            props='background-color:red; font-weight:bold;')
+        >>> s.to_html()  # doctest: +SKIP
+
+        The equivalent using LaTeX only commands is the following:
+
+        >>> s = df.style.highlight_max(axis=None,
+        ...                            props='cellcolor:{red}; bfseries: ;')
+        >>> s.to_latex()  # doctest: +SKIP
+
+        Internally these structured LaTeX ``(<command>, <options>)`` pairs
+        are translated to the
+        ``display_value`` with the default structure:
+        ``\<command><options> <display_value>``.
+        Where there are multiple commands the latter is nested recursively, so that
+        the above example highlighted cell is rendered as
+        ``\cellcolor{red} \bfseries 4``.
+
+        Occasionally this format does not suit the applied command, or
+        combination of LaTeX packages that is in use, so additional flags can be
+        added to the ``<options>``, within the tuple, to result in different
+        positions of required braces (the **default** being the same as ``--nowrap``):
+
+        =================================== ============================================
+        Tuple Format                        Output Structure
+        =================================== ============================================
+        (<command>,<options>)               \\<command><options> <display_value>
+        (<command>,<options> ``--nowrap``)  \\<command><options> <display_value>
+        (<command>,<options> ``--rwrap``)   \\<command><options>{<display_value>}
+        (<command>,<options> ``--wrap``)    {\\<command><options> <display_value>}
+        (<command>,<options> ``--lwrap``)   {\\<command><options>} <display_value>
+        (<command>,<options> ``--dwrap``)   {\\<command><options>}{<display_value>}
+        =================================== ============================================
+
+        For example the `textbf` command for font-weight
+        should always be used with `--rwrap` so ``('textbf', '--rwrap')`` will render a
+        working cell, wrapped with braces, as ``\textbf{<display_value>}``.
+
+        A more comprehensive example is as follows:
+
+        >>> df = pd.DataFrame([[1, 2.2, "dogs"], [3, 4.4, "cats"], [2, 6.6, "cows"]],
+        ...                   index=["ix1", "ix2", "ix3"],
+        ...                   columns=["Integers", "Floats", "Strings"])
+        >>> s = df.style.highlight_max(
+        ...     props='cellcolor:[HTML]{FFFF00}; color:{red};'
+        ...           'textit:--rwrap; textbf:--rwrap;'
+        ... )
+        >>> s.to_latex()  # doctest: +SKIP
+
+        ..
figure:: ../../_static/style/latex_1.png + + **Table Styles** + + Internally Styler uses its ``table_styles`` object to parse the + ``column_format``, ``position``, ``position_float``, and ``label`` + input arguments. These arguments are added to table styles in the format: + + .. code-block:: python + + set_table_styles([ + {"selector": "column_format", "props": f":{column_format};"}, + {"selector": "position", "props": f":{position};"}, + {"selector": "position_float", "props": f":{position_float};"}, + {"selector": "label", "props": f":{{{label.replace(':','§')}}};"} + ], overwrite=False) + + Exception is made for the ``hrules`` argument which, in fact, controls all three + commands: ``toprule``, ``bottomrule`` and ``midrule`` simultaneously. Instead of + setting ``hrules`` to ``True``, it is also possible to set each + individual rule definition, by manually setting the ``table_styles``, + for example below we set a regular ``toprule``, set an ``hline`` for + ``bottomrule`` and exclude the ``midrule``: + + .. code-block:: python + + set_table_styles([ + {'selector': 'toprule', 'props': ':toprule;'}, + {'selector': 'bottomrule', 'props': ':hline;'}, + ], overwrite=False) + + If other ``commands`` are added to table styles they will be detected, and + positioned immediately above the '\\begin{tabular}' command. For example to + add odd and even row coloring, from the {colortbl} package, in format + ``\rowcolors{1}{pink}{red}``, use: + + .. code-block:: python + + set_table_styles([ + {'selector': 'rowcolors', 'props': ':{1}{pink}{red};'} + ], overwrite=False) + + A more comprehensive example using these arguments is as follows: + + >>> df.columns = pd.MultiIndex.from_tuples([ + ... ("Numeric", "Integers"), + ... ("Numeric", "Floats"), + ... ("Non-Numeric", "Strings") + ... ]) + >>> df.index = pd.MultiIndex.from_tuples([ + ... ("L0", "ix1"), ("L0", "ix2"), ("L1", "ix3") + ... ]) + >>> s = df.style.highlight_max( + ... props='cellcolor:[HTML]{FFFF00}; color:{red}; itshape:; bfseries:;' + ... ) + >>> s.to_latex( + ... column_format="rrrrr", position="h", position_float="centering", + ... hrules=True, label="table:5", caption="Styled LaTeX Table", + ... multirow_align="t", multicol_align="r" + ... ) # doctest: +SKIP + + .. figure:: ../../_static/style/latex_2.png + + **Formatting** + + To format values :meth:`Styler.format` should be used prior to calling + `Styler.to_latex`, as well as other methods such as :meth:`Styler.hide` + for example: + + >>> s.clear() + >>> s.table_styles = [] + >>> s.caption = None + >>> s.format({ + ... ("Numeric", "Integers"): '\${}', + ... ("Numeric", "Floats"): '{:.3f}', + ... ("Non-Numeric", "Strings"): str.upper + ... }) # doctest: +SKIP + Numeric Non-Numeric + Integers Floats Strings + L0 ix1 $1 2.200 DOGS + ix2 $3 4.400 CATS + L1 ix3 $2 6.600 COWS + + >>> s.to_latex() # doctest: +SKIP + \begin{tabular}{llrrl} + {} & {} & \multicolumn{2}{r}{Numeric} & {Non-Numeric} \\ + {} & {} & {Integers} & {Floats} & {Strings} \\ + \multirow[c]{2}{*}{L0} & ix1 & \\$1 & 2.200 & DOGS \\ + & ix2 & \$3 & 4.400 & CATS \\ + L1 & ix3 & \$2 & 6.600 & COWS \\ + \end{tabular} + + **CSS Conversion** + + This method can convert a Styler constructured with HTML-CSS to LaTeX using + the following limited conversions. 
+ + ================== ==================== ============= ========================== + CSS Attribute CSS value LaTeX Command LaTeX Options + ================== ==================== ============= ========================== + font-weight | bold | bfseries + | bolder | bfseries + font-style | italic | itshape + | oblique | slshape + background-color | red cellcolor | {red}--lwrap + | #fe01ea | [HTML]{FE01EA}--lwrap + | #f0e | [HTML]{FF00EE}--lwrap + | rgb(128,255,0) | [rgb]{0.5,1,0}--lwrap + | rgba(128,0,0,0.5) | [rgb]{0.5,0,0}--lwrap + | rgb(25%,255,50%) | [rgb]{0.25,1,0.5}--lwrap + color | red color | {red} + | #fe01ea | [HTML]{FE01EA} + | #f0e | [HTML]{FF00EE} + | rgb(128,255,0) | [rgb]{0.5,1,0} + | rgba(128,0,0,0.5) | [rgb]{0.5,0,0} + | rgb(25%,255,50%) | [rgb]{0.25,1,0.5} + ================== ==================== ============= ========================== + + It is also possible to add user-defined LaTeX only styles to a HTML-CSS Styler + using the ``--latex`` flag, and to add LaTeX parsing options that the + converter will detect within a CSS-comment. + + >>> df = pd.DataFrame([[1]]) + >>> df.style.set_properties( + ... **{"font-weight": "bold /* --dwrap */", "Huge": "--latex--rwrap"} + ... ).to_latex(convert_css=True) # doctest: +SKIP + \begin{tabular}{lr} + {} & {0} \\ + 0 & {\bfseries}{\Huge{1}} \\ + \end{tabular} + + Examples + -------- + Below we give a complete step by step example adding some advanced features + and noting some common gotchas. + + First we create the DataFrame and Styler as usual, including MultiIndex rows + and columns, which allow for more advanced formatting options: + + >>> cidx = pd.MultiIndex.from_arrays([ + ... ["Equity", "Equity", "Equity", "Equity", + ... "Stats", "Stats", "Stats", "Stats", "Rating"], + ... ["Energy", "Energy", "Consumer", "Consumer", "", "", "", "", ""], + ... ["BP", "Shell", "H&M", "Unilever", + ... "Std Dev", "Variance", "52w High", "52w Low", ""] + ... ]) + >>> iidx = pd.MultiIndex.from_arrays([ + ... ["Equity", "Equity", "Equity", "Equity"], + ... ["Energy", "Energy", "Consumer", "Consumer"], + ... ["BP", "Shell", "H&M", "Unilever"] + ... ]) + >>> styler = pd.DataFrame([ + ... [1, 0.8, 0.66, 0.72, 32.1678, 32.1678**2, 335.12, 240.89, "Buy"], + ... [0.8, 1.0, 0.69, 0.79, 1.876, 1.876**2, 14.12, 19.78, "Hold"], + ... [0.66, 0.69, 1.0, 0.86, 7, 7**2, 210.9, 140.6, "Buy"], + ... [0.72, 0.79, 0.86, 1.0, 213.76, 213.76**2, 2807, 3678, "Sell"], + ... ], columns=cidx, index=iidx).style + + Second we will format the display and, since our table is quite wide, will + hide the repeated level-0 of the index: + + >>> (styler.format(subset="Equity", precision=2) + ... .format(subset="Stats", precision=1, thousands=",") + ... .format(subset="Rating", formatter=str.upper) + ... .format_index(escape="latex", axis=1) + ... .format_index(escape="latex", axis=0) + ... .hide(level=0, axis=0)) # doctest: +SKIP + + Note that one of the string entries of the index and column headers is "H&M". + Without applying the `escape="latex"` option to the `format_index` method the + resultant LaTeX will fail to render, and the error returned is quite + difficult to debug. Using the appropriate escape the "&" is converted to "\\&". + + Thirdly we will apply some (CSS-HTML) styles to our object. We will use a + builtin method and also define our own method to highlight the stock + recommendation: + + >>> def rating_color(v): + ... if v == "Buy": color = "#33ff85" + ... elif v == "Sell": color = "#ff5933" + ... else: color = "#ffdd33" + ... 
return f"color: {color}; font-weight: bold;" + >>> (styler.background_gradient(cmap="inferno", subset="Equity", vmin=0, vmax=1) + ... .map(rating_color, subset="Rating")) # doctest: +SKIP + + All the above styles will work with HTML (see below) and LaTeX upon conversion: + + .. figure:: ../../_static/style/latex_stocks_html.png + + However, we finally want to add one LaTeX only style + (from the {graphicx} package), that is not easy to convert from CSS and + pandas does not support it. Notice the `--latex` flag used here, + as well as `--rwrap` to ensure this is formatted correctly and + not ignored upon conversion. + + >>> styler.map_index( + ... lambda v: "rotatebox:{45}--rwrap--latex;", level=2, axis=1 + ... ) # doctest: +SKIP + + Finally we render our LaTeX adding in other options as required: + + >>> styler.to_latex( + ... caption="Selected stock correlation and simple statistics.", + ... clines="skip-last;data", + ... convert_css=True, + ... position_float="centering", + ... multicol_align="|c|", + ... hrules=True, + ... ) # doctest: +SKIP + \begin{table} + \centering + \caption{Selected stock correlation and simple statistics.} + \begin{tabular}{llrrrrrrrrl} + \toprule + & & \multicolumn{4}{|c|}{Equity} & \multicolumn{4}{|c|}{Stats} & Rating \\ + & & \multicolumn{2}{|c|}{Energy} & \multicolumn{2}{|c|}{Consumer} & + \multicolumn{4}{|c|}{} & \\ + & & \rotatebox{45}{BP} & \rotatebox{45}{Shell} & \rotatebox{45}{H\&M} & + \rotatebox{45}{Unilever} & \rotatebox{45}{Std Dev} & \rotatebox{45}{Variance} & + \rotatebox{45}{52w High} & \rotatebox{45}{52w Low} & \rotatebox{45}{} \\ + \midrule + \multirow[c]{2}{*}{Energy} & BP & {\cellcolor[HTML]{FCFFA4}} + \color[HTML]{000000} 1.00 & {\cellcolor[HTML]{FCA50A}} \color[HTML]{000000} + 0.80 & {\cellcolor[HTML]{EB6628}} \color[HTML]{F1F1F1} 0.66 & + {\cellcolor[HTML]{F68013}} \color[HTML]{F1F1F1} 0.72 & 32.2 & 1,034.8 & 335.1 + & 240.9 & \color[HTML]{33FF85} \bfseries BUY \\ + & Shell & {\cellcolor[HTML]{FCA50A}} \color[HTML]{000000} 0.80 & + {\cellcolor[HTML]{FCFFA4}} \color[HTML]{000000} 1.00 & + {\cellcolor[HTML]{F1731D}} \color[HTML]{F1F1F1} 0.69 & + {\cellcolor[HTML]{FCA108}} \color[HTML]{000000} 0.79 & 1.9 & 3.5 & 14.1 & + 19.8 & \color[HTML]{FFDD33} \bfseries HOLD \\ + \cline{1-11} + \multirow[c]{2}{*}{Consumer} & H\&M & {\cellcolor[HTML]{EB6628}} + \color[HTML]{F1F1F1} 0.66 & {\cellcolor[HTML]{F1731D}} \color[HTML]{F1F1F1} + 0.69 & {\cellcolor[HTML]{FCFFA4}} \color[HTML]{000000} 1.00 & + {\cellcolor[HTML]{FAC42A}} \color[HTML]{000000} 0.86 & 7.0 & 49.0 & 210.9 & + 140.6 & \color[HTML]{33FF85} \bfseries BUY \\ + & Unilever & {\cellcolor[HTML]{F68013}} \color[HTML]{F1F1F1} 0.72 & + {\cellcolor[HTML]{FCA108}} \color[HTML]{000000} 0.79 & + {\cellcolor[HTML]{FAC42A}} \color[HTML]{000000} 0.86 & + {\cellcolor[HTML]{FCFFA4}} \color[HTML]{000000} 1.00 & 213.8 & 45,693.3 & + 2,807.0 & 3,678.0 & \color[HTML]{FF5933} \bfseries SELL \\ + \cline{1-11} + \bottomrule + \end{tabular} + \end{table} + + .. 
figure:: ../../_static/style/latex_stocks.png + """ + obj = self._copy(deepcopy=True) # manipulate table_styles on obj, not self + + table_selectors = ( + [style["selector"] for style in self.table_styles] + if self.table_styles is not None + else [] + ) + + if column_format is not None: + # add more recent setting to table_styles + obj.set_table_styles( + [{"selector": "column_format", "props": f":{column_format}"}], + overwrite=False, + ) + elif "column_format" in table_selectors: + pass # adopt what has been previously set in table_styles + else: + # create a default: set float, complex, int cols to 'r' ('S'), index to 'l' + _original_columns = self.data.columns + self.data.columns = RangeIndex(stop=len(self.data.columns)) + numeric_cols = self.data._get_numeric_data().columns.to_list() + self.data.columns = _original_columns + column_format = "" + for level in range(self.index.nlevels): + column_format += "" if self.hide_index_[level] else "l" + for ci, _ in enumerate(self.data.columns): + if ci not in self.hidden_columns: + column_format += ( + ("r" if not siunitx else "S") if ci in numeric_cols else "l" + ) + obj.set_table_styles( + [{"selector": "column_format", "props": f":{column_format}"}], + overwrite=False, + ) + + if position: + obj.set_table_styles( + [{"selector": "position", "props": f":{position}"}], + overwrite=False, + ) + + if position_float: + if environment == "longtable": + raise ValueError( + "`position_float` cannot be used in 'longtable' `environment`" + ) + if position_float not in ["raggedright", "raggedleft", "centering"]: + raise ValueError( + f"`position_float` should be one of " + f"'raggedright', 'raggedleft', 'centering', " + f"got: '{position_float}'" + ) + obj.set_table_styles( + [{"selector": "position_float", "props": f":{position_float}"}], + overwrite=False, + ) + + hrules = get_option("styler.latex.hrules") if hrules is None else hrules + if hrules: + obj.set_table_styles( + [ + {"selector": "toprule", "props": ":toprule"}, + {"selector": "midrule", "props": ":midrule"}, + {"selector": "bottomrule", "props": ":bottomrule"}, + ], + overwrite=False, + ) + + if label: + obj.set_table_styles( + [{"selector": "label", "props": f":{{{label.replace(':', '§')}}}"}], + overwrite=False, + ) + + if caption: + obj.set_caption(caption) + + if sparse_index is None: + sparse_index = get_option("styler.sparse.index") + if sparse_columns is None: + sparse_columns = get_option("styler.sparse.columns") + environment = environment or get_option("styler.latex.environment") + multicol_align = multicol_align or get_option("styler.latex.multicol_align") + multirow_align = multirow_align or get_option("styler.latex.multirow_align") + latex = obj._render_latex( + sparse_index=sparse_index, + sparse_columns=sparse_columns, + multirow_align=multirow_align, + multicol_align=multicol_align, + environment=environment, + convert_css=convert_css, + siunitx=siunitx, + clines=clines, + ) + + encoding = ( + (encoding or get_option("styler.render.encoding")) + if isinstance(buf, str) # i.e. 
a filepath + else encoding + ) + return save_to_buffer(latex, buf=buf, encoding=encoding) + + @overload + def to_html( + self, + buf: FilePath | WriteBuffer[str], + *, + table_uuid: str | None = ..., + table_attributes: str | None = ..., + sparse_index: bool | None = ..., + sparse_columns: bool | None = ..., + bold_headers: bool = ..., + caption: str | None = ..., + max_rows: int | None = ..., + max_columns: int | None = ..., + encoding: str | None = ..., + doctype_html: bool = ..., + exclude_styles: bool = ..., + **kwargs, + ) -> None: + ... + + @overload + def to_html( + self, + buf: None = ..., + *, + table_uuid: str | None = ..., + table_attributes: str | None = ..., + sparse_index: bool | None = ..., + sparse_columns: bool | None = ..., + bold_headers: bool = ..., + caption: str | None = ..., + max_rows: int | None = ..., + max_columns: int | None = ..., + encoding: str | None = ..., + doctype_html: bool = ..., + exclude_styles: bool = ..., + **kwargs, + ) -> str: + ... + + @Substitution(buf=buffering_args, encoding=encoding_args) + def to_html( + self, + buf: FilePath | WriteBuffer[str] | None = None, + *, + table_uuid: str | None = None, + table_attributes: str | None = None, + sparse_index: bool | None = None, + sparse_columns: bool | None = None, + bold_headers: bool = False, + caption: str | None = None, + max_rows: int | None = None, + max_columns: int | None = None, + encoding: str | None = None, + doctype_html: bool = False, + exclude_styles: bool = False, + **kwargs, + ) -> str | None: + """ + Write Styler to a file, buffer or string in HTML-CSS format. + + .. versionadded:: 1.3.0 + + Parameters + ---------- + %(buf)s + table_uuid : str, optional + Id attribute assigned to the HTML element in the format: + + ``
+<table id="T_<table_uuid>" ..>``
+
+            If not given uses Styler's initially assigned value.
+        table_attributes : str, optional
+            Attributes to assign within the ``<table>`` HTML element in the format:
+
+            ``<table .. <table_attributes>
>`` + + If not given defaults to Styler's preexisting value. + sparse_index : bool, optional + Whether to sparsify the display of a hierarchical index. Setting to False + will display each explicit level element in a hierarchical key for each row. + Defaults to ``pandas.options.styler.sparse.index`` value. + + .. versionadded:: 1.4.0 + sparse_columns : bool, optional + Whether to sparsify the display of a hierarchical index. Setting to False + will display each explicit level element in a hierarchical key for each + column. Defaults to ``pandas.options.styler.sparse.columns`` value. + + .. versionadded:: 1.4.0 + bold_headers : bool, optional + Adds "font-weight: bold;" as a CSS property to table style header cells. + + .. versionadded:: 1.4.0 + caption : str, optional + Set, or overwrite, the caption on Styler before rendering. + + .. versionadded:: 1.4.0 + max_rows : int, optional + The maximum number of rows that will be rendered. Defaults to + ``pandas.options.styler.render.max_rows/max_columns``. + + .. versionadded:: 1.4.0 + max_columns : int, optional + The maximum number of columns that will be rendered. Defaults to + ``pandas.options.styler.render.max_columns``, which is None. + + Rows and columns may be reduced if the number of total elements is + large. This value is set to ``pandas.options.styler.render.max_elements``, + which is 262144 (18 bit browser rendering). + + .. versionadded:: 1.4.0 + %(encoding)s + doctype_html : bool, default False + Whether to output a fully structured HTML file including all + HTML elements, or just the core `` +
+ + + + + + + ... + """ + obj = self._copy(deepcopy=True) # manipulate table_styles on obj, not self + + if table_uuid: + obj.set_uuid(table_uuid) + + if table_attributes: + obj.set_table_attributes(table_attributes) + + if sparse_index is None: + sparse_index = get_option("styler.sparse.index") + if sparse_columns is None: + sparse_columns = get_option("styler.sparse.columns") + + if bold_headers: + obj.set_table_styles( + [{"selector": "th", "props": "font-weight: bold;"}], overwrite=False + ) + + if caption is not None: + obj.set_caption(caption) + + # Build HTML string.. + html = obj._render_html( + sparse_index=sparse_index, + sparse_columns=sparse_columns, + max_rows=max_rows, + max_cols=max_columns, + exclude_styles=exclude_styles, + encoding=encoding or get_option("styler.render.encoding"), + doctype_html=doctype_html, + **kwargs, + ) + + return save_to_buffer( + html, buf=buf, encoding=(encoding if buf is not None else None) + ) + + @overload + def to_string( + self, + buf: FilePath | WriteBuffer[str], + *, + encoding: str | None = ..., + sparse_index: bool | None = ..., + sparse_columns: bool | None = ..., + max_rows: int | None = ..., + max_columns: int | None = ..., + delimiter: str = ..., + ) -> None: + ... + + @overload + def to_string( + self, + buf: None = ..., + *, + encoding: str | None = ..., + sparse_index: bool | None = ..., + sparse_columns: bool | None = ..., + max_rows: int | None = ..., + max_columns: int | None = ..., + delimiter: str = ..., + ) -> str: + ... + + @Substitution(buf=buffering_args, encoding=encoding_args) + def to_string( + self, + buf: FilePath | WriteBuffer[str] | None = None, + *, + encoding: str | None = None, + sparse_index: bool | None = None, + sparse_columns: bool | None = None, + max_rows: int | None = None, + max_columns: int | None = None, + delimiter: str = " ", + ) -> str | None: + """ + Write Styler to a file, buffer or string in text format. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + %(buf)s + %(encoding)s + sparse_index : bool, optional + Whether to sparsify the display of a hierarchical index. Setting to False + will display each explicit level element in a hierarchical key for each row. + Defaults to ``pandas.options.styler.sparse.index`` value. + sparse_columns : bool, optional + Whether to sparsify the display of a hierarchical index. Setting to False + will display each explicit level element in a hierarchical key for each + column. Defaults to ``pandas.options.styler.sparse.columns`` value. + max_rows : int, optional + The maximum number of rows that will be rendered. Defaults to + ``pandas.options.styler.render.max_rows``, which is None. + max_columns : int, optional + The maximum number of columns that will be rendered. Defaults to + ``pandas.options.styler.render.max_columns``, which is None. + + Rows and columns may be reduced if the number of total elements is + large. This value is set to ``pandas.options.styler.render.max_elements``, + which is 262144 (18 bit browser rendering). + delimiter : str, default single space + The separator between data elements. + + Returns + ------- + str or None + If `buf` is None, returns the result as a string. Otherwise returns `None`. 
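A small sketch of the ``delimiter`` argument described above (the exact spacing
of the returned string is illustrative, not verified):

    >>> pd.DataFrame({'A': [1, 2]}).style.to_string(delimiter="|")  # doctest: +SKIP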
+ + Examples + -------- + >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}) + >>> df.style.to_string() + ' A B\\n0 1 3\\n1 2 4\\n' + """ + obj = self._copy(deepcopy=True) + + if sparse_index is None: + sparse_index = get_option("styler.sparse.index") + if sparse_columns is None: + sparse_columns = get_option("styler.sparse.columns") + + text = obj._render_string( + sparse_columns=sparse_columns, + sparse_index=sparse_index, + max_rows=max_rows, + max_cols=max_columns, + delimiter=delimiter, + ) + return save_to_buffer( + text, buf=buf, encoding=(encoding if buf is not None else None) + ) + + def set_td_classes(self, classes: DataFrame) -> Styler: + """ + Set the ``class`` attribute of ``
+<td>`` HTML elements.
+
+        Parameters
+        ----------
+        classes : DataFrame
+            DataFrame containing strings that will be translated to CSS classes,
+            mapped by identical column and index key values that must exist on the
+            underlying Styler data. None, NaN values, and empty strings will
+            be ignored and not affect the rendered HTML.
+
+        Returns
+        -------
+        Styler
+
+        See Also
+        --------
+        Styler.set_table_styles: Set the table styles included within the
+            ``<style>`` HTML element.
+
+        Examples
+        --------
+        [rendered HTML-string example lost to extraction; only stray table
+        fragments ("A", "B", 0, 1) survived]
' + """ + if not classes.index.is_unique or not classes.columns.is_unique: + raise KeyError( + "Classes render only if `classes` has unique index and columns." + ) + classes = classes.reindex_like(self.data) + + for r, row_tup in enumerate(classes.itertuples()): + for c, value in enumerate(row_tup[1:]): + if not (pd.isna(value) or value == ""): + self.cell_context[(r, c)] = str(value) + + return self + + def _update_ctx(self, attrs: DataFrame) -> None: + """ + Update the state of the ``Styler`` for data cells. + + Collects a mapping of {index_label: [('', ''), ..]}. + + Parameters + ---------- + attrs : DataFrame + should contain strings of ': ;: ' + Whitespace shouldn't matter and the final trailing ';' shouldn't + matter. + """ + if not self.index.is_unique or not self.columns.is_unique: + raise KeyError( + "`Styler.apply` and `.map` are not compatible " + "with non-unique index or columns." + ) + + for cn in attrs.columns: + j = self.columns.get_loc(cn) + ser = attrs[cn] + for rn, c in ser.items(): + if not c or pd.isna(c): + continue + css_list = maybe_convert_css_to_tuples(c) + i = self.index.get_loc(rn) + self.ctx[(i, j)].extend(css_list) + + def _update_ctx_header(self, attrs: DataFrame, axis: AxisInt) -> None: + """ + Update the state of the ``Styler`` for header cells. + + Collects a mapping of {index_label: [('', ''), ..]}. + + Parameters + ---------- + attrs : Series + Should contain strings of ': ;: ', and an + integer index. + Whitespace shouldn't matter and the final trailing ';' shouldn't + matter. + axis : int + Identifies whether the ctx object being updated is the index or columns + """ + for j in attrs.columns: + ser = attrs[j] + for i, c in ser.items(): + if not c: + continue + css_list = maybe_convert_css_to_tuples(c) + if axis == 0: + self.ctx_index[(i, j)].extend(css_list) + else: + self.ctx_columns[(j, i)].extend(css_list) + + def _copy(self, deepcopy: bool = False) -> Styler: + """ + Copies a Styler, allowing for deepcopy or shallow copy + + Copying a Styler aims to recreate a new Styler object which contains the same + data and styles as the original. 
+ + Data dependent attributes [copied and NOT exported]: + - formatting (._display_funcs) + - hidden index values or column values (.hidden_rows, .hidden_columns) + - tooltips + - cell_context (cell css classes) + - ctx (cell css styles) + - caption + - concatenated stylers + + Non-data dependent attributes [copied and exported]: + - css + - hidden index state and hidden columns state (.hide_index_, .hide_columns_) + - table_attributes + - table_styles + - applied styles (_todo) + + """ + # GH 40675, 52728 + styler = type(self)( + self.data, # populates attributes 'data', 'columns', 'index' as shallow + ) + shallow = [ # simple string or boolean immutables + "hide_index_", + "hide_columns_", + "hide_column_names", + "hide_index_names", + "table_attributes", + "cell_ids", + "caption", + "uuid", + "uuid_len", + "template_latex", # also copy templates if these have been customised + "template_html_style", + "template_html_table", + "template_html", + ] + deep = [ # nested lists or dicts + "css", + "concatenated", + "_display_funcs", + "_display_funcs_index", + "_display_funcs_columns", + "hidden_rows", + "hidden_columns", + "ctx", + "ctx_index", + "ctx_columns", + "cell_context", + "_todo", + "table_styles", + "tooltips", + ] + + for attr in shallow: + setattr(styler, attr, getattr(self, attr)) + + for attr in deep: + val = getattr(self, attr) + setattr(styler, attr, copy.deepcopy(val) if deepcopy else val) + + return styler + + def __copy__(self) -> Styler: + return self._copy(deepcopy=False) + + def __deepcopy__(self, memo) -> Styler: + return self._copy(deepcopy=True) + + def clear(self) -> None: + """ + Reset the ``Styler``, removing any previously applied styles. + + Returns None. + + Examples + -------- + >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, np.nan]}) + + After any added style: + + >>> df.style.highlight_null(color='yellow') # doctest: +SKIP + + Remove it with: + + >>> df.style.clear() # doctest: +SKIP + + Please see: + `Table Visualization <../../user_guide/style.ipynb>`_ for more examples. 
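A runnable variant of the sequence above (a sketch; ``clear`` mutates the
Styler in place and returns ``None``):

    >>> styler = df.style.highlight_null(color='yellow')            # doctest: +SKIP
    >>> styler.clear()                                              # doctest: +SKIP
    >>> html = styler.to_html()  # renders without the highlight    # doctest: +SKIP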
+ """ + # create default GH 40675 + clean_copy = Styler(self.data, uuid=self.uuid) + clean_attrs = [a for a in clean_copy.__dict__ if not callable(a)] + self_attrs = [a for a in self.__dict__ if not callable(a)] # maybe more attrs + for attr in clean_attrs: + setattr(self, attr, getattr(clean_copy, attr)) + for attr in set(self_attrs).difference(clean_attrs): + delattr(self, attr) + + def _apply( + self, + func: Callable, + axis: Axis | None = 0, + subset: Subset | None = None, + **kwargs, + ) -> Styler: + subset = slice(None) if subset is None else subset + subset = non_reducing_slice(subset) + data = self.data.loc[subset] + if data.empty: + result = DataFrame() + elif axis is None: + result = func(data, **kwargs) + if not isinstance(result, DataFrame): + if not isinstance(result, np.ndarray): + raise TypeError( + f"Function {repr(func)} must return a DataFrame or ndarray " + f"when passed to `Styler.apply` with axis=None" + ) + if data.shape != result.shape: + raise ValueError( + f"Function {repr(func)} returned ndarray with wrong shape.\n" + f"Result has shape: {result.shape}\n" + f"Expected shape: {data.shape}" + ) + result = DataFrame(result, index=data.index, columns=data.columns) + else: + axis = self.data._get_axis_number(axis) + if axis == 0: + result = data.apply(func, axis=0, **kwargs) + else: + result = data.T.apply(func, axis=0, **kwargs).T # see GH 42005 + + if isinstance(result, Series): + raise ValueError( + f"Function {repr(func)} resulted in the apply method collapsing to a " + f"Series.\nUsually, this is the result of the function returning a " + f"single value, instead of list-like." + ) + msg = ( + f"Function {repr(func)} created invalid {{0}} labels.\nUsually, this is " + f"the result of the function returning a " + f"{'Series' if axis is not None else 'DataFrame'} which contains invalid " + f"labels, or returning an incorrectly shaped, list-like object which " + f"cannot be mapped to labels, possibly due to applying the function along " + f"the wrong axis.\n" + f"Result {{0}} has shape: {{1}}\n" + f"Expected {{0}} shape: {{2}}" + ) + if not all(result.index.isin(data.index)): + raise ValueError(msg.format("index", result.index.shape, data.index.shape)) + if not all(result.columns.isin(data.columns)): + raise ValueError( + msg.format("columns", result.columns.shape, data.columns.shape) + ) + self._update_ctx(result) + return self + + @Substitution(subset=subset_args) + def apply( + self, + func: Callable, + axis: Axis | None = 0, + subset: Subset | None = None, + **kwargs, + ) -> Styler: + """ + Apply a CSS-styling function column-wise, row-wise, or table-wise. + + Updates the HTML representation with the result. + + Parameters + ---------- + func : function + ``func`` should take a Series if ``axis`` in [0,1] and return a list-like + object of same length, or a Series, not necessarily of same length, with + valid index labels considering ``subset``. + ``func`` should take a DataFrame if ``axis`` is ``None`` and return either + an ndarray with the same shape or a DataFrame, not necessarily of the same + shape, with valid index and columns labels considering ``subset``. + + .. versionchanged:: 1.3.0 + + .. versionchanged:: 1.4.0 + + axis : {0 or 'index', 1 or 'columns', None}, default 0 + Apply to each column (``axis=0`` or ``'index'``), to each row + (``axis=1`` or ``'columns'``), or to the entire DataFrame at once + with ``axis=None``. + %(subset)s + **kwargs : dict + Pass along to ``func``. 
+ + Returns + ------- + Styler + + See Also + -------- + Styler.map_index: Apply a CSS-styling function to headers elementwise. + Styler.apply_index: Apply a CSS-styling function to headers level-wise. + Styler.map: Apply a CSS-styling function elementwise. + + Notes + ----- + The elements of the output of ``func`` should be CSS styles as strings, in the + format 'attribute: value; attribute2: value2; ...' or, + if nothing is to be applied to that element, an empty string or ``None``. + + This is similar to ``DataFrame.apply``, except that ``axis=None`` + applies the function to the entire DataFrame at once, + rather than column-wise or row-wise. + + Examples + -------- + >>> def highlight_max(x, color): + ... return np.where(x == np.nanmax(x.to_numpy()), f"color: {color};", None) + >>> df = pd.DataFrame(np.random.randn(5, 2), columns=["A", "B"]) + >>> df.style.apply(highlight_max, color='red') # doctest: +SKIP + >>> df.style.apply(highlight_max, color='blue', axis=1) # doctest: +SKIP + >>> df.style.apply(highlight_max, color='green', axis=None) # doctest: +SKIP + + Using ``subset`` to restrict application to a single column or multiple columns + + >>> df.style.apply(highlight_max, color='red', subset="A") + ... # doctest: +SKIP + >>> df.style.apply(highlight_max, color='red', subset=["A", "B"]) + ... # doctest: +SKIP + + Using a 2d input to ``subset`` to select rows in addition to columns + + >>> df.style.apply(highlight_max, color='red', subset=([0, 1, 2], slice(None))) + ... # doctest: +SKIP + >>> df.style.apply(highlight_max, color='red', subset=(slice(0, 5, 2), "A")) + ... # doctest: +SKIP + + Using a function which returns a Series / DataFrame of unequal length but + containing valid index labels + + >>> df = pd.DataFrame([[1, 2], [3, 4], [4, 6]], index=["A1", "A2", "Total"]) + >>> total_style = pd.Series("font-weight: bold;", index=["Total"]) + >>> df.style.apply(lambda s: total_style) # doctest: +SKIP + + See `Table Visualization <../../user_guide/style.ipynb>`_ user guide for + more details. + """ + self._todo.append( + (lambda instance: getattr(instance, "_apply"), (func, axis, subset), kwargs) + ) + return self + + def _apply_index( + self, + func: Callable, + axis: Axis = 0, + level: Level | list[Level] | None = None, + method: str = "apply", + **kwargs, + ) -> Styler: + axis = self.data._get_axis_number(axis) + obj = self.index if axis == 0 else self.columns + + levels_ = refactor_levels(level, obj) + data = DataFrame(obj.to_list()).loc[:, levels_] + + if method == "apply": + result = data.apply(func, axis=0, **kwargs) + elif method == "map": + result = data.map(func, **kwargs) + + self._update_ctx_header(result, axis) + return self + + @doc( + this="apply", + wise="level-wise", + alt="map", + altwise="elementwise", + func="take a Series and return a string array of the same length", + input_note="the index as a Series, if an Index, or a level of a MultiIndex", + output_note="an identically sized array of CSS styles as strings", + var="s", + ret='np.where(s == "B", "background-color: yellow;", "")', + ret2='["background-color: yellow;" if "x" in v else "" for v in s]', + ) + def apply_index( + self, + func: Callable, + axis: AxisInt | str = 0, + level: Level | list[Level] | None = None, + **kwargs, + ) -> Styler: + """ + Apply a CSS-styling function to the index or column headers, {wise}. + + Updates the HTML representation with the result. + + .. versionadded:: 1.4.0 + + .. versionadded:: 2.1.0 + Styler.applymap_index was deprecated and renamed to Styler.map_index. 
+ + Parameters + ---------- + func : function + ``func`` should {func}. + axis : {{0, 1, "index", "columns"}} + The headers over which to apply the function. + level : int, str, list, optional + If index is MultiIndex the level(s) over which to apply the function. + **kwargs : dict + Pass along to ``func``. + + Returns + ------- + Styler + + See Also + -------- + Styler.{alt}_index: Apply a CSS-styling function to headers {altwise}. + Styler.apply: Apply a CSS-styling function column-wise, row-wise, or table-wise. + Styler.map: Apply a CSS-styling function elementwise. + + Notes + ----- + Each input to ``func`` will be {input_note}. The output of ``func`` should be + {output_note}, in the format 'attribute: value; attribute2: value2; ...' + or, if nothing is to be applied to that element, an empty string or ``None``. + + Examples + -------- + Basic usage to conditionally highlight values in the index. + + >>> df = pd.DataFrame([[1,2], [3,4]], index=["A", "B"]) + >>> def color_b(s): + ... return {ret} + >>> df.style.{this}_index(color_b) # doctest: +SKIP + + .. figure:: ../../_static/style/appmaphead1.png + + Selectively applying to specific levels of MultiIndex columns. + + >>> midx = pd.MultiIndex.from_product([['ix', 'jy'], [0, 1], ['x3', 'z4']]) + >>> df = pd.DataFrame([np.arange(8)], columns=midx) + >>> def highlight_x({var}): + ... return {ret2} + >>> df.style.{this}_index(highlight_x, axis="columns", level=[0, 2]) + ... # doctest: +SKIP + + .. figure:: ../../_static/style/appmaphead2.png + """ + self._todo.append( + ( + lambda instance: getattr(instance, "_apply_index"), + (func, axis, level, "apply"), + kwargs, + ) + ) + return self + + @doc( + apply_index, + this="map", + wise="elementwise", + alt="apply", + altwise="level-wise", + func="take a scalar and return a string", + input_note="an index value, if an Index, or a level value of a MultiIndex", + output_note="CSS styles as a string", + var="v", + ret='"background-color: yellow;" if v == "B" else None', + ret2='"background-color: yellow;" if "x" in v else None', + ) + def map_index( + self, + func: Callable, + axis: AxisInt | str = 0, + level: Level | list[Level] | None = None, + **kwargs, + ) -> Styler: + self._todo.append( + ( + lambda instance: getattr(instance, "_apply_index"), + (func, axis, level, "map"), + kwargs, + ) + ) + return self + + def applymap_index( + self, + func: Callable, + axis: AxisInt | str = 0, + level: Level | list[Level] | None = None, + **kwargs, + ) -> Styler: + """ + Apply a CSS-styling function to the index or column headers, elementwise. + + .. deprecated:: 2.1.0 + + Styler.applymap_index has been deprecated. Use Styler.map_index instead. + + Parameters + ---------- + func : function + ``func`` should take a scalar and return a string. + axis : {{0, 1, "index", "columns"}} + The headers over which to apply the function. + level : int, str, list, optional + If index is MultiIndex the level(s) over which to apply the function. + **kwargs : dict + Pass along to ``func``. + + Returns + ------- + Styler + """ + warnings.warn( + "Styler.applymap_index has been deprecated. Use Styler.map_index instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self.map_index(func, axis, level, **kwargs) + + def _map(self, func: Callable, subset: Subset | None = None, **kwargs) -> Styler: + func = partial(func, **kwargs) # map doesn't take kwargs? 
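# The partial-binding above pre-fixes keyword arguments so the elementwise map
# needs no extra plumbing; an equivalent standalone sketch (hypothetical helper,
# not part of this module):
#
#     from functools import partial
#     import pandas as pd
#
#     def color_negative(v, color):
#         return f"color: {color};" if v < 0 else None
#
#     pd.DataFrame({"A": [1, -2]}).map(partial(color_negative, color="red"))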
+ if subset is None: + subset = IndexSlice[:] + subset = non_reducing_slice(subset) + result = self.data.loc[subset].map(func) + self._update_ctx(result) + return self + + @Substitution(subset=subset_args) + def map(self, func: Callable, subset: Subset | None = None, **kwargs) -> Styler: + """ + Apply a CSS-styling function elementwise. + + Updates the HTML representation with the result. + + Parameters + ---------- + func : function + ``func`` should take a scalar and return a string. + %(subset)s + **kwargs : dict + Pass along to ``func``. + + Returns + ------- + Styler + + See Also + -------- + Styler.map_index: Apply a CSS-styling function to headers elementwise. + Styler.apply_index: Apply a CSS-styling function to headers level-wise. + Styler.apply: Apply a CSS-styling function column-wise, row-wise, or table-wise. + + Notes + ----- + The elements of the output of ``func`` should be CSS styles as strings, in the + format 'attribute: value; attribute2: value2; ...' or, + if nothing is to be applied to that element, an empty string or ``None``. + + Examples + -------- + >>> def color_negative(v, color): + ... return f"color: {color};" if v < 0 else None + >>> df = pd.DataFrame(np.random.randn(5, 2), columns=["A", "B"]) + >>> df.style.map(color_negative, color='red') # doctest: +SKIP + + Using ``subset`` to restrict application to a single column or multiple columns + + >>> df.style.map(color_negative, color='red', subset="A") + ... # doctest: +SKIP + >>> df.style.map(color_negative, color='red', subset=["A", "B"]) + ... # doctest: +SKIP + + Using a 2d input to ``subset`` to select rows in addition to columns + + >>> df.style.map(color_negative, color='red', + ... subset=([0,1,2], slice(None))) # doctest: +SKIP + >>> df.style.map(color_negative, color='red', subset=(slice(0,5,2), "A")) + ... # doctest: +SKIP + + See `Table Visualization <../../user_guide/style.ipynb>`_ user guide for + more details. + """ + self._todo.append( + (lambda instance: getattr(instance, "_map"), (func, subset), kwargs) + ) + return self + + @Substitution(subset=subset_args) + def applymap( + self, func: Callable, subset: Subset | None = None, **kwargs + ) -> Styler: + """ + Apply a CSS-styling function elementwise. + + .. deprecated:: 2.1.0 + + Styler.applymap has been deprecated. Use Styler.map instead. + + Parameters + ---------- + func : function + ``func`` should take a scalar and return a string. + %(subset)s + **kwargs : dict + Pass along to ``func``. + + Returns + ------- + Styler + """ + warnings.warn( + "Styler.applymap has been deprecated. Use Styler.map instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self.map(func, subset, **kwargs) + + def set_table_attributes(self, attributes: str) -> Styler: + """ + Set the table attributes added to the ```` HTML element. + + These are items in addition to automatic (by default) ``id`` attribute. + + Parameters + ---------- + attributes : str + + Returns + ------- + Styler + + See Also + -------- + Styler.set_table_styles: Set the table styles included within the `` block + + Parameters + ---------- + sparsify_index : bool + Whether index_headers section will add rowspan attributes (>1) to elements. + + Returns + ------- + body : list + The associated HTML elements needed for template rendering. 
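For reference, a short sketch of how ``set_table_attributes`` above is used; the attribute string is passed through to the ``<table>`` tag verbatim (the class name here is illustrative):

import pandas as pd

df = pd.DataFrame({"A": [1, 2]})
styler = df.style.set_table_attributes('class="pure-table"')
# the opening tag then renders roughly as: <table id="T_..." class="pure-table">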
+ """ + rlabels = self.data.index.tolist() + if not isinstance(self.data.index, MultiIndex): + rlabels = [[x] for x in rlabels] + + body: list = [] + visible_row_count: int = 0 + for r, row_tup in [ + z for z in enumerate(self.data.itertuples()) if z[0] not in self.hidden_rows + ]: + visible_row_count += 1 + if self._check_trim( + visible_row_count, + max_rows, + body, + "row", + ): + break + + body_row = self._generate_body_row( + (r, row_tup, rlabels), max_cols, idx_lengths + ) + body.append(body_row) + return body + + def _check_trim( + self, + count: int, + max: int, + obj: list, + element: str, + css: str | None = None, + value: str = "...", + ) -> bool: + """ + Indicates whether to break render loops and append a trimming indicator + + Parameters + ---------- + count : int + The loop count of previous visible items. + max : int + The allowable rendered items in the loop. + obj : list + The current render collection of the rendered items. + element : str + The type of element to append in the case a trimming indicator is needed. + css : str, optional + The css to add to the trimming indicator element. + value : str, optional + The value of the elements display if necessary. + + Returns + ------- + result : bool + Whether a trimming element was required and appended. + """ + if count > max: + if element == "row": + obj.append(self._generate_trimmed_row(max)) + else: + obj.append(_element(element, css, value, True, attributes="")) + return True + return False + + def _generate_trimmed_row(self, max_cols: int) -> list: + """ + When a render has too many rows we generate a trimming row containing "..." + + Parameters + ---------- + max_cols : int + Number of permissible columns + + Returns + ------- + list of elements + """ + index_headers = [ + _element( + "th", + ( + f"{self.css['row_heading']} {self.css['level']}{c} " + f"{self.css['row_trim']}" + ), + "...", + not self.hide_index_[c], + attributes="", + ) + for c in range(self.data.index.nlevels) + ] + + data: list = [] + visible_col_count: int = 0 + for c, _ in enumerate(self.columns): + data_element_visible = c not in self.hidden_columns + if data_element_visible: + visible_col_count += 1 + if self._check_trim( + visible_col_count, + max_cols, + data, + "td", + f"{self.css['data']} {self.css['row_trim']} {self.css['col_trim']}", + ): + break + + data.append( + _element( + "td", + f"{self.css['data']} {self.css['col']}{c} {self.css['row_trim']}", + "...", + data_element_visible, + attributes="", + ) + ) + + return index_headers + data + + def _generate_body_row( + self, + iter: tuple, + max_cols: int, + idx_lengths: dict, + ): + """ + Generate a regular row for the body section of appropriate format. + + +--------------------------------------------+---------------------------+ + | index_header_0 ... index_header_n | data_by_column ... | + +--------------------------------------------+---------------------------+ + + Parameters + ---------- + iter : tuple + Iterable from outer scope: row number, row data tuple, row index labels. + max_cols : int + Number of permissible columns. 
+ idx_lengths : dict + A map of the sparsification structure of the index + + Returns + ------- + list of elements + """ + r, row_tup, rlabels = iter + + index_headers = [] + for c, value in enumerate(rlabels[r]): + header_element_visible = ( + _is_visible(r, c, idx_lengths) and not self.hide_index_[c] + ) + header_element = _element( + "th", + ( + f"{self.css['row_heading']} {self.css['level']}{c} " + f"{self.css['row']}{r}" + ), + value, + header_element_visible, + display_value=self._display_funcs_index[(r, c)](value), + attributes=( + f'rowspan="{idx_lengths.get((c, r), 0)}"' + if idx_lengths.get((c, r), 0) > 1 + else "" + ), + ) + + if self.cell_ids: + header_element[ + "id" + ] = f"{self.css['level']}{c}_{self.css['row']}{r}" # id is given + if ( + header_element_visible + and (r, c) in self.ctx_index + and self.ctx_index[r, c] + ): + # always add id if a style is specified + header_element["id"] = f"{self.css['level']}{c}_{self.css['row']}{r}" + self.cellstyle_map_index[tuple(self.ctx_index[r, c])].append( + f"{self.css['level']}{c}_{self.css['row']}{r}" + ) + + index_headers.append(header_element) + + data: list = [] + visible_col_count: int = 0 + for c, value in enumerate(row_tup[1:]): + data_element_visible = ( + c not in self.hidden_columns and r not in self.hidden_rows + ) + if data_element_visible: + visible_col_count += 1 + if self._check_trim( + visible_col_count, + max_cols, + data, + "td", + f"{self.css['data']} {self.css['row']}{r} {self.css['col_trim']}", + ): + break + + # add custom classes from cell context + cls = "" + if (r, c) in self.cell_context: + cls = " " + self.cell_context[r, c] + + data_element = _element( + "td", + ( + f"{self.css['data']} {self.css['row']}{r} " + f"{self.css['col']}{c}{cls}" + ), + value, + data_element_visible, + attributes="", + display_value=self._display_funcs[(r, c)](value), + ) + + if self.cell_ids: + data_element["id"] = f"{self.css['row']}{r}_{self.css['col']}{c}" + if data_element_visible and (r, c) in self.ctx and self.ctx[r, c]: + # always add id if needed due to specified style + data_element["id"] = f"{self.css['row']}{r}_{self.css['col']}{c}" + self.cellstyle_map[tuple(self.ctx[r, c])].append( + f"{self.css['row']}{r}_{self.css['col']}{c}" + ) + + data.append(data_element) + + return index_headers + data + + def _translate_latex(self, d: dict, clines: str | None) -> None: + r""" + Post-process the default render dict for the LaTeX template format. + + Processing items included are: + - Remove hidden columns from the non-headers part of the body. + - Place cellstyles directly in td cells rather than use cellstyle_map. + - Remove hidden indexes or reinsert missing th elements if part of multiindex + or multirow sparsification (so that \multirow and \multicol work correctly). + """ + index_levels = self.index.nlevels + visible_index_level_n = index_levels - sum(self.hide_index_) + d["head"] = [ + [ + {**col, "cellstyle": self.ctx_columns[r, c - visible_index_level_n]} + for c, col in enumerate(row) + if col["is_visible"] + ] + for r, row in enumerate(d["head"]) + ] + + def _concatenated_visible_rows(obj, n, row_indices): + """ + Extract all visible row indices recursively from concatenated stylers. 
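The recursion above exists because stylers can be chained with ``Styler.concat``, each part keeping its own hidden rows. A brief sketch of the feature this supports, with hypothetical frames and a recent pandas assumed:

import pandas as pd

df = pd.DataFrame({"x": [1.0, 2.0]}, index=["a", "b"])
totals = pd.DataFrame({"x": [3.0]}, index=["Total"])
styler = df.style.concat(totals.style)            # stored on styler.concatenated
latex = styler.to_latex(clines="skip-last;data")  # clines emitted per visible row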
+ """ + row_indices.extend( + [r + n for r in range(len(obj.index)) if r not in obj.hidden_rows] + ) + n += len(obj.index) + for concatenated in obj.concatenated: + n = _concatenated_visible_rows(concatenated, n, row_indices) + return n + + def concatenated_visible_rows(obj): + row_indices: list[int] = [] + _concatenated_visible_rows(obj, 0, row_indices) + # TODO try to consolidate the concat visible rows + # methods to a single function / recursion for simplicity + return row_indices + + body = [] + for r, row in zip(concatenated_visible_rows(self), d["body"]): + # note: cannot enumerate d["body"] because rows were dropped if hidden + # during _translate_body so must zip to acquire the true r-index associated + # with the ctx obj which contains the cell styles. + if all(self.hide_index_): + row_body_headers = [] + else: + row_body_headers = [ + { + **col, + "display_value": col["display_value"] + if col["is_visible"] + else "", + "cellstyle": self.ctx_index[r, c], + } + for c, col in enumerate(row[:index_levels]) + if (col["type"] == "th" and not self.hide_index_[c]) + ] + + row_body_cells = [ + {**col, "cellstyle": self.ctx[r, c]} + for c, col in enumerate(row[index_levels:]) + if (col["is_visible"] and col["type"] == "td") + ] + + body.append(row_body_headers + row_body_cells) + d["body"] = body + + # clines are determined from info on index_lengths and hidden_rows and input + # to a dict defining which row clines should be added in the template. + if clines not in [ + None, + "all;data", + "all;index", + "skip-last;data", + "skip-last;index", + ]: + raise ValueError( + f"`clines` value of {clines} is invalid. Should either be None or one " + f"of 'all;data', 'all;index', 'skip-last;data', 'skip-last;index'." + ) + if clines is not None: + data_len = len(row_body_cells) if "data" in clines and d["body"] else 0 + + d["clines"] = defaultdict(list) + visible_row_indexes: list[int] = [ + r for r in range(len(self.data.index)) if r not in self.hidden_rows + ] + visible_index_levels: list[int] = [ + i for i in range(index_levels) if not self.hide_index_[i] + ] + for rn, r in enumerate(visible_row_indexes): + for lvln, lvl in enumerate(visible_index_levels): + if lvl == index_levels - 1 and "skip-last" in clines: + continue + idx_len = d["index_lengths"].get((lvl, r), None) + if idx_len is not None: # i.e. not a sparsified entry + d["clines"][rn + idx_len].append( + f"\\cline{{{lvln+1}-{len(visible_index_levels)+data_len}}}" + ) + + def format( + self, + formatter: ExtFormatter | None = None, + subset: Subset | None = None, + na_rep: str | None = None, + precision: int | None = None, + decimal: str = ".", + thousands: str | None = None, + escape: str | None = None, + hyperlinks: str | None = None, + ) -> StylerRenderer: + r""" + Format the text display value of cells. + + Parameters + ---------- + formatter : str, callable, dict or None + Object to define how values are displayed. See notes. + subset : label, array-like, IndexSlice, optional + A valid 2d input to `DataFrame.loc[]`, or, in the case of a 1d input + or single key, to `DataFrame.loc[:, ]` where the columns are + prioritised, to limit ``data`` to *before* applying the function. + na_rep : str, optional + Representation for missing values. + If ``na_rep`` is None, no special formatting is applied. + precision : int, optional + Floating point precision to use for display purposes, if not determined by + the specified ``formatter``. + + .. versionadded:: 1.3.0 + + decimal : str, default "." 
+            Character used as decimal separator for floats, complex and integers.
+
+            .. versionadded:: 1.3.0
+
+        thousands : str, optional, default None
+            Character used as thousands separator for floats, complex and integers.
+
+            .. versionadded:: 1.3.0
+
+        escape : str, optional
+            Use 'html' to replace the characters ``&``, ``<``, ``>``, ``'``, and ``"``
+            in the cell display string with HTML-safe sequences.
+            Use 'latex' to replace the characters ``&``, ``%``, ``$``, ``#``, ``_``,
+            ``{``, ``}``, ``~``, ``^``, and ``\`` in the cell display string with
+            LaTeX-safe sequences.
+            Use 'latex-math' to replace the characters the same way as in 'latex' mode,
+            except for math substrings, which either are surrounded
+            by two characters ``$`` or start with the character ``\(`` and
+            end with ``\)``. Escaping is done before ``formatter``.
+
+            .. versionadded:: 1.3.0
+
+        hyperlinks : {"html", "latex"}, optional
+            Convert string patterns containing https://, http://, ftp:// or www. to
+            HTML ``<a>`` tags as clickable URL hyperlinks if "html", or LaTeX \href
+            commands if "latex".
+
+            .. versionadded:: 1.4.0
+
+        Returns
+        -------
+        Styler
+
+        See Also
+        --------
+        Styler.format_index: Format the text display value of index labels.
+
+        Notes
+        -----
+        This method assigns a formatting function, ``formatter``, to each cell in the
+        DataFrame. If ``formatter`` is ``None``, then the default formatter is used.
+        If a callable, then that function should take a data value as input and return
+        a displayable representation, such as a string. If ``formatter`` is
+        given as a string this is assumed to be a valid Python format specification
+        and is wrapped to a callable as ``string.format(x)``. If a ``dict`` is given,
+        keys should correspond to column names, and values should be string or
+        callable, as above.
+
+        The default formatter currently expresses floats and complex numbers with the
+        pandas display precision unless using the ``precision`` argument here. The
+        default formatter does not adjust the representation of missing values unless
+        the ``na_rep`` argument is used.
+
+        The ``subset`` argument defines which region to apply the formatting function
+        to. If the ``formatter`` argument is given in dict form but does not include
+        all columns within the subset then these columns will have the default formatter
+        applied. Any columns in the formatter dict excluded from the subset will
+        be ignored.
+
+        When using a ``formatter`` string the dtypes must be compatible, otherwise a
+        `ValueError` will be raised.
+
+        When instantiating a Styler, default formatting can be applied by setting the
+        ``pandas.options``:
+
+        - ``styler.format.formatter``: default None.
+        - ``styler.format.na_rep``: default None.
+        - ``styler.format.precision``: default 6.
+        - ``styler.format.decimal``: default ".".
+        - ``styler.format.thousands``: default None.
+        - ``styler.format.escape``: default None.
+
+        .. warning::
+           `Styler.format` is ignored when using the output format `Styler.to_excel`,
+           since Excel and Python have inherently different formatting structures.
+           However, it is possible to use the `number-format` pseudo CSS attribute
+           to force Excel permissible formatting. See examples.
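Because the ``styler.format.*`` options listed above are read when the ``Styler`` is created, defaults can be set per-context instead of per-call; a brief sketch:

import pandas as pd

df = pd.DataFrame({"x": [1234.5678]})
with pd.option_context(
    "styler.format.precision", 2, "styler.format.thousands", " "
):
    html = df.style.to_html()  # "x" renders as "1 234.57"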
+ + Examples + -------- + Using ``na_rep`` and ``precision`` with the default ``formatter`` + + >>> df = pd.DataFrame([[np.nan, 1.0, 'A'], [2.0, np.nan, 3.0]]) + >>> df.style.format(na_rep='MISS', precision=3) # doctest: +SKIP + 0 1 2 + 0 MISS 1.000 A + 1 2.000 MISS 3.000 + + Using a ``formatter`` specification on consistent column dtypes + + >>> df.style.format('{:.2f}', na_rep='MISS', subset=[0,1]) # doctest: +SKIP + 0 1 2 + 0 MISS 1.00 A + 1 2.00 MISS 3.000000 + + Using the default ``formatter`` for unspecified columns + + >>> df.style.format({0: '{:.2f}', 1: '£ {:.1f}'}, na_rep='MISS', precision=1) + ... # doctest: +SKIP + 0 1 2 + 0 MISS £ 1.0 A + 1 2.00 MISS 3.0 + + Multiple ``na_rep`` or ``precision`` specifications under the default + ``formatter``. + + >>> (df.style.format(na_rep='MISS', precision=1, subset=[0]) + ... .format(na_rep='PASS', precision=2, subset=[1, 2])) # doctest: +SKIP + 0 1 2 + 0 MISS 1.00 A + 1 2.0 PASS 3.00 + + Using a callable ``formatter`` function. + + >>> func = lambda s: 'STRING' if isinstance(s, str) else 'FLOAT' + >>> df.style.format({0: '{:.1f}', 2: func}, precision=4, na_rep='MISS') + ... # doctest: +SKIP + 0 1 2 + 0 MISS 1.0000 STRING + 1 2.0 MISS FLOAT + + Using a ``formatter`` with HTML ``escape`` and ``na_rep``. + + >>> df = pd.DataFrame([['
<div></div>', '"A&B"', None]])
+        >>> s = df.style.format(
+        ...     '<a href="a.com/{0}">{0}</a>', escape="html", na_rep="NA"
+        ... )
+        >>> s.to_html()  # doctest: +SKIP
+        ...
+        <td .. ><a href="a.com/&lt;div&gt;&lt;/div&gt;">&lt;div&gt;&lt;/div&gt;</a></td>
+        <td .. ><a href="a.com/&#34;A&amp;B&#34;">&#34;A&amp;B&#34;</a></td>
+        <td .. ><a href="a.com/NA">NA</a></td>
+ + + ... + + Using a ``formatter`` with ``escape`` in 'latex' mode. + + >>> df = pd.DataFrame([["123"], ["~ ^"], ["$%#"]]) + >>> df.style.format("\\textbf{{{}}}", escape="latex").to_latex() + ... # doctest: +SKIP + \begin{tabular}{ll} + & 0 \\ + 0 & \textbf{123} \\ + 1 & \textbf{\textasciitilde \space \textasciicircum } \\ + 2 & \textbf{\$\%\#} \\ + \end{tabular} + + Applying ``escape`` in 'latex-math' mode. In the example below + we enter math mode using the character ``$``. + + >>> df = pd.DataFrame([[r"$\sum_{i=1}^{10} a_i$ a~b $\alpha \ + ... = \frac{\beta}{\zeta^2}$"], ["%#^ $ \$x^2 $"]]) + >>> df.style.format(escape="latex-math").to_latex() + ... # doctest: +SKIP + \begin{tabular}{ll} + & 0 \\ + 0 & $\sum_{i=1}^{10} a_i$ a\textasciitilde b $\alpha = \frac{\beta}{\zeta^2}$ \\ + 1 & \%\#\textasciicircum \space $ \$x^2 $ \\ + \end{tabular} + + We can use the character ``\(`` to enter math mode and the character ``\)`` + to close math mode. + + >>> df = pd.DataFrame([[r"\(\sum_{i=1}^{10} a_i\) a~b \(\alpha \ + ... = \frac{\beta}{\zeta^2}\)"], ["%#^ \( \$x^2 \)"]]) + >>> df.style.format(escape="latex-math").to_latex() + ... # doctest: +SKIP + \begin{tabular}{ll} + & 0 \\ + 0 & \(\sum_{i=1}^{10} a_i\) a\textasciitilde b \(\alpha + = \frac{\beta}{\zeta^2}\) \\ + 1 & \%\#\textasciicircum \space \( \$x^2 \) \\ + \end{tabular} + + If we have in one DataFrame cell a combination of both shorthands + for math formulas, the shorthand with the sign ``$`` will be applied. + + >>> df = pd.DataFrame([[r"\( x^2 \) $x^2$"], \ + ... [r"$\frac{\beta}{\zeta}$ \(\frac{\beta}{\zeta}\)"]]) + >>> df.style.format(escape="latex-math").to_latex() + ... # doctest: +SKIP + \begin{tabular}{ll} + & 0 \\ + 0 & \textbackslash ( x\textasciicircum 2 \textbackslash ) $x^2$ \\ + 1 & $\frac{\beta}{\zeta}$ \textbackslash (\textbackslash + frac\{\textbackslash beta\}\{\textbackslash zeta\}\textbackslash ) \\ + \end{tabular} + + Pandas defines a `number-format` pseudo CSS attribute instead of the `.format` + method to create `to_excel` permissible formatting. Note that semi-colons are + CSS protected characters but used as separators in Excel's format string. + Replace semi-colons with the section separator character (ASCII-245) when + defining the formatting here. + + >>> df = pd.DataFrame({"A": [1, 0, -1]}) + >>> pseudo_css = "number-format: 0§[Red](0)§-§@;" + >>> filename = "formatted_file.xlsx" + >>> df.style.map(lambda v: pseudo_css).to_excel(filename) # doctest: +SKIP + + .. 
figure:: ../../_static/style/format_excel_css.png + """ + if all( + ( + formatter is None, + subset is None, + precision is None, + decimal == ".", + thousands is None, + na_rep is None, + escape is None, + hyperlinks is None, + ) + ): + self._display_funcs.clear() + return self # clear the formatter / revert to default and avoid looping + + subset = slice(None) if subset is None else subset + subset = non_reducing_slice(subset) + data = self.data.loc[subset] + + if not isinstance(formatter, dict): + formatter = {col: formatter for col in data.columns} + + cis = self.columns.get_indexer_for(data.columns) + ris = self.index.get_indexer_for(data.index) + for ci in cis: + format_func = _maybe_wrap_formatter( + formatter.get(self.columns[ci]), + na_rep=na_rep, + precision=precision, + decimal=decimal, + thousands=thousands, + escape=escape, + hyperlinks=hyperlinks, + ) + for ri in ris: + self._display_funcs[(ri, ci)] = format_func + + return self + + def format_index( + self, + formatter: ExtFormatter | None = None, + axis: Axis = 0, + level: Level | list[Level] | None = None, + na_rep: str | None = None, + precision: int | None = None, + decimal: str = ".", + thousands: str | None = None, + escape: str | None = None, + hyperlinks: str | None = None, + ) -> StylerRenderer: + r""" + Format the text display value of index labels or column headers. + + .. versionadded:: 1.4.0 + + Parameters + ---------- + formatter : str, callable, dict or None + Object to define how values are displayed. See notes. + axis : {0, "index", 1, "columns"} + Whether to apply the formatter to the index or column headers. + level : int, str, list + The level(s) over which to apply the generic formatter. + na_rep : str, optional + Representation for missing values. + If ``na_rep`` is None, no special formatting is applied. + precision : int, optional + Floating point precision to use for display purposes, if not determined by + the specified ``formatter``. + decimal : str, default "." + Character used as decimal separator for floats, complex and integers. + thousands : str, optional, default None + Character used as thousands separator for floats, complex and integers. + escape : str, optional + Use 'html' to replace the characters ``&``, ``<``, ``>``, ``'``, and ``"`` + in cell display string with HTML-safe sequences. + Use 'latex' to replace the characters ``&``, ``%``, ``$``, ``#``, ``_``, + ``{``, ``}``, ``~``, ``^``, and ``\`` in the cell display string with + LaTeX-safe sequences. + Escaping is done before ``formatter``. + hyperlinks : {"html", "latex"}, optional + Convert string patterns containing https://, http://, ftp:// or www. to + HTML tags as clickable URL hyperlinks if "html", or LaTeX \href + commands if "latex". + + Returns + ------- + Styler + + See Also + -------- + Styler.format: Format the text display value of data cells. + + Notes + ----- + This method assigns a formatting function, ``formatter``, to each level label + in the DataFrame's index or column headers. If ``formatter`` is ``None``, + then the default formatter is used. + If a callable then that function should take a label value as input and return + a displayable representation, such as a string. If ``formatter`` is + given as a string this is assumed to be a valid Python format specification + and is wrapped to a callable as ``string.format(x)``. If a ``dict`` is given, + keys should correspond to MultiIndex level numbers or names, and values should + be string or callable, as above. 
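In practice this means a dict ``formatter`` can target MultiIndex levels by number or by name; a small sketch (level and frame names are illustrative):

import pandas as pd

midx = pd.MultiIndex.from_product([["a", "b"], [1.25, 2.5]], names=["cat", "val"])
df = pd.DataFrame({"x": range(4)}, index=midx)
# keys may be level names or numbers; unlisted levels keep the default formatter
styler = df.style.format_index({"val": "{:.1f}"}, axis=0)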
+
+        The default formatter currently expresses floats and complex numbers with the
+        pandas display precision unless using the ``precision`` argument here. The
+        default formatter does not adjust the representation of missing values unless
+        the ``na_rep`` argument is used.
+
+        The ``level`` argument defines which levels of a MultiIndex to apply the
+        method to. If the ``formatter`` argument is given in dict form but does
+        not include all levels within the level argument then these unspecified levels
+        will have the default formatter applied. Any levels in the formatter dict
+        specifically excluded from the level argument will be ignored.
+
+        When using a ``formatter`` string the dtypes must be compatible, otherwise a
+        `ValueError` will be raised.
+
+        .. warning::
+           `Styler.format_index` is ignored when using the output format
+           `Styler.to_excel`, since Excel and Python have inherently different
+           formatting structures.
+           However, it is possible to use the `number-format` pseudo CSS attribute
+           to force Excel permissible formatting. See documentation for `Styler.format`.
+
+        Examples
+        --------
+        Using ``na_rep`` and ``precision`` with the default ``formatter``
+
+        >>> df = pd.DataFrame([[1, 2, 3]], columns=[2.0, np.nan, 4.0])
+        >>> df.style.format_index(axis=1, na_rep='MISS', precision=3)  # doctest: +SKIP
+            2.000    MISS    4.000
+        0       1       2        3
+
+        Using a ``formatter`` specification on consistent dtypes in a level
+
+        >>> df.style.format_index('{:.2f}', axis=1, na_rep='MISS')  # doctest: +SKIP
+             2.00    MISS    4.00
+        0       1       2       3
+
+        Using the default ``formatter`` for unspecified levels
+
+        >>> df = pd.DataFrame([[1, 2, 3]],
+        ...     columns=pd.MultiIndex.from_arrays([["a", "a", "b"], [2, np.nan, 4]]))
+        >>> df.style.format_index({0: lambda v: v.upper()}, axis=1, precision=1)
+        ...  # doctest: +SKIP
+                       A       B
+              2.0    nan     4.0
+        0       1      2       3
+
+        Using a callable ``formatter`` function.
+
+        >>> func = lambda s: 'STRING' if isinstance(s, str) else 'FLOAT'
+        >>> df.style.format_index(func, axis=1, na_rep='MISS')
+        ...  # doctest: +SKIP
+                  STRING  STRING
+        FLOAT       MISS   FLOAT
+        0       1      2       3
+
+        Using a ``formatter`` with HTML ``escape`` and ``na_rep``.
+
+        >>> df = pd.DataFrame([[1, 2, 3]], columns=['"A"', 'A&B', None])
+        >>> s = df.style.format_index('$ {0}', axis=1, escape="html", na_rep="NA")
+        ...  # doctest: +SKIP
+        <th .. >$ &#34;A&#34;</th>
+        <th .. >$ A&amp;B</th>
+        <th .. >NA</th>
+        ...
+    or ``<td>`` element.
+    """
+    if "display_value" not in kwargs:
+        kwargs["display_value"] = value
+    return {
+        "type": html_element,
+        "value": value,
+        "class": html_class,
+        "is_visible": is_visible,
+        **kwargs,
+    }
+
+
+def _get_trimming_maximums(
+    rn,
+    cn,
+    max_elements,
+    max_rows=None,
+    max_cols=None,
+    scaling_factor: float = 0.8,
+) -> tuple[int, int]:
+    """
+    Recursively reduce the number of rows and columns to satisfy max elements.
+
+    Parameters
+    ----------
+    rn, cn : int
+        The number of input rows / columns
+    max_elements : int
+        The number of allowable elements
+    max_rows, max_cols : int, optional
+        Directly specify an initial maximum rows or columns before compression.
+    scaling_factor : float
+        Factor at which to reduce the number of rows / columns to fit.
+ + Returns + ------- + rn, cn : tuple + New rn and cn values that satisfy the max_elements constraint + """ + + def scale_down(rn, cn): + if cn >= rn: + return rn, int(cn * scaling_factor) + else: + return int(rn * scaling_factor), cn + + if max_rows: + rn = max_rows if rn > max_rows else rn + if max_cols: + cn = max_cols if cn > max_cols else cn + + while rn * cn > max_elements: + rn, cn = scale_down(rn, cn) + + return rn, cn + + +def _get_level_lengths( + index: Index, + sparsify: bool, + max_index: int, + hidden_elements: Sequence[int] | None = None, +): + """ + Given an index, find the level length for each element. + + Parameters + ---------- + index : Index + Index or columns to determine lengths of each element + sparsify : bool + Whether to hide or show each distinct element in a MultiIndex + max_index : int + The maximum number of elements to analyse along the index due to trimming + hidden_elements : sequence of int + Index positions of elements hidden from display in the index affecting + length + + Returns + ------- + Dict : + Result is a dictionary of (level, initial_position): span + """ + if isinstance(index, MultiIndex): + levels = index._format_multi(sparsify=lib.no_default, include_names=False) + else: + levels = index._format_flat(include_name=False) + + if hidden_elements is None: + hidden_elements = [] + + lengths = {} + if not isinstance(index, MultiIndex): + for i, value in enumerate(levels): + if i not in hidden_elements: + lengths[(0, i)] = 1 + return lengths + + for i, lvl in enumerate(levels): + visible_row_count = 0 # used to break loop due to display trimming + for j, row in enumerate(lvl): + if visible_row_count > max_index: + break + if not sparsify: + # then lengths will always equal 1 since no aggregation. + if j not in hidden_elements: + lengths[(i, j)] = 1 + visible_row_count += 1 + elif (row is not lib.no_default) and (j not in hidden_elements): + # this element has not been sparsified so must be the start of section + last_label = j + lengths[(i, last_label)] = 1 + visible_row_count += 1 + elif row is not lib.no_default: + # even if the above is hidden, keep track of it in case length > 1 and + # later elements are visible + last_label = j + lengths[(i, last_label)] = 0 + elif j not in hidden_elements: + # then element must be part of sparsified section and is visible + visible_row_count += 1 + if visible_row_count > max_index: + break # do not add a length since the render trim limit reached + if lengths[(i, last_label)] == 0: + # if previous iteration was first-of-section but hidden then offset + last_label = j + lengths[(i, last_label)] = 1 + else: + # else add to previous iteration + lengths[(i, last_label)] += 1 + + non_zero_lengths = { + element: length for element, length in lengths.items() if length >= 1 + } + + return non_zero_lengths + + +def _is_visible(idx_row, idx_col, lengths) -> bool: + """ + Index -> {(idx_row, idx_col): bool}). 
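The scale-down loop in ``_get_trimming_maximums`` above shrinks whichever dimension is currently larger by ``scaling_factor`` until the element budget is met. A standalone restatement of that rule with a worked call, not the pandas API itself:

def get_trimming_maximums(rn, cn, max_elements, scaling_factor=0.8):
    # mirror of the private helper: shrink the larger dimension first
    while rn * cn > max_elements:
        if cn >= rn:
            cn = int(cn * scaling_factor)
        else:
            rn = int(rn * scaling_factor)
    return rn, cn

print(get_trimming_maximums(1000, 1000, 250_000))  # -> (512, 409)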
+ """ + return (idx_col, idx_row) in lengths + + +def format_table_styles(styles: CSSStyles) -> CSSStyles: + """ + looks for multiple CSS selectors and separates them: + [{'selector': 'td, th', 'props': 'a:v;'}] + ---> [{'selector': 'td', 'props': 'a:v;'}, + {'selector': 'th', 'props': 'a:v;'}] + """ + return [ + {"selector": selector, "props": css_dict["props"]} + for css_dict in styles + for selector in css_dict["selector"].split(",") + ] + + +def _default_formatter(x: Any, precision: int, thousands: bool = False) -> Any: + """ + Format the display of a value + + Parameters + ---------- + x : Any + Input variable to be formatted + precision : Int + Floating point precision used if ``x`` is float or complex. + thousands : bool, default False + Whether to group digits with thousands separated with ",". + + Returns + ------- + value : Any + Matches input type, or string if input is float or complex or int with sep. + """ + if is_float(x) or is_complex(x): + return f"{x:,.{precision}f}" if thousands else f"{x:.{precision}f}" + elif is_integer(x): + return f"{x:,}" if thousands else str(x) + return x + + +def _wrap_decimal_thousands( + formatter: Callable, decimal: str, thousands: str | None +) -> Callable: + """ + Takes a string formatting function and wraps logic to deal with thousands and + decimal parameters, in the case that they are non-standard and that the input + is a (float, complex, int). + """ + + def wrapper(x): + if is_float(x) or is_integer(x) or is_complex(x): + if decimal != "." and thousands is not None and thousands != ",": + return ( + formatter(x) + .replace(",", "§_§-") # rare string to avoid "," <-> "." clash. + .replace(".", decimal) + .replace("§_§-", thousands) + ) + elif decimal != "." and (thousands is None or thousands == ","): + return formatter(x).replace(".", decimal) + elif decimal == "." and thousands is not None and thousands != ",": + return formatter(x).replace(",", thousands) + return formatter(x) + + return wrapper + + +def _str_escape(x, escape): + """if escaping: only use on str, else return input""" + if isinstance(x, str): + if escape == "html": + return escape_html(x) + elif escape == "latex": + return _escape_latex(x) + elif escape == "latex-math": + return _escape_latex_math(x) + else: + raise ValueError( + f"`escape` only permitted in {{'html', 'latex', 'latex-math'}}, \ +got {escape}" + ) + return x + + +def _render_href(x, format): + """uses regex to detect a common URL pattern and converts to href tag in format.""" + if isinstance(x, str): + if format == "html": + href = '{0}' + elif format == "latex": + href = r"\href{{{0}}}{{{0}}}" + else: + raise ValueError("``hyperlinks`` format can only be 'html' or 'latex'") + pat = r"((http|ftp)s?:\/\/|www.)[\w/\-?=%.:@]+\.[\w/\-&?=%.,':;~!@#$*()\[\]]+" + return re.sub(pat, lambda m: href.format(m.group(0)), x) + return x + + +def _maybe_wrap_formatter( + formatter: BaseFormatter | None = None, + na_rep: str | None = None, + precision: int | None = None, + decimal: str = ".", + thousands: str | None = None, + escape: str | None = None, + hyperlinks: str | None = None, +) -> Callable: + """ + Allows formatters to be expressed as str, callable or None, where None returns + a default formatting function. wraps with na_rep, and precision where they are + available. 
+ """ + # Get initial func from input string, input callable, or from default factory + if isinstance(formatter, str): + func_0 = lambda x: formatter.format(x) + elif callable(formatter): + func_0 = formatter + elif formatter is None: + precision = ( + get_option("styler.format.precision") if precision is None else precision + ) + func_0 = partial( + _default_formatter, precision=precision, thousands=(thousands is not None) + ) + else: + raise TypeError(f"'formatter' expected str or callable, got {type(formatter)}") + + # Replace chars if escaping + if escape is not None: + func_1 = lambda x: func_0(_str_escape(x, escape=escape)) + else: + func_1 = func_0 + + # Replace decimals and thousands if non-standard inputs detected + if decimal != "." or (thousands is not None and thousands != ","): + func_2 = _wrap_decimal_thousands(func_1, decimal=decimal, thousands=thousands) + else: + func_2 = func_1 + + # Render links + if hyperlinks is not None: + func_3 = lambda x: func_2(_render_href(x, format=hyperlinks)) + else: + func_3 = func_2 + + # Replace missing values if na_rep + if na_rep is None: + return func_3 + else: + return lambda x: na_rep if (isna(x) is True) else func_3(x) + + +def non_reducing_slice(slice_: Subset): + """ + Ensure that a slice doesn't reduce to a Series or Scalar. + + Any user-passed `subset` should have this called on it + to make sure we're always working with DataFrames. + """ + # default to column slice, like DataFrame + # ['A', 'B'] -> IndexSlices[:, ['A', 'B']] + kinds = (ABCSeries, np.ndarray, Index, list, str) + if isinstance(slice_, kinds): + slice_ = IndexSlice[:, slice_] + + def pred(part) -> bool: + """ + Returns + ------- + bool + True if slice does *not* reduce, + False if `part` is a tuple. + """ + # true when slice does *not* reduce, False when part is a tuple, + # i.e. MultiIndex slice + if isinstance(part, tuple): + # GH#39421 check for sub-slice: + return any((isinstance(s, slice) or is_list_like(s)) for s in part) + else: + return isinstance(part, slice) or is_list_like(part) + + if not is_list_like(slice_): + if not isinstance(slice_, slice): + # a 1-d slice, like df.loc[1] + slice_ = [[slice_]] + else: + # slice(a, b, c) + slice_ = [slice_] # to tuplize later + else: + # error: Item "slice" of "Union[slice, Sequence[Any]]" has no attribute + # "__iter__" (not iterable) -> is specifically list_like in conditional + slice_ = [p if pred(p) else [p] for p in slice_] # type: ignore[union-attr] + return tuple(slice_) + + +def maybe_convert_css_to_tuples(style: CSSProperties) -> CSSList: + """ + Convert css-string to sequence of tuples format if needed. + 'color:red; border:1px solid black;' -> [('color', 'red'), + ('border','1px solid red')] + """ + if isinstance(style, str): + s = style.split(";") + try: + return [ + (x.split(":")[0].strip(), x.split(":")[1].strip()) + for x in s + if x.strip() != "" + ] + except IndexError: + raise ValueError( + "Styles supplied as string must follow CSS rule formats, " + f"for example 'attr: val;'. '{style}' was given." + ) + return style + + +def refactor_levels( + level: Level | list[Level] | None, + obj: Index, +) -> list[int]: + """ + Returns a consistent levels arg for use in ``hide_index`` or ``hide_columns``. + + Parameters + ---------- + level : int, str, list + Original ``level`` arg supplied to above methods. 
+ obj: + Either ``self.index`` or ``self.columns`` + + Returns + ------- + list : refactored arg with a list of levels to hide + """ + if level is None: + levels_: list[int] = list(range(obj.nlevels)) + elif isinstance(level, int): + levels_ = [level] + elif isinstance(level, str): + levels_ = [obj._get_level_number(level)] + elif isinstance(level, list): + levels_ = [ + obj._get_level_number(lev) if not isinstance(lev, int) else lev + for lev in level + ] + else: + raise ValueError("`level` must be of type `int`, `str` or list of such") + return levels_ + + +class Tooltips: + """ + An extension to ``Styler`` that allows for and manipulates tooltips on hover + of ``
<div></div>"A&B"NA$ "A"$ A&BNA + ... + + Using a ``formatter`` with LaTeX ``escape``. + + >>> df = pd.DataFrame([[1, 2, 3]], columns=["123", "~", "$%#"]) + >>> df.style.format_index("\\textbf{{{}}}", escape="latex", axis=1).to_latex() + ... # doctest: +SKIP + \begin{tabular}{lrrr} + {} & {\textbf{123}} & {\textbf{\textasciitilde }} & {\textbf{\$\%\#}} \\ + 0 & 1 & 2 & 3 \\ + \end{tabular} + """ + axis = self.data._get_axis_number(axis) + if axis == 0: + display_funcs_, obj = self._display_funcs_index, self.index + else: + display_funcs_, obj = self._display_funcs_columns, self.columns + levels_ = refactor_levels(level, obj) + + if all( + ( + formatter is None, + level is None, + precision is None, + decimal == ".", + thousands is None, + na_rep is None, + escape is None, + hyperlinks is None, + ) + ): + display_funcs_.clear() + return self # clear the formatter / revert to default and avoid looping + + if not isinstance(formatter, dict): + formatter = {level: formatter for level in levels_} + else: + formatter = { + obj._get_level_number(level): formatter_ + for level, formatter_ in formatter.items() + } + + for lvl in levels_: + format_func = _maybe_wrap_formatter( + formatter.get(lvl), + na_rep=na_rep, + precision=precision, + decimal=decimal, + thousands=thousands, + escape=escape, + hyperlinks=hyperlinks, + ) + + for idx in [(i, lvl) if axis == 0 else (lvl, i) for i in range(len(obj))]: + display_funcs_[idx] = format_func + + return self + + def relabel_index( + self, + labels: Sequence | Index, + axis: Axis = 0, + level: Level | list[Level] | None = None, + ) -> StylerRenderer: + r""" + Relabel the index, or column header, keys to display a set of specified values. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + labels : list-like or Index + New labels to display. Must have same length as the underlying values not + hidden. + axis : {"index", 0, "columns", 1} + Apply to the index or columns. + level : int, str, list, optional + The level(s) over which to apply the new labels. If `None` will apply + to all levels of an Index or MultiIndex which are not hidden. + + Returns + ------- + Styler + + See Also + -------- + Styler.format_index: Format the text display value of index or column headers. + Styler.hide: Hide the index, column headers, or specified data from display. + + Notes + ----- + As part of Styler, this method allows the display of an index to be + completely user-specified without affecting the underlying DataFrame data, + index, or column headers. This means that the flexibility of indexing is + maintained whilst the final display is customisable. + + Since Styler is designed to be progressively constructed with method chaining, + this method is adapted to react to the **currently specified hidden elements**. + This is useful because it means one does not have to specify all the new + labels if the majority of an index, or column headers, have already been hidden. + The following produce equivalent display (note the length of ``labels`` in + each case). + + .. code-block:: python + + # relabel first, then hide + df = pd.DataFrame({"col": ["a", "b", "c"]}) + df.style.relabel_index(["A", "B", "C"]).hide([0,1]) + # hide first, then relabel + df = pd.DataFrame({"col": ["a", "b", "c"]}) + df.style.hide([0,1]).relabel_index(["C"]) + + This method should be used, rather than :meth:`Styler.format_index`, in one of + the following cases (see examples): + + - A specified set of labels are required which are not a function of the + underlying index keys. 
+ - The function of the underlying index keys requires a counter variable, + such as those available upon enumeration. + + Examples + -------- + Basic use + + >>> df = pd.DataFrame({"col": ["a", "b", "c"]}) + >>> df.style.relabel_index(["A", "B", "C"]) # doctest: +SKIP + col + A a + B b + C c + + Chaining with pre-hidden elements + + >>> df.style.hide([0,1]).relabel_index(["C"]) # doctest: +SKIP + col + C c + + Using a MultiIndex + + >>> midx = pd.MultiIndex.from_product([[0, 1], [0, 1], [0, 1]]) + >>> df = pd.DataFrame({"col": list(range(8))}, index=midx) + >>> styler = df.style # doctest: +SKIP + col + 0 0 0 0 + 1 1 + 1 0 2 + 1 3 + 1 0 0 4 + 1 5 + 1 0 6 + 1 7 + >>> styler.hide((midx.get_level_values(0)==0)|(midx.get_level_values(1)==0)) + ... # doctest: +SKIP + >>> styler.hide(level=[0,1]) # doctest: +SKIP + >>> styler.relabel_index(["binary6", "binary7"]) # doctest: +SKIP + col + binary6 6 + binary7 7 + + We can also achieve the above by indexing first and then re-labeling + + >>> styler = df.loc[[(1,1,0), (1,1,1)]].style + >>> styler.hide(level=[0,1]).relabel_index(["binary6", "binary7"]) + ... # doctest: +SKIP + col + binary6 6 + binary7 7 + + Defining a formatting function which uses an enumeration counter. Also note + that the value of the index key is passed in the case of string labels so it + can also be inserted into the label, using curly brackets (or double curly + brackets if the string if pre-formatted), + + >>> df = pd.DataFrame({"samples": np.random.rand(10)}) + >>> styler = df.loc[np.random.randint(0,10,3)].style + >>> styler.relabel_index([f"sample{i+1} ({{}})" for i in range(3)]) + ... # doctest: +SKIP + samples + sample1 (5) 0.315811 + sample2 (0) 0.495941 + sample3 (2) 0.067946 + """ + axis = self.data._get_axis_number(axis) + if axis == 0: + display_funcs_, obj = self._display_funcs_index, self.index + hidden_labels, hidden_lvls = self.hidden_rows, self.hide_index_ + else: + display_funcs_, obj = self._display_funcs_columns, self.columns + hidden_labels, hidden_lvls = self.hidden_columns, self.hide_columns_ + visible_len = len(obj) - len(set(hidden_labels)) + if len(labels) != visible_len: + raise ValueError( + "``labels`` must be of length equal to the number of " + f"visible labels along ``axis`` ({visible_len})." + ) + + if level is None: + level = [i for i in range(obj.nlevels) if not hidden_lvls[i]] + levels_ = refactor_levels(level, obj) + + def alias_(x, value): + if isinstance(value, str): + return value.format(x) + return value + + for ai, i in enumerate([i for i in range(len(obj)) if i not in hidden_labels]): + if len(levels_) == 1: + idx = (i, levels_[0]) if axis == 0 else (levels_[0], i) + display_funcs_[idx] = partial(alias_, value=labels[ai]) + else: + for aj, lvl in enumerate(levels_): + idx = (i, lvl) if axis == 0 else (lvl, i) + display_funcs_[idx] = partial(alias_, value=labels[ai][aj]) + + return self + + +def _element( + html_element: str, + html_class: str | None, + value: Any, + is_visible: bool, + **kwargs, +) -> dict: + """ + Template to return container with information for a `` cells in the HTML result. + + Parameters + ---------- + css_name: str, default "pd-t" + Name of the CSS class that controls visualisation of tooltips. + css_props: list-like, default; see Notes + List of (attr, value) tuples defining properties of the CSS class. + tooltips: DataFrame, default empty + DataFrame of strings aligned with underlying Styler data for tooltip + display. 
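``Tooltips`` is not normally instantiated directly; the public ``Styler.set_tooltips`` wires it up. A usage sketch, assuming a recent pandas:

import pandas as pd

df = pd.DataFrame({"A": [1, 2]})
ttips = pd.DataFrame({"A": ["first row", ""]})  # "" or NaN means: no tooltip
html = df.style.set_tooltips(ttips, css_class="pd-t").to_html()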
+ + Notes + ----- + The default properties for the tooltip CSS class are: + + - visibility: hidden + - position: absolute + - z-index: 1 + - background-color: black + - color: white + - transform: translate(-20px, -20px) + + Hidden visibility is a key prerequisite to the hover functionality, and should + always be included in any manual properties specification. + """ + + def __init__( + self, + css_props: CSSProperties = [ + ("visibility", "hidden"), + ("position", "absolute"), + ("z-index", 1), + ("background-color", "black"), + ("color", "white"), + ("transform", "translate(-20px, -20px)"), + ], + css_name: str = "pd-t", + tooltips: DataFrame = DataFrame(), + ) -> None: + self.class_name = css_name + self.class_properties = css_props + self.tt_data = tooltips + self.table_styles: CSSStyles = [] + + @property + def _class_styles(self): + """ + Combine the ``_Tooltips`` CSS class name and CSS properties to the format + required to extend the underlying ``Styler`` `table_styles` to allow + tooltips to render in HTML. + + Returns + ------- + styles : List + """ + return [ + { + "selector": f".{self.class_name}", + "props": maybe_convert_css_to_tuples(self.class_properties), + } + ] + + def _pseudo_css(self, uuid: str, name: str, row: int, col: int, text: str): + """ + For every table data-cell that has a valid tooltip (not None, NaN or + empty string) must create two pseudo CSS entries for the specific + element id which are added to overall table styles: + an on hover visibility change and a content change + dependent upon the user's chosen display string. + + For example: + [{"selector": "T__row1_col1:hover .pd-t", + "props": [("visibility", "visible")]}, + {"selector": "T__row1_col1 .pd-t::after", + "props": [("content", "Some Valid Text String")]}] + + Parameters + ---------- + uuid: str + The uuid of the Styler instance + name: str + The css-name of the class used for styling tooltips + row : int + The row index of the specified tooltip string data + col : int + The col index of the specified tooltip string data + text : str + The textual content of the tooltip to be displayed in HTML. + + Returns + ------- + pseudo_css : List + """ + selector_id = "#T_" + uuid + "_row" + str(row) + "_col" + str(col) + return [ + { + "selector": selector_id + f":hover .{name}", + "props": [("visibility", "visible")], + }, + { + "selector": selector_id + f" .{name}::after", + "props": [("content", f'"{text}"')], + }, + ] + + def _translate(self, styler: StylerRenderer, d: dict): + """ + Mutate the render dictionary to allow for tooltips: + + - Add ```` HTML element to each data cells ``display_value``. Ignores + headers. + - Add table level CSS styles to control pseudo classes. + + Parameters + ---------- + styler_data : DataFrame + Underlying ``Styler`` DataFrame used for reindexing. + uuid : str + The underlying ``Styler`` uuid for CSS id. 
+        d : dict
+            The dictionary prior to final render
+
+        Returns
+        -------
+        render_dict : dict
+        """
+        self.tt_data = self.tt_data.reindex_like(styler.data)
+        if self.tt_data.empty:
+            return d
+
+        name = self.class_name
+        mask = (self.tt_data.isna()) | (self.tt_data.eq(""))  # empty string = no ttip
+        self.table_styles = [
+            style
+            for sublist in [
+                self._pseudo_css(styler.uuid, name, i, j, str(self.tt_data.iloc[i, j]))
+                for i in range(len(self.tt_data.index))
+                for j in range(len(self.tt_data.columns))
+                if not (
+                    mask.iloc[i, j]
+                    or i in styler.hidden_rows
+                    or j in styler.hidden_columns
+                )
+            ]
+            for style in sublist
+        ]
+
+        if self.table_styles:
+            # add span class to every cell only if at least 1 non-empty tooltip
+            for row in d["body"]:
+                for item in row:
+                    if item["type"] == "td":
+                        item["display_value"] = (
+                            str(item["display_value"])
+                            + f'<span class="{self.class_name}"></span>'
+                        )
+            d["table_styles"].extend(self._class_styles)
+            d["table_styles"].extend(self.table_styles)
+
+        return d
+
+
+def _parse_latex_table_wrapping(table_styles: CSSStyles, caption: str | None) -> bool:
+    """
+    Indicate whether LaTeX {tabular} should be wrapped with a {table} environment.
+
+    Parses the `table_styles` and detects any selectors which must be included outside
+    of {tabular}, i.e. indicating that wrapping must occur, and therefore returns True,
+    or if a caption exists and requires similar.
+    """
+    IGNORED_WRAPPERS = ["toprule", "midrule", "bottomrule", "column_format"]
+    # ignored selectors are included with {tabular} so do not need wrapping
+    return (
+        table_styles is not None
+        and any(d["selector"] not in IGNORED_WRAPPERS for d in table_styles)
+    ) or caption is not None
+
+
+def _parse_latex_table_styles(table_styles: CSSStyles, selector: str) -> str | None:
+    """
+    Return the first 'props' 'value' from ``table_styles`` identified by ``selector``.
+
+    Examples
+    --------
+    >>> table_styles = [{'selector': 'foo', 'props': [('attr','value')]},
+    ...                 {'selector': 'bar', 'props': [('attr', 'overwritten')]},
+    ...                 {'selector': 'bar', 'props': [('a1', 'baz'), ('a2', 'ignore')]}]
+    >>> _parse_latex_table_styles(table_styles, selector='bar')
+    'baz'
+
+    Notes
+    -----
+    Replacing "§" with ":" works around CSS, in which ":" has structural significance
+    and so cannot appear in a props value, even though LaTeX labels often require it.
+    """
+    for style in table_styles[::-1]:  # in reverse for most recently applied style
+        if style["selector"] == selector:
+            return str(style["props"][0][1]).replace("§", ":")
+    return None
+
+
+def _parse_latex_cell_styles(
+    latex_styles: CSSList, display_value: str, convert_css: bool = False
+) -> str:
+    r"""
+    Mutate the ``display_value`` string including LaTeX commands from ``latex_styles``.
+
+    This method builds a recursive latex chain of commands based on the
+    CSSList input, nested around ``display_value``.
+
+    If a CSS style is given as ('<command>', '<options>') this is translated to
+    '\<command><options> <display_value>', and this value is treated as the
+    display value for the next iteration.
+
+    The most recent style forms the inner component, for example for styles:
+    `[('c1', 'o1'), ('c2', 'o2')]` this returns: `\c1o1{\c2o2{display_value}}`
+
+    Sometimes latex commands have to be wrapped with curly braces in different ways;
+    we create parsing flags to identify the different behaviours:
+
+     - `--rwrap`  : `\<command><options>{<display_value>}`
+     - `--wrap`   : `{\<command><options> <display_value>}`
+     - `--nowrap` : `\<command><options> <display_value>`
+     - `--lwrap`  : `{\<command><options>} <display_value>`
+     - `--dwrap`  : `{\<command><options>}{<display_value>}`
+
+    For example for styles:
+    `[('c1', 'o1--wrap'), ('c2', 'o2')]` this returns: `{\c1o1 \c2o2{display_value}}`
+    """
+    if convert_css:
+        latex_styles = _parse_latex_css_conversion(latex_styles)
+    for command, options in latex_styles[::-1]:  # in reverse for most recent style
+        formatter = {
+            "--wrap": f"{{\\{command}--to_parse {display_value}}}",
+            "--nowrap": f"\\{command}--to_parse {display_value}",
+            "--lwrap": f"{{\\{command}--to_parse}} {display_value}",
+            "--rwrap": f"\\{command}--to_parse{{{display_value}}}",
+            "--dwrap": f"{{\\{command}--to_parse}}{{{display_value}}}",
+        }
+        display_value = f"\\{command}{options} {display_value}"
+        for arg in ["--nowrap", "--wrap", "--lwrap", "--rwrap", "--dwrap"]:
+            if arg in str(options):
+                display_value = formatter[arg].replace(
+                    "--to_parse", _parse_latex_options_strip(value=options, arg=arg)
+                )
+                break  # only ever one purposeful entry
+    return display_value
+
+
+def _parse_latex_header_span(
+    cell: dict[str, Any],
+    multirow_align: str,
+    multicol_align: str,
+    wrap: bool = False,
+    convert_css: bool = False,
+) -> str:
+    r"""
+    Refactor the cell `display_value` if a 'colspan' or 'rowspan' attribute is present.
+
+    'rowspan' and 'colspan' do not occur simultaneously. If they are detected then
+    the `display_value` is altered to a LaTeX `multirow` or `multicol` command
+    respectively, with the appropriate cell-span.
+
+    ``wrap`` is used to enclose the `display_value` in braces, which is needed for
+    column headers using the siunitx package.
+
+    Requires the package {multirow}, whereas multicol support is usually built in
+    to the {tabular} environment.
+
+    Examples
+    --------
+    >>> cell = {'cellstyle': '', 'display_value': 'text', 'attributes': 'colspan="3"'}
+    >>> _parse_latex_header_span(cell, 't', 'c')
+    '\\multicolumn{3}{c}{text}'
+    """
+    display_val = _parse_latex_cell_styles(
+        cell["cellstyle"], cell["display_value"], convert_css
+    )
+    if "attributes" in cell:
+        attrs = cell["attributes"]
+        if 'colspan="' in attrs:
+            colspan = attrs[attrs.find('colspan="') + 9 :]  # len('colspan="') = 9
+            colspan = int(colspan[: colspan.find('"')])
+            if "naive-l" == multicol_align:
+                out = f"{{{display_val}}}" if wrap else f"{display_val}"
+                blanks = " & {}" if wrap else " &"
+                return out + blanks * (colspan - 1)
+            elif "naive-r" == multicol_align:
+                out = f"{{{display_val}}}" if wrap else f"{display_val}"
+                blanks = "{} & " if wrap else "& "
+                return blanks * (colspan - 1) + out
+            return f"\\multicolumn{{{colspan}}}{{{multicol_align}}}{{{display_val}}}"
+        elif 'rowspan="' in attrs:
+            if multirow_align == "naive":
+                return display_val
+            rowspan = attrs[attrs.find('rowspan="') + 9 :]
+            rowspan = int(rowspan[: rowspan.find('"')])
+            return f"\\multirow[{multirow_align}]{{{rowspan}}}{{*}}{{{display_val}}}"
+    if wrap:
+        return f"{{{display_val}}}"
+    else:
+        return display_val
+
+
+def _parse_latex_options_strip(value: str | float, arg: str) -> str:
+    """
+    Strip a css_value which may have latex wrapping arguments, css comment identifiers,
+    and whitespaces, to a valid string for latex options parsing.
+
+    For example: 'red /* --wrap */ ' --> 'red'
+    """
+    return str(value).replace(arg, "").replace("/*", "").replace("*/", "").strip()
+
+
+def _parse_latex_css_conversion(styles: CSSList) -> CSSList:
+    """
+    Convert CSS (attribute,value) pairs to equivalent LaTeX (command,options) pairs.
+
+    Ignore conversion if tagged with the `--latex` option; skip if no conversion is found.
+    """
+
+    def font_weight(value, arg):
+        if value in ("bold", "bolder"):
+            return "bfseries", f"{arg}"
+        return None
+
+    def font_style(value, arg):
+        if value == "italic":
+            return "itshape", f"{arg}"
+        if value == "oblique":
+            return "slshape", f"{arg}"
+        return None
+
+    def color(value, user_arg, command, comm_arg):
+        """
+        CSS colors have 5 formats to process:
+
+        - 6 digit hex code: "#ff23ee"     --> [HTML]{FF23EE}
+        - 3 digit hex code: "#f0e"        --> [HTML]{FF00EE}
+        - rgba: rgba(128, 255, 0, 0.5)    --> [rgb]{0.502, 1.000, 0.000}
+        - rgb: rgb(128, 255, 0)           --> [rgb]{0.502, 1.000, 0.000}
+        - string: red                     --> {red}
+
+        Additionally rgb or rgba can be expressed in % which is also parsed.
+        """
+        arg = user_arg if user_arg != "" else comm_arg
+
+        if value[0] == "#" and len(value) == 7:  # color is hex code
+            return command, f"[HTML]{{{value[1:].upper()}}}{arg}"
+        if value[0] == "#" and len(value) == 4:  # color is short hex code
+            val = f"{value[1].upper()*2}{value[2].upper()*2}{value[3].upper()*2}"
+            return command, f"[HTML]{{{val}}}{arg}"
+        elif value[:3] == "rgb":  # color is rgb or rgba
+            r = re.findall("(?<=\\()[0-9\\s%]+(?=,)", value)[0].strip()
+            r = float(r[:-1]) / 100 if "%" in r else int(r) / 255
+            g = re.findall("(?<=,)[0-9\\s%]+(?=,)", value)[0].strip()
+            g = float(g[:-1]) / 100 if "%" in g else int(g) / 255
+            if value[3] == "a":  # color is rgba
+                b = re.findall("(?<=,)[0-9\\s%]+(?=,)", value)[1].strip()
+            else:  # color is rgb
+                b = re.findall("(?<=,)[0-9\\s%]+(?=\\))", value)[0].strip()
+            b = float(b[:-1]) / 100 if "%" in b else int(b) / 255
+            return command, f"[rgb]{{{r:.3f}, {g:.3f}, {b:.3f}}}{arg}"
+        else:
+            return command, f"{{{value}}}{arg}"  # color is likely string-named
+
+    CONVERTED_ATTRIBUTES: dict[str, Callable] = {
+        "font-weight": font_weight,
+        "background-color": partial(color, command="cellcolor", comm_arg="--lwrap"),
+        "color": partial(color, command="color", comm_arg=""),
+        "font-style": font_style,
+    }
+
+    latex_styles: CSSList = []
+    for attribute, value in styles:
+        if isinstance(value, str) and "--latex" in value:
+            # return the style without conversion but drop '--latex'
+            latex_styles.append((attribute, value.replace("--latex", "")))
+        if attribute in CONVERTED_ATTRIBUTES:
+            arg = ""
+            for x in ["--wrap", "--nowrap", "--lwrap", "--dwrap", "--rwrap"]:
+                if x in str(value):
+                    arg, value = x, _parse_latex_options_strip(value, x)
+                    break
+            latex_style = CONVERTED_ATTRIBUTES[attribute](value, arg)
+            if latex_style is not None:
+                latex_styles.extend([latex_style])
+    return latex_styles
+
+
+def _escape_latex(s: str) -> str:
+    r"""
+    Replace the characters ``&``, ``%``, ``$``, ``#``, ``_``, ``{``, ``}``,
+    ``~``, ``^``, and ``\`` in the string with LaTeX-safe sequences.
+
+    Use this if you need to display text that might contain such characters in LaTeX.
+ + Parameters + ---------- + s : str + Input to be escaped + + Return + ------ + str : + Escaped string + """ + return ( + s.replace("\\", "ab2§=§8yz") # rare string for final conversion: avoid \\ clash + .replace("ab2§=§8yz ", "ab2§=§8yz\\space ") # since \backslash gobbles spaces + .replace("&", "\\&") + .replace("%", "\\%") + .replace("$", "\\$") + .replace("#", "\\#") + .replace("_", "\\_") + .replace("{", "\\{") + .replace("}", "\\}") + .replace("~ ", "~\\space ") # since \textasciitilde gobbles spaces + .replace("~", "\\textasciitilde ") + .replace("^ ", "^\\space ") # since \textasciicircum gobbles spaces + .replace("^", "\\textasciicircum ") + .replace("ab2§=§8yz", "\\textbackslash ") + ) + + +def _math_mode_with_dollar(s: str) -> str: + r""" + All characters in LaTeX math mode are preserved. + + The substrings in LaTeX math mode, which start with + the character ``$`` and end with ``$``, are preserved + without escaping. Otherwise regular LaTeX escaping applies. + + Parameters + ---------- + s : str + Input to be escaped + + Return + ------ + str : + Escaped string + """ + s = s.replace(r"\$", r"rt8§=§7wz") + pattern = re.compile(r"\$.*?\$") + pos = 0 + ps = pattern.search(s, pos) + res = [] + while ps: + res.append(_escape_latex(s[pos : ps.span()[0]])) + res.append(ps.group()) + pos = ps.span()[1] + ps = pattern.search(s, pos) + + res.append(_escape_latex(s[pos : len(s)])) + return "".join(res).replace(r"rt8§=§7wz", r"\$") + + +def _math_mode_with_parentheses(s: str) -> str: + r""" + All characters in LaTeX math mode are preserved. + + The substrings in LaTeX math mode, which start with + the character ``\(`` and end with ``\)``, are preserved + without escaping. Otherwise regular LaTeX escaping applies. + + Parameters + ---------- + s : str + Input to be escaped + + Return + ------ + str : + Escaped string + """ + s = s.replace(r"\(", r"LEFT§=§6yzLEFT").replace(r"\)", r"RIGHTab5§=§RIGHT") + res = [] + for item in re.split(r"LEFT§=§6yz|ab5§=§RIGHT", s): + if item.startswith("LEFT") and item.endswith("RIGHT"): + res.append(item.replace("LEFT", r"\(").replace("RIGHT", r"\)")) + elif "LEFT" in item and "RIGHT" in item: + res.append( + _escape_latex(item).replace("LEFT", r"\(").replace("RIGHT", r"\)") + ) + else: + res.append( + _escape_latex(item) + .replace("LEFT", r"\textbackslash (") + .replace("RIGHT", r"\textbackslash )") + ) + return "".join(res) + + +def _escape_latex_math(s: str) -> str: + r""" + All characters in LaTeX math mode are preserved. + + The substrings in LaTeX math mode, which either are surrounded + by two characters ``$`` or start with the character ``\(`` and end with ``\)``, + are preserved without escaping. Otherwise regular LaTeX escaping applies. 
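Concretely, the escaping rules above behave like this (illustrative calls to these module-internal helpers):

_escape_latex("50% of $10 & more_")
# -> '50\% of \$10 \& more\_'

_escape_latex_math(r"cost $x^2$ rose 10%")
# -> 'cost $x^2$ rose 10\%'  (the $...$ span is preserved verbatim)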
+
+
+def _escape_latex_math(s: str) -> str:
+    r"""
+    All characters in LaTeX math mode are preserved.
+
+    The substrings in LaTeX math mode, which either are surrounded
+    by two characters ``$`` or start with the character ``\(`` and end with ``\)``,
+    are preserved without escaping. Otherwise regular LaTeX escaping applies.
+
+    Parameters
+    ----------
+    s : str
+        Input to be escaped
+
+    Returns
+    -------
+    str :
+        Escaped string
+    """
+    s = s.replace(r"\$", r"rt8§=§7wz")
+    ps_d = re.compile(r"\$.*?\$").search(s, 0)
+    ps_p = re.compile(r"\(.*?\)").search(s, 0)
+    mode = []
+    if ps_d:
+        mode.append(ps_d.span()[0])
+    if ps_p:
+        mode.append(ps_p.span()[0])
+    if len(mode) == 0:
+        return _escape_latex(s.replace(r"rt8§=§7wz", r"\$"))
+    if s[mode[0]] == r"$":
+        return _math_mode_with_dollar(s.replace(r"rt8§=§7wz", r"\$"))
+    if s[mode[0] - 1 : mode[0] + 1] == r"\(":
+        return _math_mode_with_parentheses(s.replace(r"rt8§=§7wz", r"\$"))
+    else:
+        return _escape_latex(s.replace(r"rt8§=§7wz", r"\$"))
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/templates/html.tpl b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/templates/html.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..8c63be3ad788a8abddf3588b2b9dd6d6126f5df3
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/templates/html.tpl
@@ -0,0 +1,16 @@
+{# Update the html_style/table_structure.html documentation too #}
+{% if doctype_html %}
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="{{encoding}}">
+{% if not exclude_styles %}{% include html_style_tpl %}{% endif %}
+</head>
+<body>
+{% include html_table_tpl %}
+</body>
+</html>
+{% elif not doctype_html %}
+{% if not exclude_styles %}{% include html_style_tpl %}{% endif %}
+{% include html_table_tpl %}
+{% endif %}
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/templates/html_style.tpl b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/templates/html_style.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..5c3fcd97f51bbec263399922579420dfa9ceef9c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/templates/html_style.tpl
@@ -0,0 +1,26 @@
+{%- block before_style -%}{%- endblock before_style -%}
+{% block style %}
+
+{% endblock style %}
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/templates/html_table.tpl b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/templates/html_table.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..17118d2bb21ccd185780d44c83a5242b12bd2a0d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/templates/html_table.tpl
@@ -0,0 +1,63 @@
+{% block before_table %}{% endblock before_table %}
+{% block table %}
+{% if exclude_styles %}
+<table>
+{% else %}
+<table id="T_{{uuid}}"{% if table_attributes %} {{table_attributes}}{% endif %}>
+{% endif %}
+{% block caption %}
+{% if caption and caption is string %}
+  <caption>{{caption}}</caption>
+{% elif caption and caption is sequence %}
+  <caption>{{caption[0]}}</caption>
+{% endif %}
+{% endblock caption %}
+{% block thead %}
+<thead>
+{% block before_head_rows %}{% endblock %}
+{% for r in head %}
+{% block head_tr scoped %}
+<tr>
+{% if exclude_styles %}
+{% for c in r %}
+{% if c.is_visible != False %}
+  <{{c.type}} {{c.attributes}}>{{c.display_value}}</{{c.type}}>
+{% endif %}
+{% endfor %}
+{% else %}
+{% for c in r %}
+{% if c.is_visible != False %}
+  <{{c.type}} {%- if c.id is defined %} id="T_{{uuid}}_{{c.id}}" {%- endif %} class="{{c.class}}" {{c.attributes}}>{{c.display_value}}</{{c.type}}>
+{% endif %}
+{% endfor %}
+{% endif %}
+</tr>
+{% endblock head_tr %}
+{% endfor %}
+{% block after_head_rows %}{% endblock %}
+</thead>
+{% endblock thead %}
+{% block tbody %}
+<tbody>
+{% block before_rows %}{% endblock before_rows %}
+{% for r in body %}
+{% block tr scoped %}
+<tr>
+{% if exclude_styles %}
+{% for c in r %}{% if c.is_visible != False %}
+  <{{c.type}} {{c.attributes}}>{{c.display_value}}</{{c.type}}>
+{% endif %}{% endfor %}
+{% else %}
+{% for c in r %}{% if c.is_visible != False %}
+  <{{c.type}} {%- if c.id is defined %} id="T_{{uuid}}_{{c.id}}" {%- endif %} class="{{c.class}}" {{c.attributes}}>{{c.display_value}}</{{c.type}}>
+{% endif %}{% endfor %}
+{% endif %}
+</tr>
+{% endblock tr %}
+{% endfor %}
+{% block after_rows %}{% endblock after_rows %}
+</tbody>
+{% endblock tbody %}
+</table>
+{% endblock table %} +{% block after_table %}{% endblock after_table %} diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/templates/latex.tpl b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/templates/latex.tpl new file mode 100644 index 0000000000000000000000000000000000000000..ae341bbc29823489d9d15e354fae0ce2e10a046d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/templates/latex.tpl @@ -0,0 +1,5 @@ +{% if environment == "longtable" %} +{% include "latex_longtable.tpl" %} +{% else %} +{% include "latex_table.tpl" %} +{% endif %} diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/templates/latex_longtable.tpl b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/templates/latex_longtable.tpl new file mode 100644 index 0000000000000000000000000000000000000000..b97843eeb918da1b12f6f2edd585c8e42d6b7bb5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/templates/latex_longtable.tpl @@ -0,0 +1,82 @@ +\begin{longtable} +{%- set position = parse_table(table_styles, 'position') %} +{%- if position is not none %} +[{{position}}] +{%- endif %} +{%- set column_format = parse_table(table_styles, 'column_format') %} +{% raw %}{{% endraw %}{{column_format}}{% raw %}}{% endraw %} + +{% for style in table_styles %} +{% if style['selector'] not in ['position', 'position_float', 'caption', 'toprule', 'midrule', 'bottomrule', 'column_format', 'label'] %} +\{{style['selector']}}{{parse_table(table_styles, style['selector'])}} +{% endif %} +{% endfor %} +{% if caption and caption is string %} +\caption{% raw %}{{% endraw %}{{caption}}{% raw %}}{% endraw %} +{%- set label = parse_table(table_styles, 'label') %} +{%- if label is not none %} + \label{{label}} +{%- endif %} \\ +{% elif caption and caption is sequence %} +\caption[{{caption[1]}}]{% raw %}{{% endraw %}{{caption[0]}}{% raw %}}{% endraw %} +{%- set label = parse_table(table_styles, 'label') %} +{%- if label is not none %} + \label{{label}} +{%- endif %} \\ +{% else %} +{%- set label = parse_table(table_styles, 'label') %} +{%- if label is not none %} +\label{{label}} \\ +{% endif %} +{% endif %} +{% set toprule = parse_table(table_styles, 'toprule') %} +{% if toprule is not none %} +\{{toprule}} +{% endif %} +{% for row in head %} +{% for c in row %}{%- if not loop.first %} & {% endif %}{{parse_header(c, multirow_align, multicol_align, siunitx)}}{% endfor %} \\ +{% endfor %} +{% set midrule = parse_table(table_styles, 'midrule') %} +{% if midrule is not none %} +\{{midrule}} +{% endif %} +\endfirsthead +{% if caption and caption is string %} +\caption[]{% raw %}{{% endraw %}{{caption}}{% raw %}}{% endraw %} \\ +{% elif caption and caption is sequence %} +\caption[]{% raw %}{{% endraw %}{{caption[0]}}{% raw %}}{% endraw %} \\ +{% endif %} +{% if toprule is not none %} +\{{toprule}} +{% endif %} +{% for row in head %} +{% for c in row %}{%- if not loop.first %} & {% endif %}{{parse_header(c, multirow_align, multicol_align, siunitx)}}{% endfor %} \\ +{% endfor %} +{% if midrule is not none %} +\{{midrule}} +{% endif %} +\endhead +{% if midrule is not none %} +\{{midrule}} +{% endif %} +\multicolumn{% raw %}{{% endraw %}{{body[0]|length}}{% raw %}}{% endraw %}{r}{Continued on next page} \\ +{% if midrule is not none %} +\{{midrule}} +{% endif %} +\endfoot +{% set bottomrule = parse_table(table_styles, 'bottomrule') %} +{% if bottomrule is not none %} +\{{bottomrule}} +{% endif %} +\endlastfoot +{% for row in body %} +{% for c in row 
%}{% if not loop.first %} & {% endif %} + {%- if c.type == 'th' %}{{parse_header(c, multirow_align, multicol_align)}}{% else %}{{parse_cell(c.cellstyle, c.display_value, convert_css)}}{% endif %} +{%- endfor %} \\ +{% if clines and clines[loop.index] | length > 0 %} + {%- for cline in clines[loop.index] %}{% if not loop.first %} {% endif %}{{ cline }}{% endfor %} + +{% endif %} +{% endfor %} +\end{longtable} +{% raw %}{% endraw %} diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/templates/latex_table.tpl b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/templates/latex_table.tpl new file mode 100644 index 0000000000000000000000000000000000000000..7858cb4c945534a4d21cd4474460fd1abcf01f82 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/templates/latex_table.tpl @@ -0,0 +1,57 @@ +{% if environment or parse_wrap(table_styles, caption) %} +\begin{% raw %}{{% endraw %}{{environment if environment else "table"}}{% raw %}}{% endraw %} +{%- set position = parse_table(table_styles, 'position') %} +{%- if position is not none %} +[{{position}}] +{%- endif %} + +{% set position_float = parse_table(table_styles, 'position_float') %} +{% if position_float is not none%} +\{{position_float}} +{% endif %} +{% if caption and caption is string %} +\caption{% raw %}{{% endraw %}{{caption}}{% raw %}}{% endraw %} + +{% elif caption and caption is sequence %} +\caption[{{caption[1]}}]{% raw %}{{% endraw %}{{caption[0]}}{% raw %}}{% endraw %} + +{% endif %} +{% for style in table_styles %} +{% if style['selector'] not in ['position', 'position_float', 'caption', 'toprule', 'midrule', 'bottomrule', 'column_format'] %} +\{{style['selector']}}{{parse_table(table_styles, style['selector'])}} +{% endif %} +{% endfor %} +{% endif %} +\begin{tabular} +{%- set column_format = parse_table(table_styles, 'column_format') %} +{% raw %}{{% endraw %}{{column_format}}{% raw %}}{% endraw %} + +{% set toprule = parse_table(table_styles, 'toprule') %} +{% if toprule is not none %} +\{{toprule}} +{% endif %} +{% for row in head %} +{% for c in row %}{%- if not loop.first %} & {% endif %}{{parse_header(c, multirow_align, multicol_align, siunitx, convert_css)}}{% endfor %} \\ +{% endfor %} +{% set midrule = parse_table(table_styles, 'midrule') %} +{% if midrule is not none %} +\{{midrule}} +{% endif %} +{% for row in body %} +{% for c in row %}{% if not loop.first %} & {% endif %} + {%- if c.type == 'th' %}{{parse_header(c, multirow_align, multicol_align, False, convert_css)}}{% else %}{{parse_cell(c.cellstyle, c.display_value, convert_css)}}{% endif %} +{%- endfor %} \\ +{% if clines and clines[loop.index] | length > 0 %} + {%- for cline in clines[loop.index] %}{% if not loop.first %} {% endif %}{{ cline }}{% endfor %} + +{% endif %} +{% endfor %} +{% set bottomrule = parse_table(table_styles, 'bottomrule') %} +{% if bottomrule is not none %} +\{{bottomrule}} +{% endif %} +\end{tabular} +{% if environment or parse_wrap(table_styles, caption) %} +\end{% raw %}{{% endraw %}{{environment if environment else "table"}}{% raw %}}{% endraw %} + +{% endif %} diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/templates/string.tpl b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/templates/string.tpl new file mode 100644 index 0000000000000000000000000000000000000000..06aeb2b4e413c61a912b535056c19c794d4b9c85 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/templates/string.tpl @@ -0,0 +1,12 @@ +{% for r in head %} +{% 
for c in r %}{% if c["is_visible"] %}
+{{ c["display_value"] }}{% if not loop.last %}{{ delimiter }}{% endif %}
+{% endif %}{% endfor %}
+
+{% endfor %}
+{% for r in body %}
+{% for c in r %}{% if c["is_visible"] %}
+{{ c["display_value"] }}{% if not loop.last %}{{ delimiter }}{% endif %}
+{% endif %}{% endfor %}
+
+{% endfor %}
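The string template above, like the HTML templates, is rendered against a context of head/body row lists whose entries are cell dicts. A minimal, self-contained sketch of that rendering model using plain jinja2 (the inline template below is an illustrative cut-down analogue written by the editor, not the shipped file):

    from jinja2 import Template

    # Cut-down analogue of string.tpl: emit visible cells, delimiter-joined.
    tpl = Template(
        "{% for r in body %}{% for c in r %}"
        "{% if c['is_visible'] %}{{ c['display_value'] }}"
        "{% if not loop.last %}{{ delimiter }}{% endif %}{% endif %}"
        "{% endfor %}\n{% endfor %}"
    )
    body = [[{"is_visible": True, "display_value": "a"},
             {"is_visible": True, "display_value": "b"}]]
    print(tpl.render(body=body, delimiter=" "))  # -> a b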
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/xml.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/xml.py
new file mode 100644
index 0000000000000000000000000000000000000000..f56fca8d7ef4446727bfa34166b0c6b5a2856338
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/formats/xml.py
@@ -0,0 +1,560 @@
+"""
+:mod:`pandas.io.formats.xml` is a module for formatting data in XML.
+"""
+from __future__ import annotations
+
+import codecs
+import io
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    final,
+)
+import warnings
+
+from pandas.errors import AbstractMethodError
+from pandas.util._decorators import (
+    cache_readonly,
+    doc,
+)
+
+from pandas.core.dtypes.common import is_list_like
+from pandas.core.dtypes.missing import isna
+
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.common import get_handle
+from pandas.io.xml import (
+    get_data_from_filepath,
+    preprocess_data,
+)
+
+if TYPE_CHECKING:
+    from pandas._typing import (
+        CompressionOptions,
+        FilePath,
+        ReadBuffer,
+        StorageOptions,
+        WriteBuffer,
+    )
+
+    from pandas import DataFrame
+
+
+@doc(
+    storage_options=_shared_docs["storage_options"],
+    compression_options=_shared_docs["compression_options"] % "path_or_buffer",
+)
+class _BaseXMLFormatter:
+    """
+    Base class for formatting data in XML.
+
+    Parameters
+    ----------
+    path_or_buffer : str or file-like
+        Destination of the XML output: a path, URL, file or file-like
+        object. If None, the document is returned as a string.
+
+    index : bool
+        Whether to include index in xml document.
+
+    root_name : str
+        Name for root of xml document. Default is 'data'.
+
+    row_name : str
+        Name for row elements of xml document. Default is 'row'.
+
+    na_rep : str
+        Missing data representation.
+
+    attr_cols : list
+        List of columns to write as attributes in row element.
+
+    elem_cols : list
+        List of columns to write as children in row element.
+
+    namespaces : dict
+        The namespaces to define in XML document as a dict with key
+        being the namespace prefix and value the URI.
+
+    prefix : str
+        The prefix for each element in XML document including root.
+
+    encoding : str
+        Encoding of xml object or document.
+
+    xml_declaration : bool
+        Whether to include an XML declaration as the first line of the
+        document.
+
+    pretty_print : bool
+        Whether to write xml document with line breaks and indentation.
+
+    stylesheet : str or file-like
+        A URL, file, file-like object, or a raw string containing XSLT.
+
+    {compression_options}
+
+        .. versionchanged:: 1.4.0 Zstandard support.
+
+    {storage_options}
+
+    See Also
+    --------
+    pandas.io.formats.xml.EtreeXMLFormatter
+    pandas.io.formats.xml.LxmlXMLFormatter
+    """
+
+    def __init__(
+        self,
+        frame: DataFrame,
+        path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
+        index: bool = True,
+        root_name: str | None = "data",
+        row_name: str | None = "row",
+        na_rep: str | None = None,
+        attr_cols: list[str] | None = None,
+        elem_cols: list[str] | None = None,
+        namespaces: dict[str | None, str] | None = None,
+        prefix: str | None = None,
+        encoding: str = "utf-8",
+        xml_declaration: bool | None = True,
+        pretty_print: bool | None = True,
+        stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None,
+        compression: CompressionOptions = "infer",
+        storage_options: StorageOptions | None = None,
+    ) -> None:
+        self.frame = frame
+        self.path_or_buffer = path_or_buffer
+        self.index = index
+        self.root_name = root_name
+        self.row_name = row_name
+        self.na_rep = na_rep
+        self.attr_cols = attr_cols
+        self.elem_cols = elem_cols
+        self.namespaces = namespaces
+        self.prefix = prefix
+        self.encoding = encoding
+        self.xml_declaration = xml_declaration
+        self.pretty_print = pretty_print
+        self.stylesheet = stylesheet
+        self.compression: CompressionOptions = compression
+        self.storage_options = storage_options
+
+        self.orig_cols = self.frame.columns.tolist()
+        self.frame_dicts = self._process_dataframe()
+
+        self._validate_columns()
+        self._validate_encoding()
+        self.prefix_uri = self._get_prefix_uri()
+        self._handle_indexes()
+
+    def _build_tree(self) -> bytes:
+        """
+        Build tree from data.
+
+        This method initializes the root and builds attributes and elements
+        with optional namespaces.
+        """
+        raise AbstractMethodError(self)
+
+    @final
+    def _validate_columns(self) -> None:
+        """
+        Validate elem_cols and attr_cols.
+
+        This method will check if columns is list-like.
+
+        Raises
+        ------
+        TypeError
+            * If either `attr_cols` or `elem_cols` is not list-like.
+        """
+        if self.attr_cols and not is_list_like(self.attr_cols):
+            raise TypeError(
+                f"{type(self.attr_cols).__name__} is not a valid type for attr_cols"
+            )
+
+        if self.elem_cols and not is_list_like(self.elem_cols):
+            raise TypeError(
+                f"{type(self.elem_cols).__name__} is not a valid type for elem_cols"
+            )
+
+    @final
+    def _validate_encoding(self) -> None:
+        """
+        Validate encoding.
+
+        This method will check if encoding is among listed under codecs.
+
+        Raises
+        ------
+        LookupError
+            * If encoding is not available in codecs.
+        """
+
+        codecs.lookup(self.encoding)
+
+    @final
+    def _process_dataframe(self) -> dict[int | str, dict[str, Any]]:
+        """
+        Adjust Data Frame to fit xml output.
+
+        This method will adjust underlying data frame for xml output,
+        including optionally replacing missing values and including indexes.
+        """
+
+        df = self.frame
+
+        if self.index:
+            df = df.reset_index()
+
+        if self.na_rep is not None:
+            with warnings.catch_warnings():
+                warnings.filterwarnings(
+                    "ignore",
+                    "Downcasting object dtype arrays",
+                    category=FutureWarning,
+                )
+                df = df.fillna(self.na_rep)
+
+        return df.to_dict(orient="index")
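+
+    # Editor's illustration (not part of pandas; assumes ``import pandas as
+    # pd``): the dict-of-dicts shape _process_dataframe returns via
+    # to_dict(orient="index"), with the index folded in by reset_index()::
+    #
+    #     >>> pd.DataFrame({"shape": ["square"]}).reset_index().to_dict(orient="index")
+    #     {0: {'index': 0, 'shape': 'square'}}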
+ """ + + if not self.index: + return + + first_key = next(iter(self.frame_dicts)) + indexes: list[str] = [ + x for x in self.frame_dicts[first_key].keys() if x not in self.orig_cols + ] + + if self.attr_cols: + self.attr_cols = indexes + self.attr_cols + + if self.elem_cols: + self.elem_cols = indexes + self.elem_cols + + def _get_prefix_uri(self) -> str: + """ + Get uri of namespace prefix. + + This method retrieves corresponding URI to prefix in namespaces. + + Raises + ------ + KeyError + *If prefix is not included in namespace dict. + """ + + raise AbstractMethodError(self) + + @final + def _other_namespaces(self) -> dict: + """ + Define other namespaces. + + This method will build dictionary of namespaces attributes + for root element, conditionally with optional namespaces and + prefix. + """ + + nmsp_dict: dict[str, str] = {} + if self.namespaces: + nmsp_dict = { + f"xmlns{p if p=='' else f':{p}'}": n + for p, n in self.namespaces.items() + if n != self.prefix_uri[1:-1] + } + + return nmsp_dict + + @final + def _build_attribs(self, d: dict[str, Any], elem_row: Any) -> Any: + """ + Create attributes of row. + + This method adds attributes using attr_cols to row element and + works with tuples for multindex or hierarchical columns. + """ + + if not self.attr_cols: + return elem_row + + for col in self.attr_cols: + attr_name = self._get_flat_col_name(col) + try: + if not isna(d[col]): + elem_row.attrib[attr_name] = str(d[col]) + except KeyError: + raise KeyError(f"no valid column, {col}") + return elem_row + + @final + def _get_flat_col_name(self, col: str | tuple) -> str: + flat_col = col + if isinstance(col, tuple): + flat_col = ( + "".join([str(c) for c in col]).strip() + if "" in col + else "_".join([str(c) for c in col]).strip() + ) + return f"{self.prefix_uri}{flat_col}" + + @cache_readonly + def _sub_element_cls(self): + raise AbstractMethodError(self) + + @final + def _build_elems(self, d: dict[str, Any], elem_row: Any) -> None: + """ + Create child elements of row. + + This method adds child elements using elem_cols to row element and + works with tuples for multindex or hierarchical columns. + """ + sub_element_cls = self._sub_element_cls + + if not self.elem_cols: + return + + for col in self.elem_cols: + elem_name = self._get_flat_col_name(col) + try: + val = None if isna(d[col]) or d[col] == "" else str(d[col]) + sub_element_cls(elem_row, elem_name).text = val + except KeyError: + raise KeyError(f"no valid column, {col}") + + @final + def write_output(self) -> str | None: + xml_doc = self._build_tree() + + if self.path_or_buffer is not None: + with get_handle( + self.path_or_buffer, + "wb", + compression=self.compression, + storage_options=self.storage_options, + is_text=False, + ) as handles: + handles.handle.write(xml_doc) + return None + + else: + return xml_doc.decode(self.encoding).rstrip() + + +class EtreeXMLFormatter(_BaseXMLFormatter): + """ + Class for formatting data in xml using Python standard library + modules: `xml.etree.ElementTree` and `xml.dom.minidom`. 
+ """ + + def _build_tree(self) -> bytes: + from xml.etree.ElementTree import ( + Element, + SubElement, + tostring, + ) + + self.root = Element( + f"{self.prefix_uri}{self.root_name}", attrib=self._other_namespaces() + ) + + for d in self.frame_dicts.values(): + elem_row = SubElement(self.root, f"{self.prefix_uri}{self.row_name}") + + if not self.attr_cols and not self.elem_cols: + self.elem_cols = list(d.keys()) + self._build_elems(d, elem_row) + + else: + elem_row = self._build_attribs(d, elem_row) + self._build_elems(d, elem_row) + + self.out_xml = tostring( + self.root, + method="xml", + encoding=self.encoding, + xml_declaration=self.xml_declaration, + ) + + if self.pretty_print: + self.out_xml = self._prettify_tree() + + if self.stylesheet is not None: + raise ValueError( + "To use stylesheet, you need lxml installed and selected as parser." + ) + + return self.out_xml + + def _get_prefix_uri(self) -> str: + from xml.etree.ElementTree import register_namespace + + uri = "" + if self.namespaces: + for p, n in self.namespaces.items(): + if isinstance(p, str) and isinstance(n, str): + register_namespace(p, n) + if self.prefix: + try: + uri = f"{{{self.namespaces[self.prefix]}}}" + except KeyError: + raise KeyError(f"{self.prefix} is not included in namespaces") + elif "" in self.namespaces: + uri = f'{{{self.namespaces[""]}}}' + else: + uri = "" + + return uri + + @cache_readonly + def _sub_element_cls(self): + from xml.etree.ElementTree import SubElement + + return SubElement + + def _prettify_tree(self) -> bytes: + """ + Output tree for pretty print format. + + This method will pretty print xml with line breaks and indentation. + """ + + from xml.dom.minidom import parseString + + dom = parseString(self.out_xml) + + return dom.toprettyxml(indent=" ", encoding=self.encoding) + + +class LxmlXMLFormatter(_BaseXMLFormatter): + """ + Class for formatting data in xml using Python standard library + modules: `xml.etree.ElementTree` and `xml.dom.minidom`. + """ + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + self._convert_empty_str_key() + + def _build_tree(self) -> bytes: + """ + Build tree from data. + + This method initializes the root and builds attributes and elements + with optional namespaces. + """ + from lxml.etree import ( + Element, + SubElement, + tostring, + ) + + self.root = Element(f"{self.prefix_uri}{self.root_name}", nsmap=self.namespaces) + + for d in self.frame_dicts.values(): + elem_row = SubElement(self.root, f"{self.prefix_uri}{self.row_name}") + + if not self.attr_cols and not self.elem_cols: + self.elem_cols = list(d.keys()) + self._build_elems(d, elem_row) + + else: + elem_row = self._build_attribs(d, elem_row) + self._build_elems(d, elem_row) + + self.out_xml = tostring( + self.root, + pretty_print=self.pretty_print, + method="xml", + encoding=self.encoding, + xml_declaration=self.xml_declaration, + ) + + if self.stylesheet is not None: + self.out_xml = self._transform_doc() + + return self.out_xml + + def _convert_empty_str_key(self) -> None: + """ + Replace zero-length string in `namespaces`. + + This method will replace '' with None to align to `lxml` + requirement that empty string prefixes are not allowed. 
+ """ + + if self.namespaces and "" in self.namespaces.keys(): + self.namespaces[None] = self.namespaces.pop("", "default") + + def _get_prefix_uri(self) -> str: + uri = "" + if self.namespaces: + if self.prefix: + try: + uri = f"{{{self.namespaces[self.prefix]}}}" + except KeyError: + raise KeyError(f"{self.prefix} is not included in namespaces") + elif "" in self.namespaces: + uri = f'{{{self.namespaces[""]}}}' + else: + uri = "" + + return uri + + @cache_readonly + def _sub_element_cls(self): + from lxml.etree import SubElement + + return SubElement + + def _transform_doc(self) -> bytes: + """ + Parse stylesheet from file or buffer and run it. + + This method will parse stylesheet object into tree for parsing + conditionally by its specific object type, then transforms + original tree with XSLT script. + """ + from lxml.etree import ( + XSLT, + XMLParser, + fromstring, + parse, + ) + + style_doc = self.stylesheet + assert style_doc is not None # is ensured by caller + + handle_data = get_data_from_filepath( + filepath_or_buffer=style_doc, + encoding=self.encoding, + compression=self.compression, + storage_options=self.storage_options, + ) + + with preprocess_data(handle_data) as xml_data: + curr_parser = XMLParser(encoding=self.encoding) + + if isinstance(xml_data, io.StringIO): + xsl_doc = fromstring( + xml_data.getvalue().encode(self.encoding), parser=curr_parser + ) + else: + xsl_doc = parse(xml_data, parser=curr_parser) + + transformer = XSLT(xsl_doc) + new_doc = transformer(self.root) + + return bytes(new_doc) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/html.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/html.py new file mode 100644 index 0000000000000000000000000000000000000000..4eeeb1b655f8ac55309edeacd593f5a5c2516678 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/html.py @@ -0,0 +1,1259 @@ +""" +:mod:`pandas.io.html` is a module containing functionality for dealing with +HTML IO. + +""" + +from __future__ import annotations + +from collections import abc +import numbers +import re +from re import Pattern +from typing import ( + TYPE_CHECKING, + Literal, + cast, +) +import warnings + +from pandas._libs import lib +from pandas.compat._optional import import_optional_dependency +from pandas.errors import ( + AbstractMethodError, + EmptyDataError, +) +from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import check_dtype_backend + +from pandas.core.dtypes.common import is_list_like + +from pandas import isna +from pandas.core.indexes.base import Index +from pandas.core.indexes.multi import MultiIndex +from pandas.core.series import Series +from pandas.core.shared_docs import _shared_docs + +from pandas.io.common import ( + file_exists, + get_handle, + is_file_like, + is_fsspec_url, + is_url, + stringify_path, + validate_header_arg, +) +from pandas.io.formats.printing import pprint_thing +from pandas.io.parsers import TextParser + +if TYPE_CHECKING: + from collections.abc import ( + Iterable, + Sequence, + ) + + from pandas._typing import ( + BaseBuffer, + DtypeBackend, + FilePath, + HTMLFlavors, + ReadBuffer, + StorageOptions, + ) + + from pandas import DataFrame + +############# +# READ HTML # +############# +_RE_WHITESPACE = re.compile(r"[\r\n]+|\s{2,}") + + +def _remove_whitespace(s: str, regex: Pattern = _RE_WHITESPACE) -> str: + """ + Replace extra whitespace inside of a string with a single space. 
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/html.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/html.py
new file mode 100644
index 0000000000000000000000000000000000000000..4eeeb1b655f8ac55309edeacd593f5a5c2516678
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/html.py
@@ -0,0 +1,1259 @@
+"""
+:mod:`pandas.io.html` is a module containing functionality for dealing with
+HTML IO.
+
+"""
+
+from __future__ import annotations
+
+from collections import abc
+import numbers
+import re
+from re import Pattern
+from typing import (
+    TYPE_CHECKING,
+    Literal,
+    cast,
+)
+import warnings
+
+from pandas._libs import lib
+from pandas.compat._optional import import_optional_dependency
+from pandas.errors import (
+    AbstractMethodError,
+    EmptyDataError,
+)
+from pandas.util._decorators import doc
+from pandas.util._exceptions import find_stack_level
+from pandas.util._validators import check_dtype_backend
+
+from pandas.core.dtypes.common import is_list_like
+
+from pandas import isna
+from pandas.core.indexes.base import Index
+from pandas.core.indexes.multi import MultiIndex
+from pandas.core.series import Series
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.common import (
+    file_exists,
+    get_handle,
+    is_file_like,
+    is_fsspec_url,
+    is_url,
+    stringify_path,
+    validate_header_arg,
+)
+from pandas.io.formats.printing import pprint_thing
+from pandas.io.parsers import TextParser
+
+if TYPE_CHECKING:
+    from collections.abc import (
+        Iterable,
+        Sequence,
+    )
+
+    from pandas._typing import (
+        BaseBuffer,
+        DtypeBackend,
+        FilePath,
+        HTMLFlavors,
+        ReadBuffer,
+        StorageOptions,
+    )
+
+    from pandas import DataFrame
+
+#############
+# READ HTML #
+#############
+_RE_WHITESPACE = re.compile(r"[\r\n]+|\s{2,}")
+
+
+def _remove_whitespace(s: str, regex: Pattern = _RE_WHITESPACE) -> str:
+    """
+    Replace extra whitespace inside of a string with a single space.
+
+    Parameters
+    ----------
+    s : str or unicode
+        The string from which to remove extra whitespace.
+    regex : re.Pattern
+        The regular expression to use to remove extra whitespace.
+
+    Returns
+    -------
+    subd : str or unicode
+        `s` with all extra whitespace replaced with a single space.
+    """
+    return regex.sub(" ", s.strip())
+
+
+def _get_skiprows(skiprows: int | Sequence[int] | slice | None) -> int | Sequence[int]:
+    """
+    Get an iterator given an integer, slice or container.
+
+    Parameters
+    ----------
+    skiprows : int, slice, container
+        The iterator to use to skip rows; can also be a slice.
+
+    Raises
+    ------
+    TypeError
+        * If `skiprows` is not a slice, integer, or Container
+
+    Returns
+    -------
+    it : iterable
+        A proper iterator to use to skip rows of a DataFrame.
+    """
+    if isinstance(skiprows, slice):
+        start, step = skiprows.start or 0, skiprows.step or 1
+        return list(range(start, skiprows.stop, step))
+    elif isinstance(skiprows, numbers.Integral) or is_list_like(skiprows):
+        return cast("int | Sequence[int]", skiprows)
+    elif skiprows is None:
+        return 0
+    raise TypeError(f"{type(skiprows).__name__} is not a valid type for skipping rows")
+
+
+def _read(
+    obj: FilePath | BaseBuffer,
+    encoding: str | None,
+    storage_options: StorageOptions | None,
+) -> str | bytes:
+    """
+    Try to read from a url, file or string.
+
+    Parameters
+    ----------
+    obj : str, unicode, path object, or file-like object
+
+    Returns
+    -------
+    raw_text : str
+    """
+    text: str | bytes
+    if (
+        is_url(obj)
+        or hasattr(obj, "read")
+        or (isinstance(obj, str) and file_exists(obj))
+    ):
+        with get_handle(
+            obj, "r", encoding=encoding, storage_options=storage_options
+        ) as handles:
+            text = handles.handle.read()
+    elif isinstance(obj, (str, bytes)):
+        text = obj
+    else:
+        raise TypeError(f"Cannot read object of type '{type(obj).__name__}'")
+    return text
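+
+
+# Editor's illustration (not part of pandas): _read above accepts URLs,
+# file-like objects and raw str/bytes. Per the deprecation note in read_html
+# further down, literal HTML should be wrapped in StringIO first::
+#
+#     >>> import io
+#     >>> import pandas as pd
+#     >>> buf = io.StringIO("<table><tr><th>a</th></tr><tr><td>1</td></tr></table>")
+#     >>> pd.read_html(buf)[0]
+#        a
+#     0  1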
+
+
+class _HtmlFrameParser:
+    """
+    Base class for parsers that parse HTML into DataFrames.
+
+    Parameters
+    ----------
+    io : str or file-like
+        This can be either a string of raw HTML, a valid URL using the HTTP,
+        FTP, or FILE protocols or a file-like object.
+
+    match : str or regex
+        The text to match in the document.
+
+    attrs : dict
+        Dictionary of HTML element attributes to match.
+
+    encoding : str
+        Encoding to be used by parser
+
+    displayed_only : bool
+        Whether or not items with "display:none" should be ignored
+
+    extract_links : {None, "all", "header", "body", "footer"}
+        Table elements in the specified section(s) with <a> tags will have
+        their href extracted.
+
+        .. versionadded:: 1.5.0
+
+    Attributes
+    ----------
+    io : str or file-like
+        raw HTML, URL, or file-like object
+
+    match : regex
+        The text to match in the raw HTML
+
+    attrs : dict-like
+        A dictionary of valid table attributes to use to search for table
+        elements.
+
+    encoding : str
+        Encoding to be used by parser
+
+    displayed_only : bool
+        Whether or not items with "display:none" should be ignored
+
+    extract_links : {None, "all", "header", "body", "footer"}
+        Table elements in the specified section(s) with <a> tags will have
+        their href extracted.
+
+        .. versionadded:: 1.5.0
+
+    Notes
+    -----
+    To subclass this class effectively you must override the following methods:
+        * :func:`_build_doc`
+        * :func:`_attr_getter`
+        * :func:`_href_getter`
+        * :func:`_text_getter`
+        * :func:`_parse_td`
+        * :func:`_parse_thead_tr`
+        * :func:`_parse_tbody_tr`
+        * :func:`_parse_tfoot_tr`
+        * :func:`_parse_tables`
+        * :func:`_equals_tag`
+
+    See each method's respective documentation for details on their
+    functionality.
+    """
+
+    def __init__(
+        self,
+        io: FilePath | ReadBuffer[str] | ReadBuffer[bytes],
+        match: str | Pattern,
+        attrs: dict[str, str] | None,
+        encoding: str,
+        displayed_only: bool,
+        extract_links: Literal[None, "header", "footer", "body", "all"],
+        storage_options: StorageOptions = None,
+    ) -> None:
+        self.io = io
+        self.match = match
+        self.attrs = attrs
+        self.encoding = encoding
+        self.displayed_only = displayed_only
+        self.extract_links = extract_links
+        self.storage_options = storage_options
+
+    def parse_tables(self):
+        """
+        Parse and return all tables from the DOM.
+
+        Returns
+        -------
+        list of parsed (header, body, footer) tuples from tables.
+        """
+        tables = self._parse_tables(self._build_doc(), self.match, self.attrs)
+        return (self._parse_thead_tbody_tfoot(table) for table in tables)
+
+    def _attr_getter(self, obj, attr):
+        """
+        Return the attribute value of an individual DOM node.
+
+        Parameters
+        ----------
+        obj : node-like
+            A DOM node.
+
+        attr : str or unicode
+            The attribute, such as "colspan"
+
+        Returns
+        -------
+        str or unicode
+            The attribute value.
+        """
+        # Both lxml and BeautifulSoup have the same implementation:
+        return obj.get(attr)
+
+    def _href_getter(self, obj) -> str | None:
+        """
+        Return a href if the DOM node contains a child <a> or None.
+
+        Parameters
+        ----------
+        obj : node-like
+            A DOM node.
+
+        Returns
+        -------
+        href : str or unicode
+            The href from the <a> child of the DOM node.
+        """
+        raise AbstractMethodError(self)
+
+    def _text_getter(self, obj):
+        """
+        Return the text of an individual DOM node.
+
+        Parameters
+        ----------
+        obj : node-like
+            A DOM node.
+
+        Returns
+        -------
+        text : str or unicode
+            The text from an individual DOM node.
+        """
+        raise AbstractMethodError(self)
+
+    def _parse_td(self, obj):
+        """
+        Return the td elements from a row element.
+
+        Parameters
+        ----------
+        obj : node-like
+            A DOM node.
+
+        Returns
+        -------
+        list of node-like
+            These are the elements of each row, i.e., the columns.
+        """
+        raise AbstractMethodError(self)
+
+    def _parse_thead_tr(self, table):
+        """
+        Return the list of thead row elements from the parsed table element.
+
+        Parameters
+        ----------
+        table : a table element that contains zero or more thead elements.
+
+        Returns
+        -------
+        list of node-like
+            These are the row elements of a table.
+        """
+        raise AbstractMethodError(self)
+
+    def _parse_tbody_tr(self, table):
+        """
+        Return the list of tbody row elements from the parsed table element.
+
+        HTML5 table bodies consist of either 0 or more <tbody> elements (which
+        only contain <tr> elements) or 0 or more <tr> elements. This method
+        checks for both structures.
+
+        Parameters
+        ----------
+        table : a table element that contains row elements.
+
+        Returns
+        -------
+        list of node-like
+            These are the row elements of a table.
+        """
+        raise AbstractMethodError(self)
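+
+    # Editor's illustration (not part of pandas): the extract_links option
+    # documented above turns each cell of the chosen section into a
+    # (text, href) tuple::
+    #
+    #     >>> import io
+    #     >>> import pandas as pd
+    #     >>> html = ('<table><tr><th>site</th></tr><tr><td>'
+    #     ...         '<a href="https://pandas.pydata.org">pandas</a>'
+    #     ...         '</td></tr></table>')
+    #     >>> pd.read_html(io.StringIO(html), extract_links="body")[0].iloc[0, 0]
+    #     ('pandas', 'https://pandas.pydata.org')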
+
+    def _parse_tfoot_tr(self, table):
+        """
+        Return the list of tfoot row elements from the parsed table element.
+
+        Parameters
+        ----------
+        table : a table element that contains row elements.
+
+        Returns
+        -------
+        list of node-like
+            These are the row elements of a table.
+        """
+        raise AbstractMethodError(self)
+
+    def _parse_tables(self, document, match, attrs):
+        """
+        Return all tables from the parsed DOM.
+
+        Parameters
+        ----------
+        document : the DOM from which to parse the table element.
+
+        match : str or regular expression
+            The text to search for in the DOM tree.
+
+        attrs : dict
+            A dictionary of table attributes that can be used to disambiguate
+            multiple tables on a page.
+
+        Raises
+        ------
+        ValueError : `match` does not match any text in the document.
+
+        Returns
+        -------
+        list of node-like
+            HTML <table> elements to be parsed into raw data.
+        """
+        raise AbstractMethodError(self)
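+
+    # Editor's illustration (not part of pandas): the rowspan/colspan
+    # expansion implemented by _expand_colspan_rowspan below copies a spanned
+    # cell's text into every grid position it covers::
+    #
+    #     >>> import io
+    #     >>> import pandas as pd
+    #     >>> html = ('<table><tr><th>A</th><th>B</th></tr>'
+    #     ...         '<tr><td rowspan="2">x</td><td>1</td></tr>'
+    #     ...         '<tr><td>2</td></tr></table>')
+    #     >>> pd.read_html(io.StringIO(html))[0]
+    #        A  B
+    #     0  x  1
+    #     1  x  2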
+
+    def _equals_tag(self, obj, tag) -> bool:
+        """
+        Return whether an individual DOM node matches a tag
+
+        Parameters
+        ----------
+        obj : node-like
+            A DOM node.
+
+        tag : str
+            Tag name to be checked for equality.
+
+        Returns
+        -------
+        boolean
+            Whether `obj`'s tag name is `tag`
+        """
+        raise AbstractMethodError(self)
+
+    def _build_doc(self):
+        """
+        Return a tree-like object that can be used to iterate over the DOM.
+
+        Returns
+        -------
+        node-like
+            The DOM from which to parse the table element.
+        """
+        raise AbstractMethodError(self)
+
+    def _parse_thead_tbody_tfoot(self, table_html):
+        """
+        Given a table, return parsed header, body, and foot.
+
+        Parameters
+        ----------
+        table_html : node-like
+
+        Returns
+        -------
+        tuple of (header, body, footer), each a list of list-of-text rows.
+
+        Notes
+        -----
+        Header and body are lists-of-lists. Top level list is a list of
+        rows. Each row is a list of str text.
+
+        Logic: Use <thead>, <tbody>, <tfoot> elements to identify
+               header, body, and footer, otherwise:
+               - Put all rows into body
+               - Move rows from top of body to header only if
+                 all elements inside row are <th>
+               - Move rows from bottom of body to footer only if
+                 all elements inside row are <td>
+        """
+        header_rows = self._parse_thead_tr(table_html)
+        body_rows = self._parse_tbody_tr(table_html)
+        footer_rows = self._parse_tfoot_tr(table_html)
+
+        def row_is_all_th(row):
+            return all(self._equals_tag(t, "th") for t in self._parse_td(row))
+
+        if not header_rows:
+            # The table has no <thead>. Move the top all-<th> rows from
+            # body_rows to header_rows. (This is a common case because many
+            # tables in the wild have no <thead> or <tfoot>
+            while body_rows and row_is_all_th(body_rows[0]):
+                header_rows.append(body_rows.pop(0))
+
+        header = self._expand_colspan_rowspan(header_rows, section="header")
+        body = self._expand_colspan_rowspan(body_rows, section="body")
+        footer = self._expand_colspan_rowspan(footer_rows, section="footer")
+
+        return header, body, footer
+
+    def _expand_colspan_rowspan(
+        self, rows, section: Literal["header", "footer", "body"]
+    ):
+        """
+        Given a list of <tr>s, return a list of text rows.
+
+        Parameters
+        ----------
+        rows : list of node-like
+            List of <tr>s
+        section : the section that the rows belong to (header, body or footer).
+
+        Returns
+        -------
+        list of list
+            Each returned row is a list of str text, or tuple (text, link)
+            if extract_links is not None.
+
+        Notes
+        -----
+        Any cell with ``rowspan`` or ``colspan`` will have its contents copied
+        to subsequent cells.
+        """
+        all_texts = []  # list of rows, each a list of str
+        text: str | tuple
+        remainder: list[
+            tuple[int, str | tuple, int]
+        ] = []  # list of (index, text, nrows)
+
+        for tr in rows:
+            texts = []  # the output for this row
+            next_remainder = []
+
+            index = 0
+            tds = self._parse_td(tr)
+            for td in tds:
+                # Append texts from previous rows with rowspan>1 that come
+                # before this <td> or <th>
+                while remainder and remainder[0][0] <= index:
+                    prev_i, prev_text, prev_rowspan = remainder.pop(0)
+                    texts.append(prev_text)
+                    if prev_rowspan > 1:
+                        next_remainder.append((prev_i, prev_text, prev_rowspan - 1))
+                    index += 1
+
+                # Append the text from this <td>, colspan times
+                text = _remove_whitespace(self._text_getter(td))
+                if self.extract_links in ("all", section):
+                    href = self._href_getter(td)
+                    text = (text, href)
+                rowspan = int(self._attr_getter(td, "rowspan") or 1)
+                colspan = int(self._attr_getter(td, "colspan") or 1)
+
+                for _ in range(colspan):
+                    texts.append(text)
+                    if rowspan > 1:
+                        next_remainder.append((index, text, rowspan - 1))
+                    index += 1
+
+            # Append texts from previous rows at the final position
+            for prev_i, prev_text, prev_rowspan in remainder:
+                texts.append(prev_text)
+                if prev_rowspan > 1:
+                    next_remainder.append((prev_i, prev_text, prev_rowspan - 1))
+
+            all_texts.append(texts)
+            remainder = next_remainder
+
+        # Append rows that only appear because the previous row had non-1
+        # rowspan
+        while remainder:
+            next_remainder = []
+            texts = []
+            for prev_i, prev_text, prev_rowspan in remainder:
+                texts.append(prev_text)
+                if prev_rowspan > 1:
+                    next_remainder.append((prev_i, prev_text, prev_rowspan - 1))
+            all_texts.append(texts)
+            remainder = next_remainder
+
+        return all_texts
+
+    def _handle_hidden_tables(self, tbl_list, attr_name: str):
+        """
+        Return list of tables, potentially removing hidden elements
+
+        Parameters
+        ----------
+        tbl_list : list of node-like
+            Type of list elements will vary depending upon parser used
+        attr_name : str
+            Name of the accessor for retrieving HTML attributes
+
+        Returns
+        -------
+        list of node-like
+            Return type matches `tbl_list`
+        """
+        if not self.displayed_only:
+            return tbl_list
+
+        return [
+            x
+            for x in tbl_list
+            if "display:none"
+            not in getattr(x, attr_name).get("style", "").replace(" ", "")
+        ]
+
+
+class _BeautifulSoupHtml5LibFrameParser(_HtmlFrameParser):
+    """
+    HTML to DataFrame parser that uses BeautifulSoup under the hood.
+
+    See Also
+    --------
+    pandas.io.html._HtmlFrameParser
+    pandas.io.html._LxmlFrameParser
+
+    Notes
+    -----
+    Documentation strings for this class are in the base class
+    :class:`pandas.io.html._HtmlFrameParser`.
+    """
+
+    def _parse_tables(self, document, match, attrs):
+        element_name = "table"
+        tables = document.find_all(element_name, attrs=attrs)
+        if not tables:
+            raise ValueError("No tables found")
+
+        result = []
+        unique_tables = set()
+        tables = self._handle_hidden_tables(tables, "attrs")
+
+        for table in tables:
+            if self.displayed_only:
+                for elem in table.find_all("style"):
+                    elem.decompose()
+
+                for elem in table.find_all(style=re.compile(r"display:\s*none")):
+                    elem.decompose()
+
+            if table not in unique_tables and table.find(string=match) is not None:
+                result.append(table)
+            unique_tables.add(table)
+        if not result:
+            raise ValueError(f"No tables found matching pattern {repr(match.pattern)}")
+        return result
+
+    def _href_getter(self, obj) -> str | None:
+        a = obj.find("a", href=True)
+        return None if not a else a["href"]
+
+    def _text_getter(self, obj):
+        return obj.text
+
+    def _equals_tag(self, obj, tag) -> bool:
+        return obj.name == tag
+
+    def _parse_td(self, row):
+        return row.find_all(("td", "th"), recursive=False)
+
+    def _parse_thead_tr(self, table):
+        return table.select("thead tr")
+
+    def _parse_tbody_tr(self, table):
+        from_tbody = table.select("tbody tr")
+        from_root = table.find_all("tr", recursive=False)
+        # HTML spec: at most one of these lists has content
+        return from_tbody + from_root
+
+    def _parse_tfoot_tr(self, table):
+        return table.select("tfoot tr")
+
+    def _setup_build_doc(self):
+        raw_text = _read(self.io, self.encoding, self.storage_options)
+        if not raw_text:
+            raise ValueError(f"No text parsed from document: {self.io}")
+        return raw_text
+
+    def _build_doc(self):
+        from bs4 import BeautifulSoup
+
+        bdoc = self._setup_build_doc()
+        if isinstance(bdoc, bytes) and self.encoding is not None:
+            udoc = bdoc.decode(self.encoding)
+            from_encoding = None
+        else:
+            udoc = bdoc
+            from_encoding = self.encoding
+
+        soup = BeautifulSoup(udoc, features="html5lib", from_encoding=from_encoding)
+
+        for br in soup.find_all("br"):
+            br.replace_with("\n" + br.text)
+
+        return soup
+
+
+def _build_xpath_expr(attrs) -> str:
+    """
+    Build an xpath expression to simulate bs4's ability to pass in kwargs to
+    search for attributes when using the lxml parser.
+
+    Parameters
+    ----------
+    attrs : dict
+        A dict of HTML attributes. These are NOT checked for validity.
+
+    Returns
+    -------
+    expr : unicode
+        An XPath expression that checks for the given HTML attributes.
+    """
+    # give class attribute as class_ because class is a python keyword
+    if "class_" in attrs:
+        attrs["class"] = attrs.pop("class_")
+
+    s = " and ".join([f"@{k}={repr(v)}" for k, v in attrs.items()])
+    return f"[{s}]"
+
+
+_re_namespace = {"re": "http://exslt.org/regular-expressions"}
+
+
+class _LxmlFrameParser(_HtmlFrameParser):
+    """
+    HTML to DataFrame parser that uses lxml under the hood.
+
+    Warning
+    -------
+    This parser can only handle HTTP, FTP, and FILE urls.
+
+    See Also
+    --------
+    _HtmlFrameParser
+    _BeautifulSoupHtml5LibFrameParser
+
+    Notes
+    -----
+    Documentation strings for this class are in the base class
+    :class:`_HtmlFrameParser`.
+    """
+
+    def _href_getter(self, obj) -> str | None:
+        href = obj.xpath(".//a/@href")
+        return None if not href else href[0]
+
+    def _text_getter(self, obj):
+        return obj.text_content()
+
+    def _parse_td(self, row):
+        # Look for direct children only: the "row" element here may be a
+        # <thead> or <tfoot> (see _parse_thead_tr).
+        return row.xpath("./td|./th")
+
+    def _parse_tables(self, document, match, kwargs):
+        pattern = match.pattern
+
+        # 1. check all descendants for the given pattern and only search tables
+        # GH 49929
+        xpath_expr = f"//table[.//text()[re:test(., {repr(pattern)})]]"
+
+        # if any table attributes were given build an xpath expression to
+        # search for them
+        if kwargs:
+            xpath_expr += _build_xpath_expr(kwargs)
+
+        tables = document.xpath(xpath_expr, namespaces=_re_namespace)
+
+        tables = self._handle_hidden_tables(tables, "attrib")
+        if self.displayed_only:
+            for table in tables:
+                # lxml utilizes XPATH 1.0 which does not have regex
+                # support. As a result, we find all elements with a style
+                # attribute and iterate them to check for display:none
+                for elem in table.xpath(".//style"):
+                    elem.drop_tree()
+                for elem in table.xpath(".//*[@style]"):
+                    if "display:none" in elem.attrib.get("style", "").replace(" ", ""):
+                        elem.drop_tree()
+        if not tables:
+            raise ValueError(f"No tables found matching regex {repr(pattern)}")
+        return tables
+
+    def _equals_tag(self, obj, tag) -> bool:
+        return obj.tag == tag
+
+    def _build_doc(self):
+        """
+        Raises
+        ------
+        ValueError
+            * If a URL that lxml cannot parse is passed.
+
+        Exception
+            * Any other ``Exception`` thrown. For example, trying to parse a
+              URL that is syntactically correct on a machine with no internet
+              connection will fail.
+
+        See Also
+        --------
+        pandas.io.html._HtmlFrameParser._build_doc
+        """
+        from lxml.etree import XMLSyntaxError
+        from lxml.html import (
+            HTMLParser,
+            fromstring,
+            parse,
+        )
+
+        parser = HTMLParser(recover=True, encoding=self.encoding)
+
+        try:
+            if is_url(self.io):
+                with get_handle(
+                    self.io, "r", storage_options=self.storage_options
+                ) as f:
+                    r = parse(f.handle, parser=parser)
+            else:
+                # try to parse the input in the simplest way
+                r = parse(self.io, parser=parser)
+            try:
+                r = r.getroot()
+            except AttributeError:
+                pass
+        except (UnicodeDecodeError, OSError) as e:
+            # if the input is a blob of html goop
+            if not is_url(self.io):
+                r = fromstring(self.io, parser=parser)
+
+                try:
+                    r = r.getroot()
+                except AttributeError:
+                    pass
+            else:
+                raise e
+        else:
+            if not hasattr(r, "text_content"):
+                raise XMLSyntaxError("no text parsed from document", 0, 0, 0)
+
+        for br in r.xpath("*//br"):
+            br.tail = "\n" + (br.tail or "")
+
+        return r
+
+    def _parse_thead_tr(self, table):
+        rows = []
+
+        for thead in table.xpath(".//thead"):
+            rows.extend(thead.xpath("./tr"))
+
+            # HACK: lxml does not clean up the clearly-erroneous
+            # <thead><td>foobar</td></thead>. (Missing <tr>). Add
+            # the <thead> and _pretend_ it's a <tr>; _parse_td() will find its
+            # children as though it's a <tr>.
+            #
+            # Better solution would be to use html5lib.
+            elements_at_root = thead.xpath("./td|./th")
+            if elements_at_root:
+                rows.append(thead)
+
+        return rows
+
+    def _parse_tbody_tr(self, table):
+        from_tbody = table.xpath(".//tbody//tr")
+        from_root = table.xpath("./tr")
+        # HTML spec: at most one of these lists has content
+        return from_tbody + from_root
+
+    def _parse_tfoot_tr(self, table):
+        return table.xpath(".//tfoot//tr")
+
+
+def _expand_elements(body) -> None:
+    data = [len(elem) for elem in body]
+    lens = Series(data)
+    lens_max = lens.max()
+    not_max = lens[lens != lens_max]
+
+    empty = [""]
+    for ind, length in not_max.items():
+        body[ind] += empty * (lens_max - length)
+
+
+def _data_to_frame(**kwargs):
+    head, body, foot = kwargs.pop("data")
+    header = kwargs.pop("header")
+    kwargs["skiprows"] = _get_skiprows(kwargs["skiprows"])
+    if head:
+        body = head + body
+
+        # Infer header when there is a <thead> or top <th>-only rows
+        if header is None:
+            if len(head) == 1:
+                header = 0
+            else:
+                # ignore all-empty-text rows
+                header = [i for i, row in enumerate(head) if any(text for text in row)]
+
+    if foot:
+        body += foot
+
+    # fill out elements of body that are "ragged"
+    _expand_elements(body)
+    with TextParser(body, header=header, **kwargs) as tp:
+        return tp.read()
+
+
+_valid_parsers = {
+    "lxml": _LxmlFrameParser,
+    None: _LxmlFrameParser,
+    "html5lib": _BeautifulSoupHtml5LibFrameParser,
+    "bs4": _BeautifulSoupHtml5LibFrameParser,
+}
+
+
+def _parser_dispatch(flavor: HTMLFlavors | None) -> type[_HtmlFrameParser]:
+    """
+    Choose the parser based on the input flavor.
+
+    Parameters
+    ----------
+    flavor : {{"lxml", "html5lib", "bs4"}} or None
+        The type of parser to use. This must be a valid backend.
+
+    Returns
+    -------
+    cls : _HtmlFrameParser subclass
+        The parser class based on the requested input flavor.
+
+    Raises
+    ------
+    ValueError
+        * If `flavor` is not a valid backend.
+    ImportError
+        * If you do not have the requested `flavor`
+    """
+    valid_parsers = list(_valid_parsers.keys())
+    if flavor not in valid_parsers:
+        raise ValueError(
+            f"{repr(flavor)} is not a valid flavor, valid flavors are {valid_parsers}"
+        )
+
+    if flavor in ("bs4", "html5lib"):
+        import_optional_dependency("html5lib")
+        import_optional_dependency("bs4")
+    else:
+        import_optional_dependency("lxml.etree")
+    return _valid_parsers[flavor]
+
+
+def _print_as_set(s) -> str:
+    arg = ", ".join([pprint_thing(el) for el in s])
+    return f"{{{arg}}}"
+
+
+def _validate_flavor(flavor):
+    if flavor is None:
+        flavor = "lxml", "bs4"
+    elif isinstance(flavor, str):
+        flavor = (flavor,)
+    elif isinstance(flavor, abc.Iterable):
+        if not all(isinstance(flav, str) for flav in flavor):
+            raise TypeError(
+                f"Object of type {repr(type(flavor).__name__)} "
+                f"is not an iterable of strings"
+            )
+    else:
+        msg = repr(flavor) if isinstance(flavor, str) else str(flavor)
+        msg += " is not a valid flavor"
+        raise ValueError(msg)
+
+    flavor = tuple(flavor)
+    valid_flavors = set(_valid_parsers)
+    flavor_set = set(flavor)
+
+    if not flavor_set & valid_flavors:
+        raise ValueError(
+            f"{_print_as_set(flavor_set)} is not a valid set of flavors, valid "
+            f"flavors are {_print_as_set(valid_flavors)}"
+        )
+    return flavor
+
+
+def _parse(
+    flavor,
+    io,
+    match,
+    attrs,
+    encoding,
+    displayed_only,
+    extract_links,
+    storage_options,
+    **kwargs,
+):
+    flavor = _validate_flavor(flavor)
+    compiled_match = re.compile(match)  # you can pass a compiled regex here
+
+    retained = None
+    for flav in flavor:
+        parser = _parser_dispatch(flav)
+        p = parser(
+            io,
+            compiled_match,
+            attrs,
+            encoding,
+            displayed_only,
+            extract_links,
+            storage_options,
+        )
+
+        try:
+            tables = p.parse_tables()
+        except ValueError as caught:
+            # if `io` is an io-like object, check if it's seekable
+            # and try to rewind it before trying the next parser
+            if hasattr(io, "seekable") and io.seekable():
+                io.seek(0)
+            elif hasattr(io, "seekable") and not io.seekable():
+                # if we couldn't rewind it, let the user know
+                raise ValueError(
+                    f"The flavor {flav} failed to parse your input. "
+                    "Since you passed a non-rewindable file "
+                    "object, we can't rewind it to try "
+                    "another parser. Try read_html() with a different flavor."
+ ) from caught + + retained = caught + else: + break + else: + assert retained is not None # for mypy + raise retained + + ret = [] + for table in tables: + try: + df = _data_to_frame(data=table, **kwargs) + # Cast MultiIndex header to an Index of tuples when extracting header + # links and replace nan with None (therefore can't use mi.to_flat_index()). + # This maintains consistency of selection (e.g. df.columns.str[1]) + if extract_links in ("all", "header") and isinstance( + df.columns, MultiIndex + ): + df.columns = Index( + ((col[0], None if isna(col[1]) else col[1]) for col in df.columns), + tupleize_cols=False, + ) + + ret.append(df) + except EmptyDataError: # empty table + continue + return ret + + +@doc(storage_options=_shared_docs["storage_options"]) +def read_html( + io: FilePath | ReadBuffer[str], + *, + match: str | Pattern = ".+", + flavor: HTMLFlavors | Sequence[HTMLFlavors] | None = None, + header: int | Sequence[int] | None = None, + index_col: int | Sequence[int] | None = None, + skiprows: int | Sequence[int] | slice | None = None, + attrs: dict[str, str] | None = None, + parse_dates: bool = False, + thousands: str | None = ",", + encoding: str | None = None, + decimal: str = ".", + converters: dict | None = None, + na_values: Iterable[object] | None = None, + keep_default_na: bool = True, + displayed_only: bool = True, + extract_links: Literal[None, "header", "footer", "body", "all"] = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + storage_options: StorageOptions = None, +) -> list[DataFrame]: + r""" + Read HTML tables into a ``list`` of ``DataFrame`` objects. + + Parameters + ---------- + io : str, path object, or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a string ``read()`` function. + The string can represent a URL or the HTML itself. Note that + lxml only accepts the http, ftp and file url protocols. If you have a + URL that starts with ``'https'`` you might try removing the ``'s'``. + + .. deprecated:: 2.1.0 + Passing html literal strings is deprecated. + Wrap literal string/bytes input in ``io.StringIO``/``io.BytesIO`` instead. + + match : str or compiled regular expression, optional + The set of tables containing text matching this regex or string will be + returned. Unless the HTML is extremely simple you will probably need to + pass a non-empty string here. Defaults to '.+' (match any non-empty + string). The default value will return all tables contained on a page. + This value is converted to a regular expression so that there is + consistent behavior between Beautiful Soup and lxml. + + flavor : {{"lxml", "html5lib", "bs4"}} or list-like, optional + The parsing engine (or list of parsing engines) to use. 'bs4' and + 'html5lib' are synonymous with each other, they are both there for + backwards compatibility. The default of ``None`` tries to use ``lxml`` + to parse and if that fails it falls back on ``bs4`` + ``html5lib``. + + header : int or list-like, optional + The row (or list of rows for a :class:`~pandas.MultiIndex`) to use to + make the columns headers. + + index_col : int or list-like, optional + The column (or list of columns) to use to create the index. + + skiprows : int, list-like or slice, optional + Number of rows to skip after parsing the column integer. 0-based. If a + sequence of integers or a slice is given, will skip the rows indexed by + that sequence. 
Note that a single element sequence means 'skip the nth
+        row' whereas an integer means 'skip n rows'.
+
+    attrs : dict, optional
+        This is a dictionary of attributes that you can pass to use to identify
+        the table in the HTML. These are not checked for validity before being
+        passed to lxml or Beautiful Soup. However, these attributes must be
+        valid HTML table attributes to work correctly. For example, ::
+
+            attrs = {{'id': 'table'}}
+
+        is a valid attribute dictionary because the 'id' HTML tag attribute is
+        a valid HTML attribute for *any* HTML tag as per `this document
+        <https://html.spec.whatwg.org/multipage/dom.html#global-attributes>`__. ::
+
+            attrs = {{'asdf': 'table'}}
+
+        is *not* a valid attribute dictionary because 'asdf' is not a valid
+        HTML attribute even if it is a valid XML attribute. Valid HTML 4.01
+        table attributes can be found `here
+        <http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A
+        working draft of the HTML 5 spec can be found `here
+        <https://html.spec.whatwg.org/multipage/tables.html>`__. It contains the
+        latest information on table attributes for the modern web.
+
+    parse_dates : bool, optional
+        See :func:`~read_csv` for more details.
+
+    thousands : str, optional
+        Separator to use to parse thousands. Defaults to ``','``.
+
+    encoding : str, optional
+        The encoding used to decode the web page. Defaults to ``None``. ``None``
+        preserves the previous encoding behavior, which depends on the
+        underlying parser library (e.g., the parser library will try to use
+        the encoding provided by the document).
+
+    decimal : str, default '.'
+        Character to recognize as decimal point (e.g. use ',' for European
+        data).
+
+    converters : dict, default None
+        Dict of functions for converting values in certain columns. Keys can
+        either be integers or column labels, values are functions that take one
+        input argument, the cell (not column) content, and return the
+        transformed content.
+
+    na_values : iterable, default None
+        Custom NA values.
+
+    keep_default_na : bool, default True
+        If na_values are specified and keep_default_na is False the default NaN
+        values are overridden, otherwise they're appended to.
+
+    displayed_only : bool, default True
+        Whether elements with "display: none" should be parsed.
+
+    extract_links : {{None, "all", "header", "body", "footer"}}
+        Table elements in the specified section(s) with <a> tags will have their
+        href extracted.
+
+        .. versionadded:: 1.5.0
+
+    dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'
+        Back-end data type applied to the resultant :class:`DataFrame`
+        (still experimental). Behaviour is as follows:
+
+        * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+          (default).
+        * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+          DataFrame.
+
+        .. versionadded:: 2.0
+
+    {storage_options}
+
+        .. versionadded:: 2.1.0
+
+    Returns
+    -------
+    dfs
+        A list of DataFrames.
+
+    See Also
+    --------
+    read_csv : Read a comma-separated values (csv) file into DataFrame.
+
+    Notes
+    -----
+    Before using this function you should read the :ref:`gotchas about the
+    HTML parsing libraries <io.html.gotchas>`.
+
+    Expect to do some cleanup after you call this function. For example, you
+    might need to manually assign column names if the column names are
+    converted to NaN when you pass the `header=0` argument. We try to assume as
+    little as possible about the structure of the table and push the
+    idiosyncrasies of the HTML contained in the table to the user.
+
+    This function searches for ``<table>`` elements and only for ``<tr>``
+    and ``<th>`` rows and ``<td>`` elements within each ``<tr>`` or ``<th>``
+    element in the table. ``<td>`` stands for "table data". This function
+    attempts to properly handle ``colspan`` and ``rowspan`` attributes.
+    If the function has a ``<thead>`` argument, it is used to construct
+    the header, otherwise the function attempts to find the header within
+    the body (by putting rows with only ``<th>`` elements into the header).
+
+    Similar to :func:`~read_csv` the `header` argument is applied
+    **after** `skiprows` is applied.
+
+    This function will *always* return a list of :class:`DataFrame` *or*
+    it will fail, e.g., it will *not* return an empty list.
+
+    Examples
+    --------
+    See the :ref:`read_html documentation in the IO section of the docs
+    <io.read_html>` for some examples of reading in HTML tables.
+    """
+    # Type check here. We don't want to parse only to fail because of an
+    # invalid value of an integer skiprows.
+    if isinstance(skiprows, numbers.Integral) and skiprows < 0:
+        raise ValueError(
+            "cannot skip rows starting from the end of the "
+            "data (you passed a negative value)"
+        )
+    if extract_links not in [None, "header", "footer", "body", "all"]:
+        raise ValueError(
+            "`extract_links` must be one of "
+            '{None, "header", "footer", "body", "all"}, got '
+            f'"{extract_links}"'
+        )
+
+    validate_header_arg(header)
+    check_dtype_backend(dtype_backend)
+
+    io = stringify_path(io)
+
+    if isinstance(io, str) and not any(
+        [
+            is_file_like(io),
+            file_exists(io),
+            is_url(io),
+            is_fsspec_url(io),
+        ]
+    ):
+        warnings.warn(
+            "Passing literal html to 'read_html' is deprecated and "
+            "will be removed in a future version. To read from a "
+            "literal string, wrap it in a 'StringIO' object.",
+            FutureWarning,
+            stacklevel=find_stack_level(),
+        )
+
+    return _parse(
+        flavor=flavor,
+        io=io,
+        match=match,
+        header=header,
+        index_col=index_col,
+        skiprows=skiprows,
+        parse_dates=parse_dates,
+        thousands=thousands,
+        attrs=attrs,
+        encoding=encoding,
+        decimal=decimal,
+        converters=converters,
+        na_values=na_values,
+        keep_default_na=keep_default_na,
+        displayed_only=displayed_only,
+        extract_links=extract_links,
+        dtype_backend=dtype_backend,
+        storage_options=storage_options,
+    )
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/parquet.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/parquet.py
new file mode 100644
index 0000000000000000000000000000000000000000..9570d6f8b26bd85585e5a46145b7871f1bb6eb3a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/parquet.py
@@ -0,0 +1,676 @@
+""" parquet compat """
+from __future__ import annotations
+
+import io
+import json
+import os
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Literal,
+)
+import warnings
+from warnings import catch_warnings
+
+from pandas._config import using_pyarrow_string_dtype
+from pandas._config.config import _get_option
+
+from pandas._libs import lib
+from pandas.compat._optional import import_optional_dependency
+from pandas.errors import AbstractMethodError
+from pandas.util._decorators import doc
+from pandas.util._exceptions import find_stack_level
+from pandas.util._validators import check_dtype_backend
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    get_option,
+)
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io._util import arrow_string_types_mapper
+from pandas.io.common import (
+    IOHandles,
+    get_handle,
+    is_fsspec_url,
+    is_url,
+    stringify_path,
+)
+
+if TYPE_CHECKING:
+    from pandas._typing import (
+        DtypeBackend,
+        FilePath,
+        ReadBuffer,
+        StorageOptions,
+        WriteBuffer,
+    )
+
+
+def get_engine(engine: str) -> BaseImpl:
+    """return our implementation"""
+    if engine == "auto":
+        engine = get_option("io.parquet.engine")
+
+    if engine == "auto":
+        # try engines in this order
+        engine_classes = [PyArrowImpl, FastParquetImpl]
+
+        error_msgs = ""
+        for engine_class in engine_classes:
+            try:
+                return engine_class()
+            except ImportError as
err: + error_msgs += "\n - " + str(err) + + raise ImportError( + "Unable to find a usable engine; " + "tried using: 'pyarrow', 'fastparquet'.\n" + "A suitable version of " + "pyarrow or fastparquet is required for parquet " + "support.\n" + "Trying to import the above resulted in these errors:" + f"{error_msgs}" + ) + + if engine == "pyarrow": + return PyArrowImpl() + elif engine == "fastparquet": + return FastParquetImpl() + + raise ValueError("engine must be one of 'pyarrow', 'fastparquet'") + + +def _get_path_or_handle( + path: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], + fs: Any, + storage_options: StorageOptions | None = None, + mode: str = "rb", + is_dir: bool = False, +) -> tuple[ + FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], IOHandles[bytes] | None, Any +]: + """File handling for PyArrow.""" + path_or_handle = stringify_path(path) + if fs is not None: + pa_fs = import_optional_dependency("pyarrow.fs", errors="ignore") + fsspec = import_optional_dependency("fsspec", errors="ignore") + if pa_fs is not None and isinstance(fs, pa_fs.FileSystem): + if storage_options: + raise NotImplementedError( + "storage_options not supported with a pyarrow FileSystem." + ) + elif fsspec is not None and isinstance(fs, fsspec.spec.AbstractFileSystem): + pass + else: + raise ValueError( + f"filesystem must be a pyarrow or fsspec FileSystem, " + f"not a {type(fs).__name__}" + ) + if is_fsspec_url(path_or_handle) and fs is None: + if storage_options is None: + pa = import_optional_dependency("pyarrow") + pa_fs = import_optional_dependency("pyarrow.fs") + + try: + fs, path_or_handle = pa_fs.FileSystem.from_uri(path) + except (TypeError, pa.ArrowInvalid): + pass + if fs is None: + fsspec = import_optional_dependency("fsspec") + fs, path_or_handle = fsspec.core.url_to_fs( + path_or_handle, **(storage_options or {}) + ) + elif storage_options and (not is_url(path_or_handle) or mode != "rb"): + # can't write to a remote url + # without making use of fsspec at the moment + raise ValueError("storage_options passed with buffer, or non-supported URL") + + handles = None + if ( + not fs + and not is_dir + and isinstance(path_or_handle, str) + and not os.path.isdir(path_or_handle) + ): + # use get_handle only when we are very certain that it is not a directory + # fsspec resources can also point to directories + # this branch is used for example when reading from non-fsspec URLs + handles = get_handle( + path_or_handle, mode, is_text=False, storage_options=storage_options + ) + fs = None + path_or_handle = handles.handle + return path_or_handle, handles, fs + + +class BaseImpl: + @staticmethod + def validate_dataframe(df: DataFrame) -> None: + if not isinstance(df, DataFrame): + raise ValueError("to_parquet only supports IO with DataFrames") + + def write(self, df: DataFrame, path, compression, **kwargs): + raise AbstractMethodError(self) + + def read(self, path, columns=None, **kwargs) -> DataFrame: + raise AbstractMethodError(self) + + +class PyArrowImpl(BaseImpl): + def __init__(self) -> None: + import_optional_dependency( + "pyarrow", extra="pyarrow is required for parquet support." 
+ ) + import pyarrow.parquet + + # import utils to register the pyarrow extension types + import pandas.core.arrays.arrow.extension_types # pyright: ignore[reportUnusedImport] # noqa: F401 + + self.api = pyarrow + + def write( + self, + df: DataFrame, + path: FilePath | WriteBuffer[bytes], + compression: str | None = "snappy", + index: bool | None = None, + storage_options: StorageOptions | None = None, + partition_cols: list[str] | None = None, + filesystem=None, + **kwargs, + ) -> None: + self.validate_dataframe(df) + + from_pandas_kwargs: dict[str, Any] = {"schema": kwargs.pop("schema", None)} + if index is not None: + from_pandas_kwargs["preserve_index"] = index + + table = self.api.Table.from_pandas(df, **from_pandas_kwargs) + + if df.attrs: + df_metadata = {"PANDAS_ATTRS": json.dumps(df.attrs)} + existing_metadata = table.schema.metadata + merged_metadata = {**existing_metadata, **df_metadata} + table = table.replace_schema_metadata(merged_metadata) + + path_or_handle, handles, filesystem = _get_path_or_handle( + path, + filesystem, + storage_options=storage_options, + mode="wb", + is_dir=partition_cols is not None, + ) + if ( + isinstance(path_or_handle, io.BufferedWriter) + and hasattr(path_or_handle, "name") + and isinstance(path_or_handle.name, (str, bytes)) + ): + if isinstance(path_or_handle.name, bytes): + path_or_handle = path_or_handle.name.decode() + else: + path_or_handle = path_or_handle.name + + try: + if partition_cols is not None: + # writes to multiple files under the given path + self.api.parquet.write_to_dataset( + table, + path_or_handle, + compression=compression, + partition_cols=partition_cols, + filesystem=filesystem, + **kwargs, + ) + else: + # write to single output file + self.api.parquet.write_table( + table, + path_or_handle, + compression=compression, + filesystem=filesystem, + **kwargs, + ) + finally: + if handles is not None: + handles.close() + + def read( + self, + path, + columns=None, + filters=None, + use_nullable_dtypes: bool = False, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + storage_options: StorageOptions | None = None, + filesystem=None, + **kwargs, + ) -> DataFrame: + kwargs["use_pandas_metadata"] = True + + to_pandas_kwargs = {} + if dtype_backend == "numpy_nullable": + from pandas.io._util import _arrow_dtype_mapping + + mapping = _arrow_dtype_mapping() + to_pandas_kwargs["types_mapper"] = mapping.get + elif dtype_backend == "pyarrow": + to_pandas_kwargs["types_mapper"] = pd.ArrowDtype # type: ignore[assignment] + elif using_pyarrow_string_dtype(): + to_pandas_kwargs["types_mapper"] = arrow_string_types_mapper() + + manager = _get_option("mode.data_manager", silent=True) + if manager == "array": + to_pandas_kwargs["split_blocks"] = True # type: ignore[assignment] + + path_or_handle, handles, filesystem = _get_path_or_handle( + path, + filesystem, + storage_options=storage_options, + mode="rb", + ) + try: + pa_table = self.api.parquet.read_table( + path_or_handle, + columns=columns, + filesystem=filesystem, + filters=filters, + **kwargs, + ) + result = pa_table.to_pandas(**to_pandas_kwargs) + + if manager == "array": + result = result._as_manager("array", copy=False) + + if pa_table.schema.metadata: + if b"PANDAS_ATTRS" in pa_table.schema.metadata: + df_metadata = pa_table.schema.metadata[b"PANDAS_ATTRS"] + result.attrs = json.loads(df_metadata) + return result + finally: + if handles is not None: + handles.close() + + +class FastParquetImpl(BaseImpl): + def __init__(self) -> None: + # since pandas is a dependency of 
fastparquet + # we need to import on first use + fastparquet = import_optional_dependency( + "fastparquet", extra="fastparquet is required for parquet support." + ) + self.api = fastparquet + + def write( + self, + df: DataFrame, + path, + compression: Literal["snappy", "gzip", "brotli"] | None = "snappy", + index=None, + partition_cols=None, + storage_options: StorageOptions | None = None, + filesystem=None, + **kwargs, + ) -> None: + self.validate_dataframe(df) + + if "partition_on" in kwargs and partition_cols is not None: + raise ValueError( + "Cannot use both partition_on and " + "partition_cols. Use partition_cols for partitioning data" + ) + if "partition_on" in kwargs: + partition_cols = kwargs.pop("partition_on") + + if partition_cols is not None: + kwargs["file_scheme"] = "hive" + + if filesystem is not None: + raise NotImplementedError( + "filesystem is not implemented for the fastparquet engine." + ) + + # cannot use get_handle as write() does not accept file buffers + path = stringify_path(path) + if is_fsspec_url(path): + fsspec = import_optional_dependency("fsspec") + + # if filesystem is provided by fsspec, file must be opened in 'wb' mode. + kwargs["open_with"] = lambda path, _: fsspec.open( + path, "wb", **(storage_options or {}) + ).open() + elif storage_options: + raise ValueError( + "storage_options passed with file object or non-fsspec file path" + ) + + with catch_warnings(record=True): + self.api.write( + path, + df, + compression=compression, + write_index=index, + partition_on=partition_cols, + **kwargs, + ) + + def read( + self, + path, + columns=None, + filters=None, + storage_options: StorageOptions | None = None, + filesystem=None, + **kwargs, + ) -> DataFrame: + parquet_kwargs: dict[str, Any] = {} + use_nullable_dtypes = kwargs.pop("use_nullable_dtypes", False) + dtype_backend = kwargs.pop("dtype_backend", lib.no_default) + # We are disabling nullable dtypes for fastparquet pending discussion + parquet_kwargs["pandas_nulls"] = False + if use_nullable_dtypes: + raise ValueError( + "The 'use_nullable_dtypes' argument is not supported for the " + "fastparquet engine" + ) + if dtype_backend is not lib.no_default: + raise ValueError( + "The 'dtype_backend' argument is not supported for the " + "fastparquet engine" + ) + if filesystem is not None: + raise NotImplementedError( + "filesystem is not implemented for the fastparquet engine." 
+ ) + path = stringify_path(path) + handles = None + if is_fsspec_url(path): + fsspec = import_optional_dependency("fsspec") + + parquet_kwargs["fs"] = fsspec.open(path, "rb", **(storage_options or {})).fs + elif isinstance(path, str) and not os.path.isdir(path): + # use get_handle only when we are very certain that it is not a directory + # fsspec resources can also point to directories + # this branch is used for example when reading from non-fsspec URLs + handles = get_handle( + path, "rb", is_text=False, storage_options=storage_options + ) + path = handles.handle + + try: + parquet_file = self.api.ParquetFile(path, **parquet_kwargs) + return parquet_file.to_pandas(columns=columns, filters=filters, **kwargs) + finally: + if handles is not None: + handles.close() + + +@doc(storage_options=_shared_docs["storage_options"]) +def to_parquet( + df: DataFrame, + path: FilePath | WriteBuffer[bytes] | None = None, + engine: str = "auto", + compression: str | None = "snappy", + index: bool | None = None, + storage_options: StorageOptions | None = None, + partition_cols: list[str] | None = None, + filesystem: Any = None, + **kwargs, +) -> bytes | None: + """ + Write a DataFrame to the parquet format. + + Parameters + ---------- + df : DataFrame + path : str, path object, file-like object, or None, default None + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``write()`` function. If None, the result is + returned as bytes. If a string, it will be used as Root Directory path + when writing a partitioned dataset. The engine fastparquet does not + accept file-like objects. + engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' + Parquet library to use. If 'auto', then the option + ``io.parquet.engine`` is used. The default ``io.parquet.engine`` + behavior is to try 'pyarrow', falling back to 'fastparquet' if + 'pyarrow' is unavailable. + + When using the ``'pyarrow'`` engine and no storage options are provided + and a filesystem is implemented by both ``pyarrow.fs`` and ``fsspec`` + (e.g. "s3://"), then the ``pyarrow.fs`` filesystem is attempted first. + Use the filesystem keyword with an instantiated fsspec filesystem + if you wish to use its implementation. + compression : {{'snappy', 'gzip', 'brotli', 'lz4', 'zstd', None}}, + default 'snappy'. Name of the compression to use. Use ``None`` + for no compression. + index : bool, default None + If ``True``, include the dataframe's index(es) in the file output. If + ``False``, they will not be written to the file. + If ``None``, similar to ``True`` the dataframe's index(es) + will be saved. However, instead of being saved as values, + the RangeIndex will be stored as a range in the metadata so it + doesn't require much space and is faster. Other indexes will + be included as columns in the file output. + partition_cols : str or list, optional, default None + Column names by which to partition the dataset. + Columns are partitioned in the order they are given. + Must be None if path is not a string. + {storage_options} + + filesystem : fsspec or pyarrow filesystem, default None + Filesystem object to use when reading the parquet file. Only implemented + for ``engine="pyarrow"``. + + .. 
versionadded:: 2.1.0 + + kwargs + Additional keyword arguments passed to the engine + + Returns + ------- + bytes if no path argument is provided else None + """ + if isinstance(partition_cols, str): + partition_cols = [partition_cols] + impl = get_engine(engine) + + path_or_buf: FilePath | WriteBuffer[bytes] = io.BytesIO() if path is None else path + + impl.write( + df, + path_or_buf, + compression=compression, + index=index, + partition_cols=partition_cols, + storage_options=storage_options, + filesystem=filesystem, + **kwargs, + ) + + if path is None: + assert isinstance(path_or_buf, io.BytesIO) + return path_or_buf.getvalue() + else: + return None + + +@doc(storage_options=_shared_docs["storage_options"]) +def read_parquet( + path: FilePath | ReadBuffer[bytes], + engine: str = "auto", + columns: list[str] | None = None, + storage_options: StorageOptions | None = None, + use_nullable_dtypes: bool | lib.NoDefault = lib.no_default, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + filesystem: Any = None, + filters: list[tuple] | list[list[tuple]] | None = None, + **kwargs, +) -> DataFrame: + """ + Load a parquet object from the file path, returning a DataFrame. + + Parameters + ---------- + path : str, path object or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``read()`` function. + The string could be a URL. Valid URL schemes include http, ftp, s3, + gs, and file. For file URLs, a host is expected. A local file could be: + ``file://localhost/path/to/table.parquet``. + A file URL can also be a path to a directory that contains multiple + partitioned parquet files. Both pyarrow and fastparquet support + paths to directories as well as file URLs. A directory path could be: + ``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir``. + engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' + Parquet library to use. If 'auto', then the option + ``io.parquet.engine`` is used. The default ``io.parquet.engine`` + behavior is to try 'pyarrow', falling back to 'fastparquet' if + 'pyarrow' is unavailable. + + When using the ``'pyarrow'`` engine and no storage options are provided + and a filesystem is implemented by both ``pyarrow.fs`` and ``fsspec`` + (e.g. "s3://"), then the ``pyarrow.fs`` filesystem is attempted first. + Use the filesystem keyword with an instantiated fsspec filesystem + if you wish to use its implementation. + columns : list, default=None + If not None, only these columns will be read from the file. + {storage_options} + + .. versionadded:: 1.3.0 + + use_nullable_dtypes : bool, default False + If True, use dtypes that use ``pd.NA`` as missing value indicator + for the resulting DataFrame. (only applicable for the ``pyarrow`` + engine) + As new dtypes are added that support ``pd.NA`` in the future, the + output with this option will change to use those dtypes. + Note: this is an experimental option, and behaviour (e.g. additional + support dtypes) may change without notice. + + .. deprecated:: 2.0 + + dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. 
versionadded:: 2.0 + + filesystem : fsspec or pyarrow filesystem, default None + Filesystem object to use when reading the parquet file. Only implemented + for ``engine="pyarrow"``. + + .. versionadded:: 2.1.0 + + filters : List[Tuple] or List[List[Tuple]], default None + To filter out data. + Filter syntax: [[(column, op, val), ...],...] + where op is [==, =, >, >=, <, <=, !=, in, not in] + The innermost tuples are transposed into a set of filters applied + through an `AND` operation. + The outer list combines these sets of filters through an `OR` + operation. + A single list of tuples can also be used, meaning that no `OR` + operation between set of filters is to be conducted. + + Using this argument will NOT result in row-wise filtering of the final + partitions unless ``engine="pyarrow"`` is also specified. For + other engines, filtering is only performed at the partition level, that is, + to prevent the loading of some row-groups and/or files. + + .. versionadded:: 2.1.0 + + **kwargs + Any additional kwargs are passed to the engine. + + Returns + ------- + DataFrame + + See Also + -------- + DataFrame.to_parquet : Create a parquet object that serializes a DataFrame. + + Examples + -------- + >>> original_df = pd.DataFrame( + ... {{"foo": range(5), "bar": range(5, 10)}} + ... ) + >>> original_df + foo bar + 0 0 5 + 1 1 6 + 2 2 7 + 3 3 8 + 4 4 9 + >>> df_parquet_bytes = original_df.to_parquet() + >>> from io import BytesIO + >>> restored_df = pd.read_parquet(BytesIO(df_parquet_bytes)) + >>> restored_df + foo bar + 0 0 5 + 1 1 6 + 2 2 7 + 3 3 8 + 4 4 9 + >>> restored_df.equals(original_df) + True + >>> restored_bar = pd.read_parquet(BytesIO(df_parquet_bytes), columns=["bar"]) + >>> restored_bar + bar + 0 5 + 1 6 + 2 7 + 3 8 + 4 9 + >>> restored_bar.equals(original_df[['bar']]) + True + + The function uses `kwargs` that are passed directly to the engine. + In the following example, we use the `filters` argument of the pyarrow + engine to filter the rows of the DataFrame. + + Since `pyarrow` is the default engine, we can omit the `engine` argument. + Note that the `filters` argument is implemented by the `pyarrow` engine, + which can benefit from multithreading and also potentially be more + economical in terms of memory. + + >>> sel = [("foo", ">", 2)] + >>> restored_part = pd.read_parquet(BytesIO(df_parquet_bytes), filters=sel) + >>> restored_part + foo bar + 0 3 8 + 1 4 9 + """ + + impl = get_engine(engine) + + if use_nullable_dtypes is not lib.no_default: + msg = ( + "The argument 'use_nullable_dtypes' is deprecated and will be removed " + "in a future version." + ) + if use_nullable_dtypes is True: + msg += ( + "Use dtype_backend='numpy_nullable' instead of use_nullable_dtypes=True."
+ ) + warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) + else: + use_nullable_dtypes = False + check_dtype_backend(dtype_backend) + + return impl.read( + path, + columns=columns, + filters=filters, + storage_options=storage_options, + use_nullable_dtypes=use_nullable_dtypes, + dtype_backend=dtype_backend, + filesystem=filesystem, + **kwargs, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/pickle.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..0dae0e7106b69a471f0c2702158cfe0f11f0389c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/pickle.py @@ -0,0 +1,210 @@ +""" pickle compat """ +from __future__ import annotations + +import pickle +from typing import ( + TYPE_CHECKING, + Any, +) +import warnings + +from pandas.compat import pickle_compat as pc +from pandas.util._decorators import doc + +from pandas.core.shared_docs import _shared_docs + +from pandas.io.common import get_handle + +if TYPE_CHECKING: + from pandas._typing import ( + CompressionOptions, + FilePath, + ReadPickleBuffer, + StorageOptions, + WriteBuffer, + ) + + from pandas import ( + DataFrame, + Series, + ) + + +@doc( + storage_options=_shared_docs["storage_options"], + compression_options=_shared_docs["compression_options"] % "filepath_or_buffer", +) +def to_pickle( + obj: Any, + filepath_or_buffer: FilePath | WriteBuffer[bytes], + compression: CompressionOptions = "infer", + protocol: int = pickle.HIGHEST_PROTOCOL, + storage_options: StorageOptions | None = None, +) -> None: + """ + Pickle (serialize) object to file. + + Parameters + ---------- + obj : any object + Any python object. + filepath_or_buffer : str, path object, or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``write()`` function. + Also accepts URL. URL has to be of S3 or GCS. + {compression_options} + + .. versionchanged:: 1.4.0 Zstandard support. + + protocol : int + Int which indicates which protocol should be used by the pickler, + default HIGHEST_PROTOCOL (see [1], paragraph 12.1.2). The possible + values for this parameter depend on the version of Python. For Python + 2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a valid value. + For Python >= 3.4, 4 is a valid value. A negative value for the + protocol parameter is equivalent to setting its value to + HIGHEST_PROTOCOL. + + {storage_options} + + .. [1] https://docs.python.org/3/library/pickle.html + + See Also + -------- + read_pickle : Load pickled pandas object (or any object) from file. + DataFrame.to_hdf : Write DataFrame to an HDF5 file. + DataFrame.to_sql : Write DataFrame to a SQL database. + DataFrame.to_parquet : Write a DataFrame to the binary parquet format. 
+ + Examples + -------- + >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) # doctest: +SKIP + >>> original_df # doctest: +SKIP + foo bar + 0 0 5 + 1 1 6 + 2 2 7 + 3 3 8 + 4 4 9 + >>> pd.to_pickle(original_df, "./dummy.pkl") # doctest: +SKIP + + >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP + >>> unpickled_df # doctest: +SKIP + foo bar + 0 0 5 + 1 1 6 + 2 2 7 + 3 3 8 + 4 4 9 + """ # noqa: E501 + if protocol < 0: + protocol = pickle.HIGHEST_PROTOCOL + + with get_handle( + filepath_or_buffer, + "wb", + compression=compression, + is_text=False, + storage_options=storage_options, + ) as handles: + # letting pickle write directly to the buffer is more memory-efficient + pickle.dump(obj, handles.handle, protocol=protocol) + + +@doc( + storage_options=_shared_docs["storage_options"], + decompression_options=_shared_docs["decompression_options"] % "filepath_or_buffer", +) +def read_pickle( + filepath_or_buffer: FilePath | ReadPickleBuffer, + compression: CompressionOptions = "infer", + storage_options: StorageOptions | None = None, +) -> DataFrame | Series: + """ + Load pickled pandas object (or any object) from file. + + .. warning:: + + Loading pickled data received from untrusted sources can be + unsafe. See `here <https://docs.python.org/3/library/pickle.html>`__. + + Parameters + ---------- + filepath_or_buffer : str, path object, or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``readlines()`` function. + Also accepts URL. URL is not limited to S3 and GCS. + + {decompression_options} + + .. versionchanged:: 1.4.0 Zstandard support. + + {storage_options} + + Returns + ------- + same type as object stored in file + + See Also + -------- + DataFrame.to_pickle : Pickle (serialize) DataFrame object to file. + Series.to_pickle : Pickle (serialize) Series object to file. + read_hdf : Read HDF5 file into a DataFrame. + read_sql : Read SQL query or database table into a DataFrame. + read_parquet : Load a parquet object, returning a DataFrame. + + Notes + ----- + read_pickle is only guaranteed to be backwards compatible to pandas 0.20.3 + provided the object was serialized with to_pickle. + + Examples + -------- + >>> original_df = pd.DataFrame( + ... {{"foo": range(5), "bar": range(5, 10)}} + ... ) # doctest: +SKIP + >>> original_df # doctest: +SKIP + foo bar + 0 0 5 + 1 1 6 + 2 2 7 + 3 3 8 + 4 4 9 + >>> pd.to_pickle(original_df, "./dummy.pkl") # doctest: +SKIP + + >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP + >>> unpickled_df # doctest: +SKIP + foo bar + 0 0 5 + 1 1 6 + 2 2 7 + 3 3 8 + 4 4 9 + """ + excs_to_catch = (AttributeError, ImportError, ModuleNotFoundError, TypeError) + with get_handle( + filepath_or_buffer, + "rb", + compression=compression, + is_text=False, + storage_options=storage_options, + ) as handles: + # 1) try standard library Pickle + # 2) try pickle_compat (older pandas version) to handle subclass changes + # 3) try pickle_compat with latin-1 encoding upon a UnicodeDecodeError + + try: + # TypeError for Cython complaints about object.__new__ vs Tick.__new__ + try: + with warnings.catch_warnings(record=True): + # We want to silence any warnings about, e.g. moved modules. + warnings.simplefilter("ignore", Warning) + return pickle.load(handles.handle) + except excs_to_catch: + # e.g. + # "No module named 'pandas.core.sparse.series'" + # "Can't get attribute '__nat_unpickle' on <module 'pandas._libs.tslib'>" + return pc.load(handles.handle, encoding=None) + except UnicodeDecodeError: + # e.g. can occur for files written in py27; see GH#28645 and GH#31988 + return pc.load(handles.handle, encoding="latin-1") diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/sas7bdat.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/sas7bdat.py new file mode 100644 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/sas7bdat.py +""" +Read SAS7BDAT files. +""" +from __future__ import annotations + +from collections import abc +from datetime import datetime +import sys +from typing import TYPE_CHECKING + +import numpy as np + +from pandas._libs.byteswap import ( + read_double_with_byteswap, + read_float_with_byteswap, + read_uint16_with_byteswap, + read_uint32_with_byteswap, + read_uint64_with_byteswap, +) +from pandas._libs.sas import ( + Parser, + get_subheader_index, +) +from pandas._libs.tslibs.conversion import cast_from_unit_vectorized +from pandas.errors import EmptyDataError + +import pandas as pd +from pandas import ( + DataFrame, + Timestamp, +) + +from pandas.io.common import get_handle +import pandas.io.sas.sas_constants as const +from pandas.io.sas.sasreader import ReaderBase + +if TYPE_CHECKING: + from pandas._typing import ( + CompressionOptions, + FilePath, + ReadBuffer, + ) + + +_unix_origin = Timestamp("1970-01-01") +_sas_origin = Timestamp("1960-01-01") + + +def _convert_datetimes(sas_datetimes: pd.Series, unit: str) -> pd.Series: + """ + Convert to Timestamp if possible, otherwise to datetime.datetime.
+ SAS float64 lacks precision for more than ms resolution so the fit + to datetime.datetime is ok. + + Parameters + ---------- + sas_datetimes : {Series, Sequence[float]} + Dates or datetimes in SAS + unit : {'d', 's'} + "d" if the floats represent dates, "s" for datetimes + + Returns + ------- + Series + Series of datetime64 dtype or datetime.datetime. + """ + td = (_sas_origin - _unix_origin).as_unit("s") + if unit == "s": + millis = cast_from_unit_vectorized( + sas_datetimes._values, unit="s", out_unit="ms" + ) + dt64ms = millis.view("M8[ms]") + td + return pd.Series(dt64ms, index=sas_datetimes.index, copy=False) + else: + vals = np.array(sas_datetimes, dtype="M8[D]") + td + return pd.Series(vals, dtype="M8[s]", index=sas_datetimes.index, copy=False) + + +class _Column: + col_id: int + name: str | bytes + label: str | bytes + format: str | bytes + ctype: bytes + length: int + + def __init__( + self, + col_id: int, + # These can be bytes when convert_header_text is False + name: str | bytes, + label: str | bytes, + format: str | bytes, + ctype: bytes, + length: int, + ) -> None: + self.col_id = col_id + self.name = name + self.label = label + self.format = format + self.ctype = ctype + self.length = length + + +# SAS7BDAT represents a SAS data file in SAS7BDAT format. +class SAS7BDATReader(ReaderBase, abc.Iterator): + """ + Read SAS files in SAS7BDAT format. + + Parameters + ---------- + path_or_buf : path name or buffer + Name of SAS file or file-like object pointing to SAS file + contents. + index : column identifier, defaults to None + Column to use as index. + convert_dates : bool, defaults to True + Attempt to convert dates to Pandas datetime values. Note that + some rarely used SAS date formats may be unsupported. + blank_missing : bool, defaults to True + Convert empty strings to missing values (SAS uses blanks to + indicate missing character variables). + chunksize : int, defaults to None + Return SAS7BDATReader object for iterations, returns chunks + with given number of lines. + encoding : str, 'infer', defaults to None + String encoding acc. to Python standard encodings, + encoding='infer' tries to detect the encoding from the file header, + encoding=None will leave the data in binary format. + convert_text : bool, defaults to True + If False, text variables are left as raw bytes. + convert_header_text : bool, defaults to True + If False, header text, including column names, are left as raw + bytes. 
+ """ + + _int_length: int + _cached_page: bytes | None + + def __init__( + self, + path_or_buf: FilePath | ReadBuffer[bytes], + index=None, + convert_dates: bool = True, + blank_missing: bool = True, + chunksize: int | None = None, + encoding: str | None = None, + convert_text: bool = True, + convert_header_text: bool = True, + compression: CompressionOptions = "infer", + ) -> None: + self.index = index + self.convert_dates = convert_dates + self.blank_missing = blank_missing + self.chunksize = chunksize + self.encoding = encoding + self.convert_text = convert_text + self.convert_header_text = convert_header_text + + self.default_encoding = "latin-1" + self.compression = b"" + self.column_names_raw: list[bytes] = [] + self.column_names: list[str | bytes] = [] + self.column_formats: list[str | bytes] = [] + self.columns: list[_Column] = [] + + self._current_page_data_subheader_pointers: list[tuple[int, int]] = [] + self._cached_page = None + self._column_data_lengths: list[int] = [] + self._column_data_offsets: list[int] = [] + self._column_types: list[bytes] = [] + + self._current_row_in_file_index = 0 + self._current_row_on_page_index = 0 + self._current_row_in_file_index = 0 + + self.handles = get_handle( + path_or_buf, "rb", is_text=False, compression=compression + ) + + self._path_or_buf = self.handles.handle + + # Same order as const.SASIndex + self._subheader_processors = [ + self._process_rowsize_subheader, + self._process_columnsize_subheader, + self._process_subheader_counts, + self._process_columntext_subheader, + self._process_columnname_subheader, + self._process_columnattributes_subheader, + self._process_format_subheader, + self._process_columnlist_subheader, + None, # Data + ] + + try: + self._get_properties() + self._parse_metadata() + except Exception: + self.close() + raise + + def column_data_lengths(self) -> np.ndarray: + """Return a numpy int64 array of the column data lengths""" + return np.asarray(self._column_data_lengths, dtype=np.int64) + + def column_data_offsets(self) -> np.ndarray: + """Return a numpy int64 array of the column offsets""" + return np.asarray(self._column_data_offsets, dtype=np.int64) + + def column_types(self) -> np.ndarray: + """ + Returns a numpy character array of the column types: + s (string) or d (double) + """ + return np.asarray(self._column_types, dtype=np.dtype("S1")) + + def close(self) -> None: + self.handles.close() + + def _get_properties(self) -> None: + # Check magic number + self._path_or_buf.seek(0) + self._cached_page = self._path_or_buf.read(288) + if self._cached_page[0 : len(const.magic)] != const.magic: + raise ValueError("magic number mismatch (not a SAS file?)") + + # Get alignment information + buf = self._read_bytes(const.align_1_offset, const.align_1_length) + if buf == const.u64_byte_checker_value: + self.U64 = True + self._int_length = 8 + self._page_bit_offset = const.page_bit_offset_x64 + self._subheader_pointer_length = const.subheader_pointer_length_x64 + else: + self.U64 = False + self._page_bit_offset = const.page_bit_offset_x86 + self._subheader_pointer_length = const.subheader_pointer_length_x86 + self._int_length = 4 + buf = self._read_bytes(const.align_2_offset, const.align_2_length) + if buf == const.align_1_checker_value: + align1 = const.align_2_value + else: + align1 = 0 + + # Get endianness information + buf = self._read_bytes(const.endianness_offset, const.endianness_length) + if buf == b"\x01": + self.byte_order = "<" + self.need_byteswap = sys.byteorder == "big" + else: + self.byte_order = ">" + 
self.need_byteswap = sys.byteorder == "little" + + # Get encoding information + buf = self._read_bytes(const.encoding_offset, const.encoding_length)[0] + if buf in const.encoding_names: + self.inferred_encoding = const.encoding_names[buf] + if self.encoding == "infer": + self.encoding = self.inferred_encoding + else: + self.inferred_encoding = f"unknown (code={buf})" + + # Timestamp is epoch 01/01/1960 + epoch = datetime(1960, 1, 1) + x = self._read_float( + const.date_created_offset + align1, const.date_created_length + ) + self.date_created = epoch + pd.to_timedelta(x, unit="s") + x = self._read_float( + const.date_modified_offset + align1, const.date_modified_length + ) + self.date_modified = epoch + pd.to_timedelta(x, unit="s") + + self.header_length = self._read_uint( + const.header_size_offset + align1, const.header_size_length + ) + + # Read the rest of the header into cached_page. + buf = self._path_or_buf.read(self.header_length - 288) + self._cached_page += buf + # error: Argument 1 to "len" has incompatible type "Optional[bytes]"; + # expected "Sized" + if len(self._cached_page) != self.header_length: # type: ignore[arg-type] + raise ValueError("The SAS7BDAT file appears to be truncated.") + + self._page_length = self._read_uint( + const.page_size_offset + align1, const.page_size_length + ) + + def __next__(self) -> DataFrame: + da = self.read(nrows=self.chunksize or 1) + if da.empty: + self.close() + raise StopIteration + return da + + # Read a single float of the given width (4 or 8). + def _read_float(self, offset: int, width: int): + assert self._cached_page is not None + if width == 4: + return read_float_with_byteswap( + self._cached_page, offset, self.need_byteswap + ) + elif width == 8: + return read_double_with_byteswap( + self._cached_page, offset, self.need_byteswap + ) + else: + self.close() + raise ValueError("invalid float width") + + # Read a single unsigned integer of the given width (1, 2, 4 or 8). 
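+ # Widths 2, 4 and 8 dispatch to the read_uint{16,32,64}_with_byteswap + # helpers; a width of 1 is returned directly, since a single byte has no + # byte order to swap.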
+ def _read_uint(self, offset: int, width: int) -> int: + assert self._cached_page is not None + if width == 1: + return self._read_bytes(offset, 1)[0] + elif width == 2: + return read_uint16_with_byteswap( + self._cached_page, offset, self.need_byteswap + ) + elif width == 4: + return read_uint32_with_byteswap( + self._cached_page, offset, self.need_byteswap + ) + elif width == 8: + return read_uint64_with_byteswap( + self._cached_page, offset, self.need_byteswap + ) + else: + self.close() + raise ValueError("invalid int width") + + def _read_bytes(self, offset: int, length: int): + assert self._cached_page is not None + if offset + length > len(self._cached_page): + self.close() + raise ValueError("The cached page is too small.") + return self._cached_page[offset : offset + length] + + def _read_and_convert_header_text(self, offset: int, length: int) -> str | bytes: + return self._convert_header_text( + self._read_bytes(offset, length).rstrip(b"\x00 ") + ) + + def _parse_metadata(self) -> None: + done = False + while not done: + self._cached_page = self._path_or_buf.read(self._page_length) + if len(self._cached_page) <= 0: + break + if len(self._cached_page) != self._page_length: + raise ValueError("Failed to read a meta data page from the SAS file.") + done = self._process_page_meta() + + def _process_page_meta(self) -> bool: + self._read_page_header() + pt = const.page_meta_types + [const.page_amd_type, const.page_mix_type] + if self._current_page_type in pt: + self._process_page_metadata() + is_data_page = self._current_page_type == const.page_data_type + is_mix_page = self._current_page_type == const.page_mix_type + return bool( + is_data_page + or is_mix_page + or self._current_page_data_subheader_pointers != [] + ) + + def _read_page_header(self) -> None: + bit_offset = self._page_bit_offset + tx = const.page_type_offset + bit_offset + self._current_page_type = ( + self._read_uint(tx, const.page_type_length) & const.page_type_mask2 + ) + tx = const.block_count_offset + bit_offset + self._current_page_block_count = self._read_uint(tx, const.block_count_length) + tx = const.subheader_count_offset + bit_offset + self._current_page_subheaders_count = self._read_uint( + tx, const.subheader_count_length + ) + + def _process_page_metadata(self) -> None: + bit_offset = self._page_bit_offset + + for i in range(self._current_page_subheaders_count): + offset = const.subheader_pointers_offset + bit_offset + total_offset = offset + self._subheader_pointer_length * i + + subheader_offset = self._read_uint(total_offset, self._int_length) + total_offset += self._int_length + + subheader_length = self._read_uint(total_offset, self._int_length) + total_offset += self._int_length + + subheader_compression = self._read_uint(total_offset, 1) + total_offset += 1 + + subheader_type = self._read_uint(total_offset, 1) + + if ( + subheader_length == 0 + or subheader_compression == const.truncated_subheader_id + ): + continue + + subheader_signature = self._read_bytes(subheader_offset, self._int_length) + subheader_index = get_subheader_index(subheader_signature) + subheader_processor = self._subheader_processors[subheader_index] + + if subheader_processor is None: + f1 = subheader_compression in (const.compressed_subheader_id, 0) + f2 = subheader_type == const.compressed_subheader_type + if self.compression and f1 and f2: + self._current_page_data_subheader_pointers.append( + (subheader_offset, subheader_length) + ) + else: + self.close() + raise ValueError( + f"Unknown subheader signature 
{subheader_signature}" + ) + else: + subheader_processor(subheader_offset, subheader_length) + + def _process_rowsize_subheader(self, offset: int, length: int) -> None: + int_len = self._int_length + lcs_offset = offset + lcp_offset = offset + if self.U64: + lcs_offset += 682 + lcp_offset += 706 + else: + lcs_offset += 354 + lcp_offset += 378 + + self.row_length = self._read_uint( + offset + const.row_length_offset_multiplier * int_len, + int_len, + ) + self.row_count = self._read_uint( + offset + const.row_count_offset_multiplier * int_len, + int_len, + ) + self.col_count_p1 = self._read_uint( + offset + const.col_count_p1_multiplier * int_len, int_len + ) + self.col_count_p2 = self._read_uint( + offset + const.col_count_p2_multiplier * int_len, int_len + ) + mx = const.row_count_on_mix_page_offset_multiplier * int_len + self._mix_page_row_count = self._read_uint(offset + mx, int_len) + self._lcs = self._read_uint(lcs_offset, 2) + self._lcp = self._read_uint(lcp_offset, 2) + + def _process_columnsize_subheader(self, offset: int, length: int) -> None: + int_len = self._int_length + offset += int_len + self.column_count = self._read_uint(offset, int_len) + if self.col_count_p1 + self.col_count_p2 != self.column_count: + print( + f"Warning: column count mismatch ({self.col_count_p1} + " + f"{self.col_count_p2} != {self.column_count})\n" + ) + + # Unknown purpose + def _process_subheader_counts(self, offset: int, length: int) -> None: + pass + + def _process_columntext_subheader(self, offset: int, length: int) -> None: + offset += self._int_length + text_block_size = self._read_uint(offset, const.text_block_size_length) + + buf = self._read_bytes(offset, text_block_size) + cname_raw = buf[0:text_block_size].rstrip(b"\x00 ") + self.column_names_raw.append(cname_raw) + + if len(self.column_names_raw) == 1: + compression_literal = b"" + for cl in const.compression_literals: + if cl in cname_raw: + compression_literal = cl + self.compression = compression_literal + offset -= self._int_length + + offset1 = offset + 16 + if self.U64: + offset1 += 4 + + buf = self._read_bytes(offset1, self._lcp) + compression_literal = buf.rstrip(b"\x00") + if compression_literal == b"": + self._lcs = 0 + offset1 = offset + 32 + if self.U64: + offset1 += 4 + buf = self._read_bytes(offset1, self._lcp) + self.creator_proc = buf[0 : self._lcp] + elif compression_literal == const.rle_compression: + offset1 = offset + 40 + if self.U64: + offset1 += 4 + buf = self._read_bytes(offset1, self._lcp) + self.creator_proc = buf[0 : self._lcp] + elif self._lcs > 0: + self._lcp = 0 + offset1 = offset + 16 + if self.U64: + offset1 += 4 + buf = self._read_bytes(offset1, self._lcs) + self.creator_proc = buf[0 : self._lcp] + if hasattr(self, "creator_proc"): + self.creator_proc = self._convert_header_text(self.creator_proc) + + def _process_columnname_subheader(self, offset: int, length: int) -> None: + int_len = self._int_length + offset += int_len + column_name_pointers_count = (length - 2 * int_len - 12) // 8 + for i in range(column_name_pointers_count): + text_subheader = ( + offset + + const.column_name_pointer_length * (i + 1) + + const.column_name_text_subheader_offset + ) + col_name_offset = ( + offset + + const.column_name_pointer_length * (i + 1) + + const.column_name_offset_offset + ) + col_name_length = ( + offset + + const.column_name_pointer_length * (i + 1) + + const.column_name_length_offset + ) + + idx = self._read_uint( + text_subheader, const.column_name_text_subheader_length + ) + col_offset = self._read_uint( + 
col_name_offset, const.column_name_offset_length + ) + col_len = self._read_uint(col_name_length, const.column_name_length_length) + + name_raw = self.column_names_raw[idx] + cname = name_raw[col_offset : col_offset + col_len] + self.column_names.append(self._convert_header_text(cname)) + + def _process_columnattributes_subheader(self, offset: int, length: int) -> None: + int_len = self._int_length + column_attributes_vectors_count = (length - 2 * int_len - 12) // (int_len + 8) + for i in range(column_attributes_vectors_count): + col_data_offset = ( + offset + int_len + const.column_data_offset_offset + i * (int_len + 8) + ) + col_data_len = ( + offset + + 2 * int_len + + const.column_data_length_offset + + i * (int_len + 8) + ) + col_types = ( + offset + 2 * int_len + const.column_type_offset + i * (int_len + 8) + ) + + x = self._read_uint(col_data_offset, int_len) + self._column_data_offsets.append(x) + + x = self._read_uint(col_data_len, const.column_data_length_length) + self._column_data_lengths.append(x) + + x = self._read_uint(col_types, const.column_type_length) + self._column_types.append(b"d" if x == 1 else b"s") + + def _process_columnlist_subheader(self, offset: int, length: int) -> None: + # unknown purpose + pass + + def _process_format_subheader(self, offset: int, length: int) -> None: + int_len = self._int_length + text_subheader_format = ( + offset + const.column_format_text_subheader_index_offset + 3 * int_len + ) + col_format_offset = offset + const.column_format_offset_offset + 3 * int_len + col_format_len = offset + const.column_format_length_offset + 3 * int_len + text_subheader_label = ( + offset + const.column_label_text_subheader_index_offset + 3 * int_len + ) + col_label_offset = offset + const.column_label_offset_offset + 3 * int_len + col_label_len = offset + const.column_label_length_offset + 3 * int_len + + x = self._read_uint( + text_subheader_format, const.column_format_text_subheader_index_length + ) + format_idx = min(x, len(self.column_names_raw) - 1) + + format_start = self._read_uint( + col_format_offset, const.column_format_offset_length + ) + format_len = self._read_uint(col_format_len, const.column_format_length_length) + + label_idx = self._read_uint( + text_subheader_label, const.column_label_text_subheader_index_length + ) + label_idx = min(label_idx, len(self.column_names_raw) - 1) + + label_start = self._read_uint( + col_label_offset, const.column_label_offset_length + ) + label_len = self._read_uint(col_label_len, const.column_label_length_length) + + label_names = self.column_names_raw[label_idx] + column_label = self._convert_header_text( + label_names[label_start : label_start + label_len] + ) + format_names = self.column_names_raw[format_idx] + column_format = self._convert_header_text( + format_names[format_start : format_start + format_len] + ) + current_column_number = len(self.columns) + + col = _Column( + current_column_number, + self.column_names[current_column_number], + column_label, + column_format, + self._column_types[current_column_number], + self._column_data_lengths[current_column_number], + ) + + self.column_formats.append(column_format) + self.columns.append(col) + + def read(self, nrows: int | None = None) -> DataFrame: + if (nrows is None) and (self.chunksize is not None): + nrows = self.chunksize + elif nrows is None: + nrows = self.row_count + + if len(self._column_types) == 0: + self.close() + raise EmptyDataError("No columns to parse from file") + + if nrows > 0 and self._current_row_in_file_index >= self.row_count: + 
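# Every available row has already been returned; the empty frame makes + # __next__ raise StopIteration via its DataFrame.empty check. +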
return DataFrame() + + nrows = min(nrows, self.row_count - self._current_row_in_file_index) + + nd = self._column_types.count(b"d") + ns = self._column_types.count(b"s") + + self._string_chunk = np.empty((ns, nrows), dtype=object) + self._byte_chunk = np.zeros((nd, 8 * nrows), dtype=np.uint8) + + self._current_row_in_chunk_index = 0 + p = Parser(self) + p.read(nrows) + + rslt = self._chunk_to_dataframe() + if self.index is not None: + rslt = rslt.set_index(self.index) + + return rslt + + def _read_next_page(self): + self._current_page_data_subheader_pointers = [] + self._cached_page = self._path_or_buf.read(self._page_length) + if len(self._cached_page) <= 0: + return True + elif len(self._cached_page) != self._page_length: + self.close() + msg = ( + "failed to read complete page from file (read " + f"{len(self._cached_page):d} of {self._page_length:d} bytes)" + ) + raise ValueError(msg) + + self._read_page_header() + if self._current_page_type in const.page_meta_types: + self._process_page_metadata() + + if self._current_page_type not in const.page_meta_types + [ + const.page_data_type, + const.page_mix_type, + ]: + return self._read_next_page() + + return False + + def _chunk_to_dataframe(self) -> DataFrame: + n = self._current_row_in_chunk_index + m = self._current_row_in_file_index + ix = range(m - n, m) + rslt = {} + + js, jb = 0, 0 + for j in range(self.column_count): + name = self.column_names[j] + + if self._column_types[j] == b"d": + col_arr = self._byte_chunk[jb, :].view(dtype=self.byte_order + "d") + rslt[name] = pd.Series(col_arr, dtype=np.float64, index=ix, copy=False) + if self.convert_dates: + if self.column_formats[j] in const.sas_date_formats: + rslt[name] = _convert_datetimes(rslt[name], "d") + elif self.column_formats[j] in const.sas_datetime_formats: + rslt[name] = _convert_datetimes(rslt[name], "s") + jb += 1 + elif self._column_types[j] == b"s": + rslt[name] = pd.Series(self._string_chunk[js, :], index=ix, copy=False) + if self.convert_text and (self.encoding is not None): + rslt[name] = self._decode_string(rslt[name].str) + js += 1 + else: + self.close() + raise ValueError(f"unknown column type {repr(self._column_types[j])}") + + df = DataFrame(rslt, columns=self.column_names, index=ix, copy=False) + return df + + def _decode_string(self, b): + return b.decode(self.encoding or self.default_encoding) + + def _convert_header_text(self, b: bytes) -> str | bytes: + if self.convert_header_text: + return self._decode_string(b) + else: + return b diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/sas_xport.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/sas_xport.py new file mode 100644 index 0000000000000000000000000000000000000000..11b2ed0ee73168ba82e3b8d312f96bcea9398e49 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/sas_xport.py @@ -0,0 +1,508 @@ +""" +Read a SAS XPort format file into a Pandas DataFrame. + +Based on code from Jack Cushman (github.com/jcushman/xport). 
+ +The file format is defined here: + +https://support.sas.com/content/dam/SAS/support/en/technical-papers/record-layout-of-a-sas-version-5-or-6-data-set-in-sas-transport-xport-format.pdf +""" +from __future__ import annotations + +from collections import abc +from datetime import datetime +import struct +from typing import TYPE_CHECKING +import warnings + +import numpy as np + +from pandas.util._decorators import Appender +from pandas.util._exceptions import find_stack_level + +import pandas as pd + +from pandas.io.common import get_handle +from pandas.io.sas.sasreader import ReaderBase + +if TYPE_CHECKING: + from pandas._typing import ( + CompressionOptions, + DatetimeNaTType, + FilePath, + ReadBuffer, + ) +_correct_line1 = ( + "HEADER RECORD*******LIBRARY HEADER RECORD!!!!!!!" + "000000000000000000000000000000 " +) +_correct_header1 = ( + "HEADER RECORD*******MEMBER HEADER RECORD!!!!!!!000000000000000001600000000" +) +_correct_header2 = ( + "HEADER RECORD*******DSCRPTR HEADER RECORD!!!!!!!" + "000000000000000000000000000000 " +) +_correct_obs_header = ( + "HEADER RECORD*******OBS HEADER RECORD!!!!!!!" + "000000000000000000000000000000 " +) +_fieldkeys = [ + "ntype", + "nhfun", + "field_length", + "nvar0", + "name", + "label", + "nform", + "nfl", + "num_decimals", + "nfj", + "nfill", + "niform", + "nifl", + "nifd", + "npos", + "_", +] + + +_base_params_doc = """\ +Parameters +---------- +filepath_or_buffer : str or file-like object + Path to SAS file or object implementing binary read method.""" + +_params2_doc = """\ +index : identifier of index column + Identifier of column that should be used as index of the DataFrame. +encoding : str + Encoding for text data. +chunksize : int + Read file `chunksize` lines at a time, returns iterator.""" + +_format_params_doc = """\ +format : str + File format, only `xport` is currently supported.""" + +_iterator_doc = """\ +iterator : bool, default False + Return XportReader object for reading file incrementally.""" + + +_read_sas_doc = f"""Read a SAS file into a DataFrame. + +{_base_params_doc} +{_format_params_doc} +{_params2_doc} +{_iterator_doc} + +Returns +------- +DataFrame or XportReader + +Examples +-------- +Read a SAS Xport file: + +>>> df = pd.read_sas('filename.XPT') + +Read an Xport file in 10,000 line chunks: + +>>> itr = pd.read_sas('filename.XPT', chunksize=10000) +>>> for chunk in itr: +... do_something(chunk) + +""" + +_xport_reader_doc = f"""\ +Class for reading SAS Xport files. + +{_base_params_doc} +{_params2_doc} + +Attributes +---------- +member_info : list + Contains information about the file +fields : list + Contains information about the variables in the file +""" + +_read_method_doc = """\ +Read observations from SAS Xport file, returning as data frame. + +Parameters +---------- +nrows : int + Number of rows to read from data file; if None, read whole + file. + +Returns +------- +A DataFrame. +""" + + +def _parse_date(datestr: str) -> DatetimeNaTType: + """Given a date in xport format, return Python date.""" + try: + # e.g. "16FEB11:10:07:55" + return datetime.strptime(datestr, "%d%b%y:%H:%M:%S") + except ValueError: + return pd.NaT + + +def _split_line(s: str, parts): + """ + Parameters + ---------- + s: str + Fixed-length string to split + parts: list of (name, length) pairs + Used to break up string, name '_' will be filtered from output. + + Returns + ------- + Dict of name:contents of string at given location.
+ """ + out = {} + start = 0 + for name, length in parts: + out[name] = s[start : start + length].strip() + start += length + del out["_"] + return out + + +def _handle_truncated_float_vec(vec, nbytes): + # This feature is not well documented, but some SAS XPORT files + # have 2-7 byte "truncated" floats. To read these truncated + # floats, pad them with zeros on the right to make 8 byte floats. + # + # References: + # https://github.com/jcushman/xport/pull/3 + # The R "foreign" library + + if nbytes != 8: + vec1 = np.zeros(len(vec), np.dtype("S8")) + dtype = np.dtype(f"S{nbytes},S{8 - nbytes}") + vec2 = vec1.view(dtype=dtype) + vec2["f0"] = vec + return vec2 + + return vec + + +def _parse_float_vec(vec): + """ + Parse a vector of float values representing IBM 8 byte floats into + native 8 byte floats. + """ + dtype = np.dtype(">u4,>u4") + vec1 = vec.view(dtype=dtype) + xport1 = vec1["f0"] + xport2 = vec1["f1"] + + # Start by setting first half of ieee number to first half of IBM + # number sans exponent + ieee1 = xport1 & 0x00FFFFFF + + # The fraction bit to the left of the binary point in the ieee + # format was set and the number was shifted 0, 1, 2, or 3 + # places. This will tell us how to adjust the ibm exponent to be a + # power of 2 ieee exponent and how to shift the fraction bits to + # restore the correct magnitude. + shift = np.zeros(len(vec), dtype=np.uint8) + shift[np.where(xport1 & 0x00200000)] = 1 + shift[np.where(xport1 & 0x00400000)] = 2 + shift[np.where(xport1 & 0x00800000)] = 3 + + # shift the ieee number down the correct number of places then + # set the second half of the ieee number to be the second half + # of the ibm number shifted appropriately, ored with the bits + # from the first half that would have been shifted in if we + # could shift a double. All we are worried about are the low + # order 3 bits of the first half since we're only shifting by + # 1, 2, or 3. + ieee1 >>= shift + ieee2 = (xport2 >> shift) | ((xport1 & 0x00000007) << (29 + (3 - shift))) + + # clear the 1 bit to the left of the binary point + ieee1 &= 0xFFEFFFFF + + # set the exponent of the ieee number to be the actual exponent + # plus the shift count + 1023. Or this into the first half of the + # ieee number. The ibm exponent is excess 64 but is adjusted by 65 + # since during conversion to ibm format the exponent is + # incremented by 1 and the fraction bits left 4 positions to the + # right of the radix point. 
(had to add >> 24 because C treats & + # 0x7f as 0x7f000000 and Python doesn't) + ieee1 |= ((((((xport1 >> 24) & 0x7F) - 65) << 2) + shift + 1023) << 20) | ( + xport1 & 0x80000000 + ) + + ieee = np.empty((len(ieee1),), dtype=">u4,>u4") + ieee["f0"] = ieee1 + ieee["f1"] = ieee2 + ieee = ieee.view(dtype=">f8") + ieee = ieee.astype("f8") + + return ieee + + +class XportReader(ReaderBase, abc.Iterator): + __doc__ = _xport_reader_doc + + def __init__( + self, + filepath_or_buffer: FilePath | ReadBuffer[bytes], + index=None, + encoding: str | None = "ISO-8859-1", + chunksize: int | None = None, + compression: CompressionOptions = "infer", + ) -> None: + self._encoding = encoding + self._lines_read = 0 + self._index = index + self._chunksize = chunksize + + self.handles = get_handle( + filepath_or_buffer, + "rb", + encoding=encoding, + is_text=False, + compression=compression, + ) + self.filepath_or_buffer = self.handles.handle + + try: + self._read_header() + except Exception: + self.close() + raise + + def close(self) -> None: + self.handles.close() + + def _get_row(self): + return self.filepath_or_buffer.read(80).decode() + + def _read_header(self) -> None: + self.filepath_or_buffer.seek(0) + + # read file header + line1 = self._get_row() + if line1 != _correct_line1: + if "**COMPRESSED**" in line1: + # this was created with the PROC CPORT method and can't be read + # https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.5/movefile/p1bm6aqp3fw4uin1hucwh718f6kp.htm + raise ValueError( + "Header record indicates a CPORT file, which is not readable." + ) + raise ValueError("Header record is not an XPORT file.") + + line2 = self._get_row() + fif = [["prefix", 24], ["version", 8], ["OS", 8], ["_", 24], ["created", 16]] + file_info = _split_line(line2, fif) + if file_info["prefix"] != "SAS SAS SASLIB": + raise ValueError("Header record has invalid prefix.") + file_info["created"] = _parse_date(file_info["created"]) + self.file_info = file_info + + line3 = self._get_row() + file_info["modified"] = _parse_date(line3[:16]) + + # read member header + header1 = self._get_row() + header2 = self._get_row() + headflag1 = header1.startswith(_correct_header1) + headflag2 = header2 == _correct_header2 + if not (headflag1 and headflag2): + raise ValueError("Member header not found") + # usually 140, could be 135 + fieldnamelength = int(header1[-5:-2]) + + # member info + mem = [ + ["prefix", 8], + ["set_name", 8], + ["sasdata", 8], + ["version", 8], + ["OS", 8], + ["_", 24], + ["created", 16], + ] + member_info = _split_line(self._get_row(), mem) + mem = [["modified", 16], ["_", 16], ["label", 40], ["type", 8]] + member_info.update(_split_line(self._get_row(), mem)) + member_info["modified"] = _parse_date(member_info["modified"]) + member_info["created"] = _parse_date(member_info["created"]) + self.member_info = member_info + + # read field names + types = {1: "numeric", 2: "char"} + fieldcount = int(self._get_row()[54:58]) + datalength = fieldnamelength * fieldcount + # round up to nearest 80 + if datalength % 80: + datalength += 80 - datalength % 80 + fielddata = self.filepath_or_buffer.read(datalength) + fields = [] + obs_length = 0 + while len(fielddata) >= fieldnamelength: + # pull data for one field + fieldbytes, fielddata = ( + fielddata[:fieldnamelength], + fielddata[fieldnamelength:], + ) + + # rest at end gets ignored, so if field is short, pad out + # to match struct pattern below + fieldbytes = fieldbytes.ljust(140) + + fieldstruct = struct.unpack(">hhhh8s40s8shhh2s8shhl52s", fieldbytes) + field = 
dict(zip(_fieldkeys, fieldstruct)) + del field["_"] + field["ntype"] = types[field["ntype"]] + fl = field["field_length"] + if field["ntype"] == "numeric" and ((fl < 2) or (fl > 8)): + msg = f"Floating field width {fl} is not between 2 and 8." + raise TypeError(msg) + + for k, v in field.items(): + try: + field[k] = v.strip() + except AttributeError: + pass + + obs_length += field["field_length"] + fields += [field] + + header = self._get_row() + if not header == _correct_obs_header: + raise ValueError("Observation header not found.") + + self.fields = fields + self.record_length = obs_length + self.record_start = self.filepath_or_buffer.tell() + + self.nobs = self._record_count() + self.columns = [x["name"].decode() for x in self.fields] + + # Setup the dtype. + dtypel = [ + ("s" + str(i), "S" + str(field["field_length"])) + for i, field in enumerate(self.fields) + ] + dtype = np.dtype(dtypel) + self._dtype = dtype + + def __next__(self) -> pd.DataFrame: + return self.read(nrows=self._chunksize or 1) + + def _record_count(self) -> int: + """ + Get number of records in file. + + This is maybe suboptimal because we have to seek to the end of + the file. + + Side effect: returns file position to record_start. + """ + self.filepath_or_buffer.seek(0, 2) + total_records_length = self.filepath_or_buffer.tell() - self.record_start + + if total_records_length % 80 != 0: + warnings.warn( + "xport file may be corrupted.", + stacklevel=find_stack_level(), + ) + + if self.record_length > 80: + self.filepath_or_buffer.seek(self.record_start) + return total_records_length // self.record_length + + self.filepath_or_buffer.seek(-80, 2) + last_card_bytes = self.filepath_or_buffer.read(80) + last_card = np.frombuffer(last_card_bytes, dtype=np.uint64) + + # 8 byte blank + ix = np.flatnonzero(last_card == 2314885530818453536) + + if len(ix) == 0: + tail_pad = 0 + else: + tail_pad = 8 * len(ix) + + self.filepath_or_buffer.seek(self.record_start) + + return (total_records_length - tail_pad) // self.record_length + + def get_chunk(self, size: int | None = None) -> pd.DataFrame: + """ + Reads lines from Xport file and returns as dataframe + + Parameters + ---------- + size : int, defaults to None + Number of lines to read. If None, reads whole file. 
+ + Returns + ------- + DataFrame + """ + if size is None: + size = self._chunksize + return self.read(nrows=size) + + def _missing_double(self, vec): + v = vec.view(dtype="u1,u1,u2,u4") + miss = (v["f1"] == 0) & (v["f2"] == 0) & (v["f3"] == 0) + miss1 = ( + ((v["f0"] >= 0x41) & (v["f0"] <= 0x5A)) + | (v["f0"] == 0x5F) + | (v["f0"] == 0x2E) + ) + miss &= miss1 + return miss + + @Appender(_read_method_doc) + def read(self, nrows: int | None = None) -> pd.DataFrame: + if nrows is None: + nrows = self.nobs + + read_lines = min(nrows, self.nobs - self._lines_read) + read_len = read_lines * self.record_length + if read_len <= 0: + self.close() + raise StopIteration + raw = self.filepath_or_buffer.read(read_len) + data = np.frombuffer(raw, dtype=self._dtype, count=read_lines) + + df_data = {} + for j, x in enumerate(self.columns): + vec = data["s" + str(j)] + ntype = self.fields[j]["ntype"] + if ntype == "numeric": + vec = _handle_truncated_float_vec(vec, self.fields[j]["field_length"]) + miss = self._missing_double(vec) + v = _parse_float_vec(vec) + v[miss] = np.nan + elif self.fields[j]["ntype"] == "char": + v = [y.rstrip() for y in vec] + + if self._encoding is not None: + v = [y.decode(self._encoding) for y in v] + + df_data.update({x: v}) + df = pd.DataFrame(df_data) + + if self._index is None: + df.index = pd.Index(range(self._lines_read, self._lines_read + read_lines)) + else: + df = df.set_index(self._index) + + self._lines_read += read_lines + + return df diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/sasreader.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/sasreader.py new file mode 100644 index 0000000000000000000000000000000000000000..c39313d5dc6548fcc014f7a886988a2b9d9001ed --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/sas/sasreader.py @@ -0,0 +1,178 @@ +""" +Read SAS sas7bdat or xport files. +""" +from __future__ import annotations + +from abc import ( + ABC, + abstractmethod, +) +from typing import ( + TYPE_CHECKING, + overload, +) + +from pandas.util._decorators import doc + +from pandas.core.shared_docs import _shared_docs + +from pandas.io.common import stringify_path + +if TYPE_CHECKING: + from collections.abc import Hashable + from types import TracebackType + + from pandas._typing import ( + CompressionOptions, + FilePath, + ReadBuffer, + Self, + ) + + from pandas import DataFrame + + +class ReaderBase(ABC): + """ + Protocol for XportReader and SAS7BDATReader classes. + """ + + @abstractmethod + def read(self, nrows: int | None = None) -> DataFrame: + ... + + @abstractmethod + def close(self) -> None: + ... + + def __enter__(self) -> Self: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + self.close() + + +@overload +def read_sas( + filepath_or_buffer: FilePath | ReadBuffer[bytes], + *, + format: str | None = ..., + index: Hashable | None = ..., + encoding: str | None = ..., + chunksize: int = ..., + iterator: bool = ..., + compression: CompressionOptions = ..., +) -> ReaderBase: + ... + + +@overload +def read_sas( + filepath_or_buffer: FilePath | ReadBuffer[bytes], + *, + format: str | None = ..., + index: Hashable | None = ..., + encoding: str | None = ..., + chunksize: None = ..., + iterator: bool = ..., + compression: CompressionOptions = ..., +) -> DataFrame | ReaderBase: + ... 
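+
+ # A hedged usage sketch (illustrative only; "data.xpt" is a hypothetical
+ # file name): the chunked-read path declared by the overloads above.
+ #
+ #     import pandas as pd
+ #
+ #     with pd.read_sas("data.xpt", format="xport", chunksize=10_000) as rdr:
+ #         for chunk in rdr:  # each chunk is a DataFrame of <= 10_000 rows
+ #             print(len(chunk))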
+ + +@doc(decompression_options=_shared_docs["decompression_options"] % "filepath_or_buffer") +def read_sas( + filepath_or_buffer: FilePath | ReadBuffer[bytes], + *, + format: str | None = None, + index: Hashable | None = None, + encoding: str | None = None, + chunksize: int | None = None, + iterator: bool = False, + compression: CompressionOptions = "infer", +) -> DataFrame | ReaderBase: + """ + Read SAS files stored as either XPORT or SAS7BDAT format files. + + Parameters + ---------- + filepath_or_buffer : str, path object, or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a binary ``read()`` function. The string could be a URL. + Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is + expected. A local file could be: + ``file://localhost/path/to/table.sas7bdat``. + format : str {{'xport', 'sas7bdat'}} or None + If None, file format is inferred from file extension. If 'xport' or + 'sas7bdat', uses the corresponding format. + index : identifier of index column, defaults to None + Identifier of column that should be used as index of the DataFrame. + encoding : str, default is None + Encoding for text data. If None, text data are stored as raw bytes. + chunksize : int + Read file `chunksize` lines at a time, returns iterator. + iterator : bool, defaults to False + If True, returns an iterator for reading the file incrementally. + {decompression_options} + + Returns + ------- + DataFrame if iterator=False and chunksize=None, else SAS7BDATReader + or XportReader + + Examples + -------- + >>> df = pd.read_sas("sas_data.sas7bdat") # doctest: +SKIP + """ + if format is None: + buffer_error_msg = ( + "If this is a buffer object rather " + "than a string name, you must specify a format string" + ) + filepath_or_buffer = stringify_path(filepath_or_buffer) + if not isinstance(filepath_or_buffer, str): + raise ValueError(buffer_error_msg) + fname = filepath_or_buffer.lower() + if ".xpt" in fname: + format = "xport" + elif ".sas7bdat" in fname: + format = "sas7bdat" + else: + raise ValueError( + f"unable to infer format of SAS file from filename: {repr(fname)}" + ) + + reader: ReaderBase + if format.lower() == "xport": + from pandas.io.sas.sas_xport import XportReader + + reader = XportReader( + filepath_or_buffer, + index=index, + encoding=encoding, + chunksize=chunksize, + compression=compression, + ) + elif format.lower() == "sas7bdat": + from pandas.io.sas.sas7bdat import SAS7BDATReader + + reader = SAS7BDATReader( + filepath_or_buffer, + index=index, + encoding=encoding, + chunksize=chunksize, + compression=compression, + ) + else: + raise ValueError("unknown SAS format") + + if iterator or chunksize: + return reader + + with reader: + return reader.read() diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/spss.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/spss.py new file mode 100644 index 0000000000000000000000000000000000000000..db31a07df79e6de2862e57fd75de0bd4b9c2455d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/spss.py @@ -0,0 +1,72 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from pandas._libs import lib +from pandas.compat._optional import import_optional_dependency +from pandas.util._validators import check_dtype_backend + +from pandas.core.dtypes.inference import is_list_like + +from pandas.io.common import stringify_path + +if TYPE_CHECKING: + from collections.abc import Sequence + from pathlib import Path + + from 
pandas._typing import DtypeBackend + + from pandas import DataFrame + + +def read_spss( + path: str | Path, + usecols: Sequence[str] | None = None, + convert_categoricals: bool = True, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, +) -> DataFrame: + """ + Load an SPSS file from the file path, returning a DataFrame. + + Parameters + ---------- + path : str or Path + File path. + usecols : list-like, optional + Return a subset of the columns. If None, return all columns. + convert_categoricals : bool, default is True + Convert categorical columns into pd.Categorical. + dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + + Returns + ------- + DataFrame + + Examples + -------- + >>> df = pd.read_spss("spss_data.sav") # doctest: +SKIP + """ + pyreadstat = import_optional_dependency("pyreadstat") + check_dtype_backend(dtype_backend) + + if usecols is not None: + if not is_list_like(usecols): + raise TypeError("usecols must be list-like.") + usecols = list(usecols) # pyreadstat requires a list + + df, metadata = pyreadstat.read_sav( + stringify_path(path), usecols=usecols, apply_value_formats=convert_categoricals + ) + df.attrs = metadata.__dict__ + if dtype_backend is not lib.no_default: + df = df.convert_dtypes(dtype_backend=dtype_backend) + return df diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/sql.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/sql.py new file mode 100644 index 0000000000000000000000000000000000000000..3e17175167f25a4bfc7eb559070927f56dc84eae --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/sql.py @@ -0,0 +1,2926 @@ +""" +Collection of query wrappers / abstractions to both facilitate data +retrieval and to reduce dependency on DB-specific API. 
+""" + +from __future__ import annotations + +from abc import ( + ABC, + abstractmethod, +) +from contextlib import ( + ExitStack, + contextmanager, +) +from datetime import ( + date, + datetime, + time, +) +from functools import partial +import re +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + cast, + overload, +) +import warnings + +import numpy as np + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs import lib +from pandas.compat._optional import import_optional_dependency +from pandas.errors import ( + AbstractMethodError, + DatabaseError, +) +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import check_dtype_backend + +from pandas.core.dtypes.common import ( + is_dict_like, + is_list_like, +) +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + DatetimeTZDtype, +) +from pandas.core.dtypes.missing import isna + +from pandas import get_option +from pandas.core.api import ( + DataFrame, + Series, +) +from pandas.core.arrays import ArrowExtensionArray +from pandas.core.base import PandasObject +import pandas.core.common as com +from pandas.core.common import maybe_make_list +from pandas.core.internals.construction import convert_object_array +from pandas.core.tools.datetimes import to_datetime + +if TYPE_CHECKING: + from collections.abc import ( + Iterator, + Mapping, + ) + + from sqlalchemy import Table + from sqlalchemy.sql.expression import ( + Select, + TextClause, + ) + + from pandas._typing import ( + DateTimeErrorChoices, + DtypeArg, + DtypeBackend, + IndexLabel, + Self, + ) + + from pandas import Index + +# ----------------------------------------------------------------------------- +# -- Helper functions + + +def _process_parse_dates_argument(parse_dates): + """Process parse_dates argument for read_sql functions""" + # handle non-list entries for parse_dates gracefully + if parse_dates is True or parse_dates is None or parse_dates is False: + parse_dates = [] + + elif not hasattr(parse_dates, "__iter__"): + parse_dates = [parse_dates] + return parse_dates + + +def _handle_date_column( + col, utc: bool = False, format: str | dict[str, Any] | None = None +): + if isinstance(format, dict): + # GH35185 Allow custom error values in parse_dates argument of + # read_sql like functions. + # Format can take on custom to_datetime argument values such as + # {"errors": "coerce"} or {"dayfirst": True} + error: DateTimeErrorChoices = format.pop("errors", None) or "ignore" + if error == "ignore": + try: + return to_datetime(col, **format) + except (TypeError, ValueError): + # TODO: not reached 2023-10-27; needed? + return col + return to_datetime(col, errors=error, **format) + else: + # Allow passing of formatting string for integers + # GH17855 + if format is None and ( + issubclass(col.dtype.type, np.floating) + or issubclass(col.dtype.type, np.integer) + ): + format = "s" + if format in ["D", "d", "h", "m", "s", "ms", "us", "ns"]: + return to_datetime(col, errors="coerce", unit=format, utc=utc) + elif isinstance(col.dtype, DatetimeTZDtype): + # coerce to UTC timezone + # GH11216 + return to_datetime(col, utc=True) + else: + return to_datetime(col, errors="coerce", format=format, utc=utc) + + +def _parse_date_columns(data_frame, parse_dates): + """ + Force non-datetime columns to be read as such. + Supports both string formatted and integer timestamp columns. 
+ """ + parse_dates = _process_parse_dates_argument(parse_dates) + + # we want to coerce datetime64_tz dtypes for now to UTC + # we could in theory do a 'nice' conversion from a FixedOffset tz + # GH11216 + for i, (col_name, df_col) in enumerate(data_frame.items()): + if isinstance(df_col.dtype, DatetimeTZDtype) or col_name in parse_dates: + try: + fmt = parse_dates[col_name] + except (KeyError, TypeError): + fmt = None + data_frame.isetitem(i, _handle_date_column(df_col, format=fmt)) + + return data_frame + + +def _convert_arrays_to_dataframe( + data, + columns, + coerce_float: bool = True, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", +) -> DataFrame: + content = lib.to_object_array_tuples(data) + arrays = convert_object_array( + list(content.T), + dtype=None, + coerce_float=coerce_float, + dtype_backend=dtype_backend, + ) + if dtype_backend == "pyarrow": + pa = import_optional_dependency("pyarrow") + + result_arrays = [] + for arr in arrays: + pa_array = pa.array(arr, from_pandas=True) + if arr.dtype == "string": + # TODO: Arrow still infers strings arrays as regular strings instead + # of large_string, which is what we preserver everywhere else for + # dtype_backend="pyarrow". We may want to reconsider this + pa_array = pa_array.cast(pa.string()) + result_arrays.append(ArrowExtensionArray(pa_array)) + arrays = result_arrays # type: ignore[assignment] + if arrays: + df = DataFrame(dict(zip(list(range(len(columns))), arrays))) + df.columns = columns + return df + else: + return DataFrame(columns=columns) + + +def _wrap_result( + data, + columns, + index_col=None, + coerce_float: bool = True, + parse_dates=None, + dtype: DtypeArg | None = None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", +): + """Wrap result set of a SQLAlchemy query in a DataFrame.""" + frame = _convert_arrays_to_dataframe(data, columns, coerce_float, dtype_backend) + + if dtype: + frame = frame.astype(dtype) + + frame = _parse_date_columns(frame, parse_dates) + + if index_col is not None: + frame = frame.set_index(index_col) + + return frame + + +def _wrap_result_adbc( + df: DataFrame, + *, + index_col=None, + parse_dates=None, + dtype: DtypeArg | None = None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", +) -> DataFrame: + """Wrap result set of a SQLAlchemy query in a DataFrame.""" + if dtype: + df = df.astype(dtype) + + df = _parse_date_columns(df, parse_dates) + + if index_col is not None: + df = df.set_index(index_col) + + return df + + +def execute(sql, con, params=None): + """ + Execute the given SQL query using the provided connection object. + + Parameters + ---------- + sql : string + SQL query to be executed. + con : SQLAlchemy connection or sqlite3 connection + If a DBAPI2 object, only sqlite3 is supported. + params : list or tuple, optional, default: None + List of parameters to pass to execute method. 
+ + Returns + ------- + Results Iterable + """ + warnings.warn( + "`pandas.io.sql.execute` is deprecated and " + "will be removed in a future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) # GH50185 + sqlalchemy = import_optional_dependency("sqlalchemy", errors="ignore") + + if sqlalchemy is not None and isinstance(con, (str, sqlalchemy.engine.Engine)): + raise TypeError("pandas.io.sql.execute requires a connection") # GH50185 + with pandasSQL_builder(con, need_transaction=True) as pandas_sql: + return pandas_sql.execute(sql, params) + + +# ----------------------------------------------------------------------------- +# -- Read and write to DataFrames + + +@overload +def read_sql_table( + table_name: str, + con, + schema=..., + index_col: str | list[str] | None = ..., + coerce_float=..., + parse_dates: list[str] | dict[str, str] | None = ..., + columns: list[str] | None = ..., + chunksize: None = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> DataFrame: + ... + + +@overload +def read_sql_table( + table_name: str, + con, + schema=..., + index_col: str | list[str] | None = ..., + coerce_float=..., + parse_dates: list[str] | dict[str, str] | None = ..., + columns: list[str] | None = ..., + chunksize: int = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> Iterator[DataFrame]: + ... + + +def read_sql_table( + table_name: str, + con, + schema: str | None = None, + index_col: str | list[str] | None = None, + coerce_float: bool = True, + parse_dates: list[str] | dict[str, str] | None = None, + columns: list[str] | None = None, + chunksize: int | None = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, +) -> DataFrame | Iterator[DataFrame]: + """ + Read SQL database table into a DataFrame. + + Given a table name and a SQLAlchemy connectable, returns a DataFrame. + This function does not support DBAPI connections. + + Parameters + ---------- + table_name : str + Name of SQL table in database. + con : SQLAlchemy connectable or str + A database URI could be provided as str. + SQLite DBAPI connection mode not supported. + schema : str, default None + Name of SQL schema in database to query (if database flavor + supports this). Uses default schema if None (default). + index_col : str or list of str, optional, default: None + Column(s) to set as index (MultiIndex). + coerce_float : bool, default True + Attempts to convert values of non-string, non-numeric objects (like + decimal.Decimal) to floating point. Can result in loss of precision. + parse_dates : list or dict, default None + - List of column names to parse as dates. + - Dict of ``{column_name: format string}`` where format string is + strftime compatible in case of parsing string times or is one of + (D, s, ns, ms, us) in case of parsing integer timestamps. + - Dict of ``{column_name: arg dict}``, where the arg dict corresponds + to the keyword arguments of :func:`pandas.to_datetime`. + Especially useful with databases without native Datetime support, + such as SQLite. + columns : list, default None + List of column names to select from SQL table. + chunksize : int, default None + If specified, returns an iterator where `chunksize` is the number of + rows to include in each chunk. + dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default).
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + + Returns + ------- + DataFrame or Iterator[DataFrame] + A SQL table is returned as two-dimensional data structure with labeled + axes. + + See Also + -------- + read_sql_query : Read SQL query into a DataFrame. + read_sql : Read SQL query or database table into a DataFrame. + + Notes + ----- + Any datetime values with time zone information will be converted to UTC. + + Examples + -------- + >>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP + """ + + check_dtype_backend(dtype_backend) + if dtype_backend is lib.no_default: + dtype_backend = "numpy" # type: ignore[assignment] + assert dtype_backend is not lib.no_default + + with pandasSQL_builder(con, schema=schema, need_transaction=True) as pandas_sql: + if not pandas_sql.has_table(table_name): + raise ValueError(f"Table {table_name} not found") + + table = pandas_sql.read_table( + table_name, + index_col=index_col, + coerce_float=coerce_float, + parse_dates=parse_dates, + columns=columns, + chunksize=chunksize, + dtype_backend=dtype_backend, + ) + + if table is not None: + return table + else: + raise ValueError(f"Table {table_name} not found", con) + + +@overload +def read_sql_query( + sql, + con, + index_col: str | list[str] | None = ..., + coerce_float=..., + params: list[Any] | Mapping[str, Any] | None = ..., + parse_dates: list[str] | dict[str, str] | None = ..., + chunksize: None = ..., + dtype: DtypeArg | None = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> DataFrame: + ... + + +@overload +def read_sql_query( + sql, + con, + index_col: str | list[str] | None = ..., + coerce_float=..., + params: list[Any] | Mapping[str, Any] | None = ..., + parse_dates: list[str] | dict[str, str] | None = ..., + chunksize: int = ..., + dtype: DtypeArg | None = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> Iterator[DataFrame]: + ... + + +def read_sql_query( + sql, + con, + index_col: str | list[str] | None = None, + coerce_float: bool = True, + params: list[Any] | Mapping[str, Any] | None = None, + parse_dates: list[str] | dict[str, str] | None = None, + chunksize: int | None = None, + dtype: DtypeArg | None = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, +) -> DataFrame | Iterator[DataFrame]: + """ + Read SQL query into a DataFrame. + + Returns a DataFrame corresponding to the result set of the query + string. Optionally provide an `index_col` parameter to use one of the + columns as the index, otherwise default integer index will be used. + + Parameters + ---------- + sql : str SQL query or SQLAlchemy Selectable (select or text object) + SQL query to be executed. + con : SQLAlchemy connectable, str, or sqlite3 connection + Using SQLAlchemy makes it possible to use any DB supported by that + library. If a DBAPI2 object, only sqlite3 is supported. + index_col : str or list of str, optional, default: None + Column(s) to set as index(MultiIndex). + coerce_float : bool, default True + Attempts to convert values of non-string, non-numeric objects (like + decimal.Decimal) to floating point. Useful for SQL result sets. + params : list, tuple or mapping, optional, default: None + List of parameters to pass to execute method. The syntax used + to pass parameters is database driver dependent. Check your + database driver documentation for which of the five syntax styles, + described in PEP 249's paramstyle, is supported. + Eg. 
for psycopg2, uses %(name)s so use params={'name' : 'value'}. + parse_dates : list or dict, default: None + - List of column names to parse as dates. + - Dict of ``{column_name: format string}`` where format string is + strftime compatible in case of parsing string times, or is one of + (D, s, ns, ms, us) in case of parsing integer timestamps. + - Dict of ``{column_name: arg dict}``, where the arg dict corresponds + to the keyword arguments of :func:`pandas.to_datetime` + Especially useful with databases without native Datetime support, + such as SQLite. + chunksize : int, default None + If specified, return an iterator where `chunksize` is the number of + rows to include in each chunk. + dtype : Type name or dict of columns + Data type for data or columns. E.g. np.float64 or + {'a': np.float64, 'b': np.int32, 'c': 'Int64'}. + + .. versionadded:: 1.3.0 + dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + + Returns + ------- + DataFrame or Iterator[DataFrame] + + See Also + -------- + read_sql_table : Read SQL database table into a DataFrame. + read_sql : Read SQL query or database table into a DataFrame. + + Notes + ----- + Any datetime values with time zone information parsed via the `parse_dates` + parameter will be converted to UTC. + + Examples + -------- + >>> from sqlalchemy import create_engine # doctest: +SKIP + >>> engine = create_engine("sqlite:///database.db") # doctest: +SKIP + >>> with engine.connect() as conn, conn.begin(): # doctest: +SKIP + ... data = pd.read_sql_table("data", conn) # doctest: +SKIP + """ + + check_dtype_backend(dtype_backend) + if dtype_backend is lib.no_default: + dtype_backend = "numpy" # type: ignore[assignment] + assert dtype_backend is not lib.no_default + + with pandasSQL_builder(con) as pandas_sql: + return pandas_sql.read_query( + sql, + index_col=index_col, + params=params, + coerce_float=coerce_float, + parse_dates=parse_dates, + chunksize=chunksize, + dtype=dtype, + dtype_backend=dtype_backend, + ) + + +@overload +def read_sql( + sql, + con, + index_col: str | list[str] | None = ..., + coerce_float=..., + params=..., + parse_dates=..., + columns: list[str] = ..., + chunksize: None = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., + dtype: DtypeArg | None = None, +) -> DataFrame: + ... + + +@overload +def read_sql( + sql, + con, + index_col: str | list[str] | None = ..., + coerce_float=..., + params=..., + parse_dates=..., + columns: list[str] = ..., + chunksize: int = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., + dtype: DtypeArg | None = None, +) -> Iterator[DataFrame]: + ... + + +def read_sql( + sql, + con, + index_col: str | list[str] | None = None, + coerce_float: bool = True, + params=None, + parse_dates=None, + columns: list[str] | None = None, + chunksize: int | None = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + dtype: DtypeArg | None = None, +) -> DataFrame | Iterator[DataFrame]: + """ + Read SQL query or database table into a DataFrame. + + This function is a convenience wrapper around ``read_sql_table`` and + ``read_sql_query`` (for backward compatibility). It will delegate + to the specific function depending on the provided input. 
A SQL query + will be routed to ``read_sql_query``, while a database table name will + be routed to ``read_sql_table``. Note that the delegated function might + have more specific notes about their functionality not listed here. + + Parameters + ---------- + sql : str or SQLAlchemy Selectable (select or text object) + SQL query to be executed or a table name. + con : ADBC Connection, SQLAlchemy connectable, str, or sqlite3 connection + ADBC provides high performance I/O with native type support, where available. + Using SQLAlchemy makes it possible to use any DB supported by that + library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible + for engine disposal and connection closure for the ADBC connection and + SQLAlchemy connectable; str connections are closed automatically. See + `here `_. + index_col : str or list of str, optional, default: None + Column(s) to set as index(MultiIndex). + coerce_float : bool, default True + Attempts to convert values of non-string, non-numeric objects (like + decimal.Decimal) to floating point, useful for SQL result sets. + params : list, tuple or dict, optional, default: None + List of parameters to pass to execute method. The syntax used + to pass parameters is database driver dependent. Check your + database driver documentation for which of the five syntax styles, + described in PEP 249's paramstyle, is supported. + Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}. + parse_dates : list or dict, default: None + - List of column names to parse as dates. + - Dict of ``{column_name: format string}`` where format string is + strftime compatible in case of parsing string times, or is one of + (D, s, ns, ms, us) in case of parsing integer timestamps. + - Dict of ``{column_name: arg dict}``, where the arg dict corresponds + to the keyword arguments of :func:`pandas.to_datetime` + Especially useful with databases without native Datetime support, + such as SQLite. + columns : list, default: None + List of column names to select from SQL table (only used when reading + a table). + chunksize : int, default None + If specified, return an iterator where `chunksize` is the + number of rows to include in each chunk. + dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + dtype : Type name or dict of columns + Data type for data or columns. E.g. np.float64 or + {'a': np.float64, 'b': np.int32, 'c': 'Int64'}. + The argument is ignored if a table is passed instead of a query. + + .. versionadded:: 2.0.0 + + Returns + ------- + DataFrame or Iterator[DataFrame] + + See Also + -------- + read_sql_table : Read SQL database table into a DataFrame. + read_sql_query : Read SQL query into a DataFrame. + + Examples + -------- + Read data from SQL via either a SQL query or a SQL tablename. + When using a SQLite database only SQL queries are accepted, + providing only the SQL tablename will result in an error. + + >>> from sqlite3 import connect + >>> conn = connect(':memory:') + >>> df = pd.DataFrame(data=[[0, '10/11/12'], [1, '12/11/10']], + ... 
columns=['int_column', 'date_column']) + >>> df.to_sql(name='test_data', con=conn) + 2 + + >>> pd.read_sql('SELECT int_column, date_column FROM test_data', conn) + int_column date_column + 0 0 10/11/12 + 1 1 12/11/10 + + >>> pd.read_sql('test_data', 'postgres:///db_name') # doctest:+SKIP + + Apply date parsing to columns through the ``parse_dates`` argument. + The ``parse_dates`` argument calls ``pd.to_datetime`` on the provided columns. + Custom argument values for applying ``pd.to_datetime`` on a column are specified + via a dictionary format: + + >>> pd.read_sql('SELECT int_column, date_column FROM test_data', + ... conn, + ... parse_dates={"date_column": {"format": "%d/%m/%y"}}) + int_column date_column + 0 0 2012-11-10 + 1 1 2010-11-12 + + .. versionadded:: 2.2.0 + + pandas now supports reading via ADBC drivers. + + >>> from adbc_driver_postgresql import dbapi # doctest:+SKIP + >>> with dbapi.connect('postgres:///db_name') as conn: # doctest:+SKIP + ... pd.read_sql('SELECT int_column FROM test_data', conn) + int_column + 0 0 + 1 1 + """ + + check_dtype_backend(dtype_backend) + if dtype_backend is lib.no_default: + dtype_backend = "numpy" # type: ignore[assignment] + assert dtype_backend is not lib.no_default + + with pandasSQL_builder(con) as pandas_sql: + if isinstance(pandas_sql, SQLiteDatabase): + return pandas_sql.read_query( + sql, + index_col=index_col, + params=params, + coerce_float=coerce_float, + parse_dates=parse_dates, + chunksize=chunksize, + dtype_backend=dtype_backend, + dtype=dtype, + ) + + try: + _is_table_name = pandas_sql.has_table(sql) + except Exception: + # using generic exception to catch errors from sql drivers (GH24988) + _is_table_name = False + + if _is_table_name: + return pandas_sql.read_table( + sql, + index_col=index_col, + coerce_float=coerce_float, + parse_dates=parse_dates, + columns=columns, + chunksize=chunksize, + dtype_backend=dtype_backend, + ) + else: + return pandas_sql.read_query( + sql, + index_col=index_col, + params=params, + coerce_float=coerce_float, + parse_dates=parse_dates, + chunksize=chunksize, + dtype_backend=dtype_backend, + dtype=dtype, + ) + + +def to_sql( + frame, + name: str, + con, + schema: str | None = None, + if_exists: Literal["fail", "replace", "append"] = "fail", + index: bool = True, + index_label: IndexLabel | None = None, + chunksize: int | None = None, + dtype: DtypeArg | None = None, + method: Literal["multi"] | Callable | None = None, + engine: str = "auto", + **engine_kwargs, +) -> int | None: + """ + Write records stored in a DataFrame to a SQL database. + + Parameters + ---------- + frame : DataFrame, Series + name : str + Name of SQL table. + con : ADBC Connection, SQLAlchemy connectable, str, or sqlite3 + DBAPI2 connection + ADBC provides high performance I/O with native type support, where available. + Using SQLAlchemy makes it possible to use any DB supported by that + library. + If a DBAPI2 object, only sqlite3 is supported. + schema : str, optional + Name of SQL schema in database to write to (if database flavor + supports this). If None, use default schema (default). + if_exists : {'fail', 'replace', 'append'}, default 'fail' + - fail: If table exists, raise a ValueError. + - replace: If table exists, drop it, recreate it, and insert data. + - append: If table exists, insert data. Create it if it does not exist. + index : bool, default True + Write DataFrame index as a column. + index_label : str or sequence, optional + Column label for index column(s).
If None is given (default) and + `index` is True, then the index names are used. + A sequence should be given if the DataFrame uses MultiIndex. + chunksize : int, optional + Specify the number of rows in each batch to be written at a time. + By default, all rows will be written at once. + dtype : dict or scalar, optional + Specifying the datatype for columns. If a dictionary is used, the + keys should be the column names and the values should be the + SQLAlchemy types or strings for the sqlite3 fallback mode. If a + scalar is provided, it will be applied to all columns. + method : {None, 'multi', callable}, optional + Controls the SQL insertion clause used: + + - None : Uses standard SQL ``INSERT`` clause (one per row). + - ``'multi'``: Pass multiple values in a single ``INSERT`` clause. + - callable with signature ``(pd_table, conn, keys, data_iter) -> int | None``. + + Details and a sample callable implementation can be found in the + section :ref:`insert method `. + engine : {'auto', 'sqlalchemy'}, default 'auto' + SQL engine library to use. If 'auto', then the option + ``io.sql.engine`` is used. The default ``io.sql.engine`` + behavior is 'sqlalchemy'. + + .. versionadded:: 1.3.0 + + **engine_kwargs + Any additional kwargs are passed to the engine. + + Returns + ------- + None or int + Number of rows affected by to_sql. None is returned if the callable + passed into ``method`` does not return an integer number of rows. + + .. versionadded:: 1.4.0 + + Notes + ----- + The number of rows affected is the sum of the ``rowcount`` attribute of ``sqlite3.Cursor`` + or SQLAlchemy connectable. If using ADBC, the returned row count is the result + of ``Cursor.adbc_ingest``. The returned value may not reflect the exact number of written + rows as stipulated in the + `sqlite3 `__ or + `SQLAlchemy `__ documentation. + """ # noqa: E501 + if if_exists not in ("fail", "replace", "append"): + raise ValueError(f"'{if_exists}' is not valid for if_exists") + + if isinstance(frame, Series): + frame = frame.to_frame() + elif not isinstance(frame, DataFrame): + raise NotImplementedError( + "'frame' argument should be either a Series or a DataFrame" + ) + + with pandasSQL_builder(con, schema=schema, need_transaction=True) as pandas_sql: + return pandas_sql.to_sql( + frame, + name, + if_exists=if_exists, + index=index, + index_label=index_label, + schema=schema, + chunksize=chunksize, + dtype=dtype, + method=method, + engine=engine, + **engine_kwargs, + ) + + +def has_table(table_name: str, con, schema: str | None = None) -> bool: + """ + Check if the database has the named table. + + Parameters + ---------- + table_name: string + Name of SQL table. + con: ADBC Connection, SQLAlchemy connectable, str, or sqlite3 connection + ADBC provides high performance I/O with native type support, where available. + Using SQLAlchemy makes it possible to use any DB supported by that + library. + If a DBAPI2 object, only sqlite3 is supported. + schema : string, default None + Name of SQL schema in database to write to (if database flavor supports + this). If None, use default schema (default). + + Returns + ------- + boolean + """ + with pandasSQL_builder(con, schema=schema) as pandas_sql: + return pandas_sql.has_table(table_name) + + +table_exists = has_table + + +def pandasSQL_builder( + con, + schema: str | None = None, + need_transaction: bool = False, +) -> PandasSQL: + """ + Convenience function to return the correct PandasSQL subclass based on the + provided parameters. Also creates a sqlalchemy connection and transaction + if necessary.
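+ The dispatch mirrors the body below: a ``sqlite3.Connection`` (or ``None``) yields ``SQLiteDatabase``; a SQLAlchemy connectable or URI string yields ``SQLDatabase``; an ADBC connection yields ``ADBCDatabase``; any other DBAPI2 object falls back to ``SQLiteDatabase`` with a warning.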
+ """ + import sqlite3 + + if isinstance(con, sqlite3.Connection) or con is None: + return SQLiteDatabase(con) + + sqlalchemy = import_optional_dependency("sqlalchemy", errors="ignore") + + if isinstance(con, str) and sqlalchemy is None: + raise ImportError("Using URI string without sqlalchemy installed.") + + if sqlalchemy is not None and isinstance(con, (str, sqlalchemy.engine.Connectable)): + return SQLDatabase(con, schema, need_transaction) + + adbc = import_optional_dependency("adbc_driver_manager.dbapi", errors="ignore") + if adbc and isinstance(con, adbc.Connection): + return ADBCDatabase(con) + + warnings.warn( + "pandas only supports SQLAlchemy connectable (engine/connection) or " + "database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 " + "objects are not tested. Please consider using SQLAlchemy.", + UserWarning, + stacklevel=find_stack_level(), + ) + return SQLiteDatabase(con) + + +class SQLTable(PandasObject): + """ + For mapping Pandas tables to SQL tables. + Uses fact that table is reflected by SQLAlchemy to + do better type conversions. + Also holds various flags needed to avoid having to + pass them between functions all the time. + """ + + # TODO: support for multiIndex + + def __init__( + self, + name: str, + pandas_sql_engine, + frame=None, + index: bool | str | list[str] | None = True, + if_exists: Literal["fail", "replace", "append"] = "fail", + prefix: str = "pandas", + index_label=None, + schema=None, + keys=None, + dtype: DtypeArg | None = None, + ) -> None: + self.name = name + self.pd_sql = pandas_sql_engine + self.prefix = prefix + self.frame = frame + self.index = self._index_name(index, index_label) + self.schema = schema + self.if_exists = if_exists + self.keys = keys + self.dtype = dtype + + if frame is not None: + # We want to initialize based on a dataframe + self.table = self._create_table_setup() + else: + # no data provided, read-only mode + self.table = self.pd_sql.get_table(self.name, self.schema) + + if self.table is None: + raise ValueError(f"Could not init table '{name}'") + + if not len(self.name): + raise ValueError("Empty table name specified") + + def exists(self): + return self.pd_sql.has_table(self.name, self.schema) + + def sql_schema(self) -> str: + from sqlalchemy.schema import CreateTable + + return str(CreateTable(self.table).compile(self.pd_sql.con)) + + def _execute_create(self) -> None: + # Inserting table into database, add to MetaData object + self.table = self.table.to_metadata(self.pd_sql.meta) + with self.pd_sql.run_transaction(): + self.table.create(bind=self.pd_sql.con) + + def create(self) -> None: + if self.exists(): + if self.if_exists == "fail": + raise ValueError(f"Table '{self.name}' already exists.") + if self.if_exists == "replace": + self.pd_sql.drop_table(self.name, self.schema) + self._execute_create() + elif self.if_exists == "append": + pass + else: + raise ValueError(f"'{self.if_exists}' is not valid for if_exists") + else: + self._execute_create() + + def _execute_insert(self, conn, keys: list[str], data_iter) -> int: + """ + Execute SQL statement inserting data + + Parameters + ---------- + conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection + keys : list of str + Column names + data_iter : generator of list + Each item contains a list of values to be inserted + """ + data = [dict(zip(keys, row)) for row in data_iter] + result = conn.execute(self.table.insert(), data) + return result.rowcount + + def _execute_insert_multi(self, conn, keys: list[str], data_iter) -> int: + """ + Alternative to 
_execute_insert for DBs that support multi-value INSERT. + + Note: multi-value insert is usually faster for analytics DBs + and tables containing a few columns, + but performance degrades quickly as the number of columns increases. + + """ + + from sqlalchemy import insert + + data = [dict(zip(keys, row)) for row in data_iter] + stmt = insert(self.table).values(data) + result = conn.execute(stmt) + return result.rowcount + + def insert_data(self) -> tuple[list[str], list[np.ndarray]]: + if self.index is not None: + temp = self.frame.copy() + temp.index.names = self.index + try: + temp.reset_index(inplace=True) + except ValueError as err: + raise ValueError(f"duplicate name in index/columns: {err}") from err + else: + temp = self.frame + + column_names = list(map(str, temp.columns)) + ncols = len(column_names) + # this just pre-allocates the list: None's will be replaced with ndarrays + # error: List item 0 has incompatible type "None"; expected "ndarray" + data_list: list[np.ndarray] = [None] * ncols # type: ignore[list-item] + + for i, (_, ser) in enumerate(temp.items()): + if ser.dtype.kind == "M": + if isinstance(ser._values, ArrowExtensionArray): + import pyarrow as pa + + if pa.types.is_date(ser.dtype.pyarrow_dtype): + # GH#53854 to_pydatetime not supported for pyarrow date dtypes + d = ser._values.to_numpy(dtype=object) + else: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=FutureWarning) + # GH#52459 to_pydatetime will return Index[object] + d = np.asarray(ser.dt.to_pydatetime(), dtype=object) + else: + d = ser._values.to_pydatetime() + elif ser.dtype.kind == "m": + vals = ser._values + if isinstance(vals, ArrowExtensionArray): + vals = vals.to_numpy(dtype=np.dtype("m8[ns]")) + # store as integers, see GH#6921, GH#7076 + d = vals.view("i8").astype(object) + else: + d = ser._values.astype(object) + + assert isinstance(d, np.ndarray), type(d) + + if ser._can_hold_na: + # Note: this will miss timedeltas since they are converted to int + mask = isna(d) + d[mask] = None + + data_list[i] = d + + return column_names, data_list + + def insert( + self, + chunksize: int | None = None, + method: Literal["multi"] | Callable | None = None, + ) -> int | None: + # set insert method + if method is None: + exec_insert = self._execute_insert + elif method == "multi": + exec_insert = self._execute_insert_multi + elif callable(method): + exec_insert = partial(method, self) + else: + raise ValueError(f"Invalid parameter `method`: {method}") + + keys, data_list = self.insert_data() + + nrows = len(self.frame) + + if nrows == 0: + return 0 + + if chunksize is None: + chunksize = nrows + elif chunksize == 0: + raise ValueError("chunksize argument should be non-zero") + + chunks = (nrows // chunksize) + 1 + total_inserted = None + with self.pd_sql.run_transaction() as conn: + for i in range(chunks): + start_i = i * chunksize + end_i = min((i + 1) * chunksize, nrows) + if start_i >= end_i: + break + + chunk_iter = zip(*(arr[start_i:end_i] for arr in data_list)) + num_inserted = exec_insert(conn, keys, chunk_iter) + # GH 46891 + if num_inserted is not None: + if total_inserted is None: + total_inserted = num_inserted + else: + total_inserted += num_inserted + return total_inserted + + def _query_iterator( + self, + result, + exit_stack: ExitStack, + chunksize: int | None, + columns, + coerce_float: bool = True, + parse_dates=None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", + ): + """Return generator through chunked result set.""" + has_read_data = False + with exit_stack: +
while True: + data = result.fetchmany(chunksize) + if not data: + if not has_read_data: + yield DataFrame.from_records( + [], columns=columns, coerce_float=coerce_float + ) + break + + has_read_data = True + self.frame = _convert_arrays_to_dataframe( + data, columns, coerce_float, dtype_backend + ) + + self._harmonize_columns( + parse_dates=parse_dates, dtype_backend=dtype_backend + ) + + if self.index is not None: + self.frame.set_index(self.index, inplace=True) + + yield self.frame + + def read( + self, + exit_stack: ExitStack, + coerce_float: bool = True, + parse_dates=None, + columns=None, + chunksize: int | None = None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", + ) -> DataFrame | Iterator[DataFrame]: + from sqlalchemy import select + + if columns is not None and len(columns) > 0: + cols = [self.table.c[n] for n in columns] + if self.index is not None: + for idx in self.index[::-1]: + cols.insert(0, self.table.c[idx]) + sql_select = select(*cols) + else: + sql_select = select(self.table) + result = self.pd_sql.execute(sql_select) + column_names = result.keys() + + if chunksize is not None: + return self._query_iterator( + result, + exit_stack, + chunksize, + column_names, + coerce_float=coerce_float, + parse_dates=parse_dates, + dtype_backend=dtype_backend, + ) + else: + data = result.fetchall() + self.frame = _convert_arrays_to_dataframe( + data, column_names, coerce_float, dtype_backend + ) + + self._harmonize_columns( + parse_dates=parse_dates, dtype_backend=dtype_backend + ) + + if self.index is not None: + self.frame.set_index(self.index, inplace=True) + + return self.frame + + def _index_name(self, index, index_label): + # for writing: index=True to include index in sql table + if index is True: + nlevels = self.frame.index.nlevels + # if index_label is specified, set this as index name(s) + if index_label is not None: + if not isinstance(index_label, list): + index_label = [index_label] + if len(index_label) != nlevels: + raise ValueError( + "Length of 'index_label' should match number of " + f"levels, which is {nlevels}" + ) + return index_label + # return the used column labels for the index columns + if ( + nlevels == 1 + and "index" not in self.frame.columns + and self.frame.index.name is None + ): + return ["index"] + else: + return com.fill_missing_names(self.frame.index.names) + + # for reading: index=(list of) string to specify column to set as index + elif isinstance(index, str): + return [index] + elif isinstance(index, list): + return index + else: + return None + + def _get_column_names_and_types(self, dtype_mapper): + column_names_and_types = [] + if self.index is not None: + for i, idx_label in enumerate(self.index): + idx_type = dtype_mapper(self.frame.index._get_level_values(i)) + column_names_and_types.append((str(idx_label), idx_type, True)) + + column_names_and_types += [ + (str(self.frame.columns[i]), dtype_mapper(self.frame.iloc[:, i]), False) + for i in range(len(self.frame.columns)) + ] + + return column_names_and_types + + def _create_table_setup(self): + from sqlalchemy import ( + Column, + PrimaryKeyConstraint, + Table, + ) + from sqlalchemy.schema import MetaData + + column_names_and_types = self._get_column_names_and_types(self._sqlalchemy_type) + + columns: list[Any] = [ + Column(name, typ, index=is_index) + for name, typ, is_index in column_names_and_types + ] + + if self.keys is not None: + if not is_list_like(self.keys): + keys = [self.keys] + else: + keys = self.keys + pkc = PrimaryKeyConstraint(*keys, name=self.name + "_pk") + 
columns.append(pkc) + + schema = self.schema or self.pd_sql.meta.schema + + # At this point, attach to new metadata, only attach to self.meta + # once table is created. + meta = MetaData() + return Table(self.name, meta, *columns, schema=schema) + + def _harmonize_columns( + self, + parse_dates=None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", + ) -> None: + """ + Make the DataFrame's column types align with the SQL table + column types. + Need to work around limited NA value support. Floats are always + fine, ints must always be floats if there are Null values. + Booleans are hard because converting bool column with None replaces + all Nones with false. Therefore only convert bool if there are no + NA values. + Datetimes should already be converted to np.datetime64 if supported, + but here we also force conversion if required. + """ + parse_dates = _process_parse_dates_argument(parse_dates) + + for sql_col in self.table.columns: + col_name = sql_col.name + try: + df_col = self.frame[col_name] + + # Handle date parsing upfront; don't try to convert columns + # twice + if col_name in parse_dates: + try: + fmt = parse_dates[col_name] + except TypeError: + fmt = None + self.frame[col_name] = _handle_date_column(df_col, format=fmt) + continue + + # the type the dataframe column should have + col_type = self._get_dtype(sql_col.type) + + if ( + col_type is datetime + or col_type is date + or col_type is DatetimeTZDtype + ): + # Convert tz-aware Datetime SQL columns to UTC + utc = col_type is DatetimeTZDtype + self.frame[col_name] = _handle_date_column(df_col, utc=utc) + elif dtype_backend == "numpy" and col_type is float: + # floats support NA, can always convert! + self.frame[col_name] = df_col.astype(col_type, copy=False) + + elif dtype_backend == "numpy" and len(df_col) == df_col.count(): + # No NA values, can convert ints and bools + if col_type is np.dtype("int64") or col_type is bool: + self.frame[col_name] = df_col.astype(col_type, copy=False) + except KeyError: + pass # this column not in results + + def _sqlalchemy_type(self, col: Index | Series): + dtype: DtypeArg = self.dtype or {} + if is_dict_like(dtype): + dtype = cast(dict, dtype) + if col.name in dtype: + return dtype[col.name] + + # Infer type of column, while ignoring missing values. + # Needed for inserting typed data containing NULLs, GH 8778. + col_type = lib.infer_dtype(col, skipna=True) + + from sqlalchemy.types import ( + TIMESTAMP, + BigInteger, + Boolean, + Date, + DateTime, + Float, + Integer, + SmallInteger, + Text, + Time, + ) + + if col_type in ("datetime64", "datetime"): + # GH 9086: TIMESTAMP is the suggested type if the column contains + # timezone information + try: + # error: Item "Index" of "Union[Index, Series]" has no attribute "dt" + if col.dt.tz is not None: # type: ignore[union-attr] + return TIMESTAMP(timezone=True) + except AttributeError: + # The column is actually a DatetimeIndex + # GH 26761 or an Index with date-like data e.g. 
9999-01-01 + if getattr(col, "tz", None) is not None: + return TIMESTAMP(timezone=True) + return DateTime + if col_type == "timedelta64": + warnings.warn( + "the 'timedelta' type is not supported, and will be " + "written as integer values (ns frequency) to the database.", + UserWarning, + stacklevel=find_stack_level(), + ) + return BigInteger + elif col_type == "floating": + if col.dtype == "float32": + return Float(precision=23) + else: + return Float(precision=53) + elif col_type == "integer": + # GH35076 Map pandas integer to optimal SQLAlchemy integer type + if col.dtype.name.lower() in ("int8", "uint8", "int16"): + return SmallInteger + elif col.dtype.name.lower() in ("uint16", "int32"): + return Integer + elif col.dtype.name.lower() == "uint64": + raise ValueError("Unsigned 64 bit integer datatype is not supported") + else: + return BigInteger + elif col_type == "boolean": + return Boolean + elif col_type == "date": + return Date + elif col_type == "time": + return Time + elif col_type == "complex": + raise ValueError("Complex datatypes not supported") + + return Text + + def _get_dtype(self, sqltype): + from sqlalchemy.types import ( + TIMESTAMP, + Boolean, + Date, + DateTime, + Float, + Integer, + ) + + if isinstance(sqltype, Float): + return float + elif isinstance(sqltype, Integer): + # TODO: Refine integer size. + return np.dtype("int64") + elif isinstance(sqltype, TIMESTAMP): + # we have a timezone capable type + if not sqltype.timezone: + return datetime + return DatetimeTZDtype + elif isinstance(sqltype, DateTime): + # Caution: np.datetime64 is also a subclass of np.number. + return datetime + elif isinstance(sqltype, Date): + return date + elif isinstance(sqltype, Boolean): + return bool + return object + + +class PandasSQL(PandasObject, ABC): + """ + Subclasses should define read_query and to_sql.
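+ Concrete implementations must also provide ``execute``, ``has_table`` and ``_create_sql_schema``, declared abstract below.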
+ """ + + def __enter__(self) -> Self: + return self + + def __exit__(self, *args) -> None: + pass + + def read_table( + self, + table_name: str, + index_col: str | list[str] | None = None, + coerce_float: bool = True, + parse_dates=None, + columns=None, + schema: str | None = None, + chunksize: int | None = None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", + ) -> DataFrame | Iterator[DataFrame]: + raise NotImplementedError + + @abstractmethod + def read_query( + self, + sql: str, + index_col: str | list[str] | None = None, + coerce_float: bool = True, + parse_dates=None, + params=None, + chunksize: int | None = None, + dtype: DtypeArg | None = None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", + ) -> DataFrame | Iterator[DataFrame]: + pass + + @abstractmethod + def to_sql( + self, + frame, + name: str, + if_exists: Literal["fail", "replace", "append"] = "fail", + index: bool = True, + index_label=None, + schema=None, + chunksize: int | None = None, + dtype: DtypeArg | None = None, + method: Literal["multi"] | Callable | None = None, + engine: str = "auto", + **engine_kwargs, + ) -> int | None: + pass + + @abstractmethod + def execute(self, sql: str | Select | TextClause, params=None): + pass + + @abstractmethod + def has_table(self, name: str, schema: str | None = None) -> bool: + pass + + @abstractmethod + def _create_sql_schema( + self, + frame: DataFrame, + table_name: str, + keys: list[str] | None = None, + dtype: DtypeArg | None = None, + schema: str | None = None, + ) -> str: + pass + + +class BaseEngine: + def insert_records( + self, + table: SQLTable, + con, + frame, + name: str, + index: bool | str | list[str] | None = True, + schema=None, + chunksize: int | None = None, + method=None, + **engine_kwargs, + ) -> int | None: + """ + Inserts data into already-prepared table + """ + raise AbstractMethodError(self) + + +class SQLAlchemyEngine(BaseEngine): + def __init__(self) -> None: + import_optional_dependency( + "sqlalchemy", extra="sqlalchemy is required for SQL support." + ) + + def insert_records( + self, + table: SQLTable, + con, + frame, + name: str, + index: bool | str | list[str] | None = True, + schema=None, + chunksize: int | None = None, + method=None, + **engine_kwargs, + ) -> int | None: + from sqlalchemy import exc + + try: + return table.insert(chunksize=chunksize, method=method) + except exc.StatementError as err: + # GH34431 + # https://stackoverflow.com/a/67358288/6067848 + msg = r"""(\(1054, "Unknown column 'inf(e0)?' 
in 'field list'"\))(?# + )|inf can not be used with MySQL""" + err_text = str(err.orig) + if re.search(msg, err_text): + raise ValueError("inf cannot be used with MySQL") from err + raise err + + +def get_engine(engine: str) -> BaseEngine: + """return our implementation""" + if engine == "auto": + engine = get_option("io.sql.engine") + + if engine == "auto": + # try engines in this order + engine_classes = [SQLAlchemyEngine] + + error_msgs = "" + for engine_class in engine_classes: + try: + return engine_class() + except ImportError as err: + error_msgs += "\n - " + str(err) + + raise ImportError( + "Unable to find a usable engine; " + "tried using: 'sqlalchemy'.\n" + "A suitable version of " + "sqlalchemy is required for sql I/O " + "support.\n" + "Trying to import the above resulted in these errors:" + f"{error_msgs}" + ) + + if engine == "sqlalchemy": + return SQLAlchemyEngine() + + raise ValueError("engine must be one of 'auto', 'sqlalchemy'") + + +class SQLDatabase(PandasSQL): + """ + This class enables conversion between DataFrame and SQL databases + using SQLAlchemy to handle DataBase abstraction. + + Parameters + ---------- + con : SQLAlchemy Connectable or URI string. + Connectable to connect with the database. Using SQLAlchemy makes it + possible to use any DB supported by that library. + schema : string, default None + Name of SQL schema in database to write to (if database flavor + supports this). If None, use default schema (default). + need_transaction : bool, default False + If True, SQLDatabase will create a transaction. + + """ + + def __init__( + self, con, schema: str | None = None, need_transaction: bool = False + ) -> None: + from sqlalchemy import create_engine + from sqlalchemy.engine import Engine + from sqlalchemy.schema import MetaData + + # self.exit_stack cleans up the Engine and Connection and commits the + # transaction if any of those objects was created below. + # Cleanup happens either in self.__exit__ or at the end of the iterator + # returned by read_sql when chunksize is not None. + self.exit_stack = ExitStack() + if isinstance(con, str): + con = create_engine(con) + self.exit_stack.callback(con.dispose) + if isinstance(con, Engine): + con = self.exit_stack.enter_context(con.connect()) + if need_transaction and not con.in_transaction(): + self.exit_stack.enter_context(con.begin()) + self.con = con + self.meta = MetaData(schema=schema) + self.returns_generator = False + + def __exit__(self, *args) -> None: + if not self.returns_generator: + self.exit_stack.close() + + @contextmanager + def run_transaction(self): + if not self.con.in_transaction(): + with self.con.begin(): + yield self.con + else: + yield self.con + + def execute(self, sql: str | Select | TextClause, params=None): + """Simple passthrough to SQLAlchemy connectable""" + args = [] if params is None else [params] + if isinstance(sql, str): + return self.con.exec_driver_sql(sql, *args) + return self.con.execute(sql, *args) + + def read_table( + self, + table_name: str, + index_col: str | list[str] | None = None, + coerce_float: bool = True, + parse_dates=None, + columns=None, + schema: str | None = None, + chunksize: int | None = None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", + ) -> DataFrame | Iterator[DataFrame]: + """ + Read SQL database table into a DataFrame. + + Parameters + ---------- + table_name : str + Name of SQL table in database. + index_col : string, optional, default: None + Column to set as index. 
+ coerce_float : bool, default True + Attempts to convert values of non-string, non-numeric objects + (like decimal.Decimal) to floating point. This can result in + loss of precision. + parse_dates : list or dict, default: None + - List of column names to parse as dates. + - Dict of ``{column_name: format string}`` where format string is + strftime compatible in case of parsing string times, or is one of + (D, s, ns, ms, us) in case of parsing integer timestamps. + - Dict of ``{column_name: arg}``, where the arg corresponds + to the keyword arguments of :func:`pandas.to_datetime`. + Especially useful with databases without native Datetime support, + such as SQLite. + columns : list, default: None + List of column names to select from SQL table. + schema : string, default None + Name of SQL schema in database to query (if database flavor + supports this). If specified, this overwrites the default + schema of the SQL database object. + chunksize : int, default None + If specified, return an iterator where `chunksize` is the number + of rows to include in each chunk. + dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + + Returns + ------- + DataFrame + + See Also + -------- + pandas.read_sql_table + SQLDatabase.read_query + + """ + self.meta.reflect(bind=self.con, only=[table_name], views=True) + table = SQLTable(table_name, self, index=index_col, schema=schema) + if chunksize is not None: + self.returns_generator = True + return table.read( + self.exit_stack, + coerce_float=coerce_float, + parse_dates=parse_dates, + columns=columns, + chunksize=chunksize, + dtype_backend=dtype_backend, + ) + + @staticmethod + def _query_iterator( + result, + exit_stack: ExitStack, + chunksize: int, + columns, + index_col=None, + coerce_float: bool = True, + parse_dates=None, + dtype: DtypeArg | None = None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", + ): + """Return generator through chunked result set""" + has_read_data = False + with exit_stack: + while True: + data = result.fetchmany(chunksize) + if not data: + if not has_read_data: + yield _wrap_result( + [], + columns, + index_col=index_col, + coerce_float=coerce_float, + parse_dates=parse_dates, + dtype=dtype, + dtype_backend=dtype_backend, + ) + break + + has_read_data = True + yield _wrap_result( + data, + columns, + index_col=index_col, + coerce_float=coerce_float, + parse_dates=parse_dates, + dtype=dtype, + dtype_backend=dtype_backend, + ) + + def read_query( + self, + sql: str, + index_col: str | list[str] | None = None, + coerce_float: bool = True, + parse_dates=None, + params=None, + chunksize: int | None = None, + dtype: DtypeArg | None = None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", + ) -> DataFrame | Iterator[DataFrame]: + """ + Read SQL query into a DataFrame. + + Parameters + ---------- + sql : str + SQL query to be executed. + index_col : string, optional, default: None + Column name to use as index for the returned DataFrame object. + coerce_float : bool, default True + Attempt to convert values of non-string, non-numeric objects (like + decimal.Decimal) to floating point, useful for SQL result sets. 
+ params : list, tuple or dict, optional, default: None + List of parameters to pass to execute method. The syntax used + to pass parameters is database driver dependent. Check your + database driver documentation for which of the five syntax styles, + described in PEP 249's paramstyle, is supported. + Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'} + parse_dates : list or dict, default: None + - List of column names to parse as dates. + - Dict of ``{column_name: format string}`` where format string is + strftime compatible in case of parsing string times, or is one of + (D, s, ns, ms, us) in case of parsing integer timestamps. + - Dict of ``{column_name: arg dict}``, where the arg dict + corresponds to the keyword arguments of + :func:`pandas.to_datetime` Especially useful with databases + without native Datetime support, such as SQLite. + chunksize : int, default None + If specified, return an iterator where `chunksize` is the number + of rows to include in each chunk. + dtype : Type name or dict of columns + Data type for data or columns. E.g. np.float64 or + {'a': np.float64, 'b': np.int32, 'c': 'Int64'} + + .. versionadded:: 1.3.0 + + Returns + ------- + DataFrame + + See Also + -------- + read_sql_table : Read SQL database table into a DataFrame. + read_sql + + """ + result = self.execute(sql, params) + columns = result.keys() + + if chunksize is not None: + self.returns_generator = True + return self._query_iterator( + result, + self.exit_stack, + chunksize, + columns, + index_col=index_col, + coerce_float=coerce_float, + parse_dates=parse_dates, + dtype=dtype, + dtype_backend=dtype_backend, + ) + else: + data = result.fetchall() + frame = _wrap_result( + data, + columns, + index_col=index_col, + coerce_float=coerce_float, + parse_dates=parse_dates, + dtype=dtype, + dtype_backend=dtype_backend, + ) + return frame + + read_sql = read_query + + def prep_table( + self, + frame, + name: str, + if_exists: Literal["fail", "replace", "append"] = "fail", + index: bool | str | list[str] | None = True, + index_label=None, + schema=None, + dtype: DtypeArg | None = None, + ) -> SQLTable: + """ + Prepares table in the database for data insertion. Creates it if needed, etc. + """ + if dtype: + if not is_dict_like(dtype): + # error: Value expression in dictionary comprehension has incompatible + # type "Union[ExtensionDtype, str, dtype[Any], Type[object], + # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]], + # Type[str], Type[float], Type[int], Type[complex], Type[bool], + # Type[object]]]]"; expected type "Union[ExtensionDtype, str, + # dtype[Any], Type[object]]" + dtype = {col_name: dtype for col_name in frame} # type: ignore[misc] + else: + dtype = cast(dict, dtype) + + from sqlalchemy.types import TypeEngine + + for col, my_type in dtype.items(): + if isinstance(my_type, type) and issubclass(my_type, TypeEngine): + pass + elif isinstance(my_type, TypeEngine): + pass + else: + raise ValueError(f"The type of {col} is not a SQLAlchemy type") + + table = SQLTable( + name, + self, + frame=frame, + index=index, + if_exists=if_exists, + index_label=index_label, + schema=schema, + dtype=dtype, + ) + table.create() + return table + + def check_case_sensitive( + self, + name: str, + schema: str | None, + ) -> None: + """ + Checks table name for issues with case-sensitivity. + Method is called after data is inserted. 
+ """ + if not name.isdigit() and not name.islower(): + # check for potentially case sensitivity issues (GH7815) + # Only check when name is not a number and name is not lower case + from sqlalchemy import inspect as sqlalchemy_inspect + + insp = sqlalchemy_inspect(self.con) + table_names = insp.get_table_names(schema=schema or self.meta.schema) + if name not in table_names: + msg = ( + f"The provided table name '{name}' is not found exactly as " + "such in the database after writing the table, possibly " + "due to case sensitivity issues. Consider using lower " + "case table names." + ) + warnings.warn( + msg, + UserWarning, + stacklevel=find_stack_level(), + ) + + def to_sql( + self, + frame, + name: str, + if_exists: Literal["fail", "replace", "append"] = "fail", + index: bool = True, + index_label=None, + schema: str | None = None, + chunksize: int | None = None, + dtype: DtypeArg | None = None, + method: Literal["multi"] | Callable | None = None, + engine: str = "auto", + **engine_kwargs, + ) -> int | None: + """ + Write records stored in a DataFrame to a SQL database. + + Parameters + ---------- + frame : DataFrame + name : string + Name of SQL table. + if_exists : {'fail', 'replace', 'append'}, default 'fail' + - fail: If table exists, do nothing. + - replace: If table exists, drop it, recreate it, and insert data. + - append: If table exists, insert data. Create if does not exist. + index : boolean, default True + Write DataFrame index as a column. + index_label : string or sequence, default None + Column label for index column(s). If None is given (default) and + `index` is True, then the index names are used. + A sequence should be given if the DataFrame uses MultiIndex. + schema : string, default None + Name of SQL schema in database to write to (if database flavor + supports this). If specified, this overwrites the default + schema of the SQLDatabase object. + chunksize : int, default None + If not None, then rows will be written in batches of this size at a + time. If None, all rows will be written at once. + dtype : single type or dict of column name to SQL type, default None + Optional specifying the datatype for columns. The SQL type should + be a SQLAlchemy type. If all columns are of the same type, one + single value can be used. + method : {None', 'multi', callable}, default None + Controls the SQL insertion clause used: + + * None : Uses standard SQL ``INSERT`` clause (one per row). + * 'multi': Pass multiple values in a single ``INSERT`` clause. + * callable with signature ``(pd_table, conn, keys, data_iter)``. + + Details and a sample callable implementation can be found in the + section :ref:`insert method `. + engine : {'auto', 'sqlalchemy'}, default 'auto' + SQL engine library to use. If 'auto', then the option + ``io.sql.engine`` is used. The default ``io.sql.engine`` + behavior is 'sqlalchemy' + + .. versionadded:: 1.3.0 + + **engine_kwargs + Any additional kwargs are passed to the engine. 
+ """ + sql_engine = get_engine(engine) + + table = self.prep_table( + frame=frame, + name=name, + if_exists=if_exists, + index=index, + index_label=index_label, + schema=schema, + dtype=dtype, + ) + + total_inserted = sql_engine.insert_records( + table=table, + con=self.con, + frame=frame, + name=name, + index=index, + schema=schema, + chunksize=chunksize, + method=method, + **engine_kwargs, + ) + + self.check_case_sensitive(name=name, schema=schema) + return total_inserted + + @property + def tables(self): + return self.meta.tables + + def has_table(self, name: str, schema: str | None = None) -> bool: + from sqlalchemy import inspect as sqlalchemy_inspect + + insp = sqlalchemy_inspect(self.con) + return insp.has_table(name, schema or self.meta.schema) + + def get_table(self, table_name: str, schema: str | None = None) -> Table: + from sqlalchemy import ( + Numeric, + Table, + ) + + schema = schema or self.meta.schema + tbl = Table(table_name, self.meta, autoload_with=self.con, schema=schema) + for column in tbl.columns: + if isinstance(column.type, Numeric): + column.type.asdecimal = False + return tbl + + def drop_table(self, table_name: str, schema: str | None = None) -> None: + schema = schema or self.meta.schema + if self.has_table(table_name, schema): + self.meta.reflect( + bind=self.con, only=[table_name], schema=schema, views=True + ) + with self.run_transaction(): + self.get_table(table_name, schema).drop(bind=self.con) + self.meta.clear() + + def _create_sql_schema( + self, + frame: DataFrame, + table_name: str, + keys: list[str] | None = None, + dtype: DtypeArg | None = None, + schema: str | None = None, + ) -> str: + table = SQLTable( + table_name, + self, + frame=frame, + index=False, + keys=keys, + dtype=dtype, + schema=schema, + ) + return str(table.sql_schema()) + + +# ---- SQL without SQLAlchemy --- + + +class ADBCDatabase(PandasSQL): + """ + This class enables conversion between DataFrame and SQL databases + using ADBC to handle DataBase abstraction. + + Parameters + ---------- + con : adbc_driver_manager.dbapi.Connection + """ + + def __init__(self, con) -> None: + self.con = con + + @contextmanager + def run_transaction(self): + with self.con.cursor() as cur: + try: + yield cur + except Exception: + self.con.rollback() + raise + self.con.commit() + + def execute(self, sql: str | Select | TextClause, params=None): + if not isinstance(sql, str): + raise TypeError("Query must be a string unless using sqlalchemy.") + args = [] if params is None else [params] + cur = self.con.cursor() + try: + cur.execute(sql, *args) + return cur + except Exception as exc: + try: + self.con.rollback() + except Exception as inner_exc: # pragma: no cover + ex = DatabaseError( + f"Execution failed on sql: {sql}\n{exc}\nunable to rollback" + ) + raise ex from inner_exc + + ex = DatabaseError(f"Execution failed on sql '{sql}': {exc}") + raise ex from exc + + def read_table( + self, + table_name: str, + index_col: str | list[str] | None = None, + coerce_float: bool = True, + parse_dates=None, + columns=None, + schema: str | None = None, + chunksize: int | None = None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", + ) -> DataFrame | Iterator[DataFrame]: + """ + Read SQL database table into a DataFrame. + + Parameters + ---------- + table_name : str + Name of SQL table in database. + coerce_float : bool, default True + Raises NotImplementedError + parse_dates : list or dict, default: None + - List of column names to parse as dates. 
+            - Dict of ``{column_name: format string}`` where format string is
+              strftime compatible in case of parsing string times, or is one of
+              (D, s, ns, ms, us) in case of parsing integer timestamps.
+            - Dict of ``{column_name: arg}``, where the arg corresponds
+              to the keyword arguments of :func:`pandas.to_datetime`.
+              Especially useful with databases without native Datetime support,
+              such as SQLite.
+        columns : list, default: None
+            List of column names to select from SQL table.
+        schema : string, default None
+            Name of SQL schema in database to query (if database flavor
+            supports this). If specified, this overwrites the default
+            schema of the SQL database object.
+        chunksize : int, default None
+            Raises NotImplementedError
+        dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
+            Back-end data type applied to the resultant :class:`DataFrame`
+            (still experimental). Behaviour is as follows:
+
+            * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+              (default).
+            * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+              DataFrame.
+
+            .. versionadded:: 2.0
+
+        Returns
+        -------
+        DataFrame
+
+        See Also
+        --------
+        pandas.read_sql_table
+        SQLDatabase.read_query
+
+        """
+        if coerce_float is not True:
+            raise NotImplementedError(
+                "'coerce_float' is not implemented for ADBC drivers"
+            )
+        if chunksize:
+            raise NotImplementedError("'chunksize' is not implemented for ADBC drivers")
+
+        if columns:
+            if index_col:
+                index_select = maybe_make_list(index_col)
+            else:
+                index_select = []
+            to_select = index_select + columns
+            select_list = ", ".join(f'"{x}"' for x in to_select)
+        else:
+            select_list = "*"
+        if schema:
+            stmt = f"SELECT {select_list} FROM {schema}.{table_name}"
+        else:
+            stmt = f"SELECT {select_list} FROM {table_name}"
+
+        mapping: type[ArrowDtype] | None | Callable
+        if dtype_backend == "pyarrow":
+            mapping = ArrowDtype
+        elif dtype_backend == "numpy_nullable":
+            from pandas.io._util import _arrow_dtype_mapping
+
+            mapping = _arrow_dtype_mapping().get
+        elif using_pyarrow_string_dtype():
+            from pandas.io._util import arrow_string_types_mapper
+
+            mapping = arrow_string_types_mapper()
+        else:
+            mapping = None
+
+        with self.con.cursor() as cur:
+            cur.execute(stmt)
+            df = cur.fetch_arrow_table().to_pandas(types_mapper=mapping)
+
+        return _wrap_result_adbc(
+            df,
+            index_col=index_col,
+            parse_dates=parse_dates,
+        )
+
+    def read_query(
+        self,
+        sql: str,
+        index_col: str | list[str] | None = None,
+        coerce_float: bool = True,
+        parse_dates=None,
+        params=None,
+        chunksize: int | None = None,
+        dtype: DtypeArg | None = None,
+        dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
+    ) -> DataFrame | Iterator[DataFrame]:
+        """
+        Read SQL query into a DataFrame.
+
+        Parameters
+        ----------
+        sql : str
+            SQL query to be executed.
+        index_col : string, optional, default: None
+            Column name to use as index for the returned DataFrame object.
+        coerce_float : bool, default True
+            Raises NotImplementedError
+        params : list, tuple or dict, optional, default: None
+            Raises NotImplementedError
+        parse_dates : list or dict, default: None
+            - List of column names to parse as dates.
+            - Dict of ``{column_name: format string}`` where format string is
+              strftime compatible in case of parsing string times, or is one of
+              (D, s, ns, ms, us) in case of parsing integer timestamps.
+ - Dict of ``{column_name: arg dict}``, where the arg dict + corresponds to the keyword arguments of + :func:`pandas.to_datetime` Especially useful with databases + without native Datetime support, such as SQLite. + chunksize : int, default None + Raises NotImplementedError + dtype : Type name or dict of columns + Data type for data or columns. E.g. np.float64 or + {'a': np.float64, 'b': np.int32, 'c': 'Int64'} + + .. versionadded:: 1.3.0 + + Returns + ------- + DataFrame + + See Also + -------- + read_sql_table : Read SQL database table into a DataFrame. + read_sql + + """ + if coerce_float is not True: + raise NotImplementedError( + "'coerce_float' is not implemented for ADBC drivers" + ) + if params: + raise NotImplementedError("'params' is not implemented for ADBC drivers") + if chunksize: + raise NotImplementedError("'chunksize' is not implemented for ADBC drivers") + + mapping: type[ArrowDtype] | None | Callable + if dtype_backend == "pyarrow": + mapping = ArrowDtype + elif dtype_backend == "numpy_nullable": + from pandas.io._util import _arrow_dtype_mapping + + mapping = _arrow_dtype_mapping().get + else: + mapping = None + + with self.con.cursor() as cur: + cur.execute(sql) + df = cur.fetch_arrow_table().to_pandas(types_mapper=mapping) + + return _wrap_result_adbc( + df, + index_col=index_col, + parse_dates=parse_dates, + dtype=dtype, + ) + + read_sql = read_query + + def to_sql( + self, + frame, + name: str, + if_exists: Literal["fail", "replace", "append"] = "fail", + index: bool = True, + index_label=None, + schema: str | None = None, + chunksize: int | None = None, + dtype: DtypeArg | None = None, + method: Literal["multi"] | Callable | None = None, + engine: str = "auto", + **engine_kwargs, + ) -> int | None: + """ + Write records stored in a DataFrame to a SQL database. + + Parameters + ---------- + frame : DataFrame + name : string + Name of SQL table. + if_exists : {'fail', 'replace', 'append'}, default 'fail' + - fail: If table exists, do nothing. + - replace: If table exists, drop it, recreate it, and insert data. + - append: If table exists, insert data. Create if does not exist. + index : boolean, default True + Write DataFrame index as a column. + index_label : string or sequence, default None + Raises NotImplementedError + schema : string, default None + Name of SQL schema in database to write to (if database flavor + supports this). If specified, this overwrites the default + schema of the SQLDatabase object. 
+ chunksize : int, default None + Raises NotImplementedError + dtype : single type or dict of column name to SQL type, default None + Raises NotImplementedError + method : {None', 'multi', callable}, default None + Raises NotImplementedError + engine : {'auto', 'sqlalchemy'}, default 'auto' + Raises NotImplementedError if not set to 'auto' + """ + if index_label: + raise NotImplementedError( + "'index_label' is not implemented for ADBC drivers" + ) + if chunksize: + raise NotImplementedError("'chunksize' is not implemented for ADBC drivers") + if dtype: + raise NotImplementedError("'dtype' is not implemented for ADBC drivers") + if method: + raise NotImplementedError("'method' is not implemented for ADBC drivers") + if engine != "auto": + raise NotImplementedError( + "engine != 'auto' not implemented for ADBC drivers" + ) + + if schema: + table_name = f"{schema}.{name}" + else: + table_name = name + + # pandas if_exists="append" will still create the + # table if it does not exist; ADBC is more explicit with append/create + # as applicable modes, so the semantics get blurred across + # the libraries + mode = "create" + if self.has_table(name, schema): + if if_exists == "fail": + raise ValueError(f"Table '{table_name}' already exists.") + elif if_exists == "replace": + with self.con.cursor() as cur: + cur.execute(f"DROP TABLE {table_name}") + elif if_exists == "append": + mode = "append" + + import pyarrow as pa + + try: + tbl = pa.Table.from_pandas(frame, preserve_index=index) + except pa.ArrowNotImplementedError as exc: + raise ValueError("datatypes not supported") from exc + + with self.con.cursor() as cur: + total_inserted = cur.adbc_ingest( + table_name=name, data=tbl, mode=mode, db_schema_name=schema + ) + + self.con.commit() + return total_inserted + + def has_table(self, name: str, schema: str | None = None) -> bool: + meta = self.con.adbc_get_objects( + db_schema_filter=schema, table_name_filter=name + ).read_all() + + for catalog_schema in meta["catalog_db_schemas"].to_pylist(): + if not catalog_schema: + continue + for schema_record in catalog_schema: + if not schema_record: + continue + + for table_record in schema_record["db_schema_tables"]: + if table_record["table_name"] == name: + return True + + return False + + def _create_sql_schema( + self, + frame: DataFrame, + table_name: str, + keys: list[str] | None = None, + dtype: DtypeArg | None = None, + schema: str | None = None, + ) -> str: + raise NotImplementedError("not implemented for adbc") + + +# sqlite-specific sql strings and handler class +# dictionary used for readability purposes +_SQL_TYPES = { + "string": "TEXT", + "floating": "REAL", + "integer": "INTEGER", + "datetime": "TIMESTAMP", + "date": "DATE", + "time": "TIME", + "boolean": "INTEGER", +} + + +def _get_unicode_name(name: object): + try: + uname = str(name).encode("utf-8", "strict").decode("utf-8") + except UnicodeError as err: + raise ValueError(f"Cannot convert identifier to UTF-8: '{name}'") from err + return uname + + +def _get_valid_sqlite_name(name: object): + # See https://stackoverflow.com/questions/6514274/how-do-you-escape-strings\ + # -for-sqlite-table-column-names-in-python + # Ensure the string can be encoded as UTF-8. + # Ensure the string does not include any NUL characters. + # Replace all " with "". + # Wrap the entire thing in double quotes. 
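+    # For example (illustrative values, not from the original comments):
+    #   _get_valid_sqlite_name('abc')  -> '"abc"'
+    #   _get_valid_sqlite_name('ab"c') -> '"ab""c"'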
+ + uname = _get_unicode_name(name) + if not len(uname): + raise ValueError("Empty table or column name specified") + + nul_index = uname.find("\x00") + if nul_index >= 0: + raise ValueError("SQLite identifier cannot contain NULs") + return '"' + uname.replace('"', '""') + '"' + + +class SQLiteTable(SQLTable): + """ + Patch the SQLTable for fallback support. + Instead of a table variable just use the Create Table statement. + """ + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + self._register_date_adapters() + + def _register_date_adapters(self) -> None: + # GH 8341 + # register an adapter callable for datetime.time object + import sqlite3 + + # this will transform time(12,34,56,789) into '12:34:56.000789' + # (this is what sqlalchemy does) + def _adapt_time(t) -> str: + # This is faster than strftime + return f"{t.hour:02d}:{t.minute:02d}:{t.second:02d}.{t.microsecond:06d}" + + # Also register adapters for date/datetime and co + # xref https://docs.python.org/3.12/library/sqlite3.html#adapter-and-converter-recipes + # Python 3.12+ doesn't auto-register adapters for us anymore + + adapt_date_iso = lambda val: val.isoformat() + adapt_datetime_iso = lambda val: val.isoformat(" ") + + sqlite3.register_adapter(time, _adapt_time) + + sqlite3.register_adapter(date, adapt_date_iso) + sqlite3.register_adapter(datetime, adapt_datetime_iso) + + convert_date = lambda val: date.fromisoformat(val.decode()) + convert_timestamp = lambda val: datetime.fromisoformat(val.decode()) + + sqlite3.register_converter("date", convert_date) + sqlite3.register_converter("timestamp", convert_timestamp) + + def sql_schema(self) -> str: + return str(";\n".join(self.table)) + + def _execute_create(self) -> None: + with self.pd_sql.run_transaction() as conn: + for stmt in self.table: + conn.execute(stmt) + + def insert_statement(self, *, num_rows: int) -> str: + names = list(map(str, self.frame.columns)) + wld = "?" # wildcard char + escape = _get_valid_sqlite_name + + if self.index is not None: + for idx in self.index[::-1]: + names.insert(0, idx) + + bracketed_names = [escape(column) for column in names] + col_names = ",".join(bracketed_names) + + row_wildcards = ",".join([wld] * len(names)) + wildcards = ",".join([f"({row_wildcards})" for _ in range(num_rows)]) + insert_statement = ( + f"INSERT INTO {escape(self.name)} ({col_names}) VALUES {wildcards}" + ) + return insert_statement + + def _execute_insert(self, conn, keys, data_iter) -> int: + data_list = list(data_iter) + conn.executemany(self.insert_statement(num_rows=1), data_list) + return conn.rowcount + + def _execute_insert_multi(self, conn, keys, data_iter) -> int: + data_list = list(data_iter) + flattened_data = [x for row in data_list for x in row] + conn.execute(self.insert_statement(num_rows=len(data_list)), flattened_data) + return conn.rowcount + + def _create_table_setup(self): + """ + Return a list of SQL statements that creates a table reflecting the + structure of a DataFrame. The first entry will be a CREATE TABLE + statement while the rest will be CREATE INDEX statements. 
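+
+        For example (an illustrative single-column integer frame written to
+        a table named ``t``; not from the original docstring), the first
+        statement is roughly::
+
+            CREATE TABLE "t" (
+              "a" INTEGER
+            )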
+ """ + column_names_and_types = self._get_column_names_and_types(self._sql_type_name) + escape = _get_valid_sqlite_name + + create_tbl_stmts = [ + escape(cname) + " " + ctype for cname, ctype, _ in column_names_and_types + ] + + if self.keys is not None and len(self.keys): + if not is_list_like(self.keys): + keys = [self.keys] + else: + keys = self.keys + cnames_br = ", ".join([escape(c) for c in keys]) + create_tbl_stmts.append( + f"CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})" + ) + if self.schema: + schema_name = self.schema + "." + else: + schema_name = "" + create_stmts = [ + "CREATE TABLE " + + schema_name + + escape(self.name) + + " (\n" + + ",\n ".join(create_tbl_stmts) + + "\n)" + ] + + ix_cols = [cname for cname, _, is_index in column_names_and_types if is_index] + if len(ix_cols): + cnames = "_".join(ix_cols) + cnames_br = ",".join([escape(c) for c in ix_cols]) + create_stmts.append( + "CREATE INDEX " + + escape("ix_" + self.name + "_" + cnames) + + "ON " + + escape(self.name) + + " (" + + cnames_br + + ")" + ) + + return create_stmts + + def _sql_type_name(self, col): + dtype: DtypeArg = self.dtype or {} + if is_dict_like(dtype): + dtype = cast(dict, dtype) + if col.name in dtype: + return dtype[col.name] + + # Infer type of column, while ignoring missing values. + # Needed for inserting typed data containing NULLs, GH 8778. + col_type = lib.infer_dtype(col, skipna=True) + + if col_type == "timedelta64": + warnings.warn( + "the 'timedelta' type is not supported, and will be " + "written as integer values (ns frequency) to the database.", + UserWarning, + stacklevel=find_stack_level(), + ) + col_type = "integer" + + elif col_type == "datetime64": + col_type = "datetime" + + elif col_type == "empty": + col_type = "string" + + elif col_type == "complex": + raise ValueError("Complex datatypes not supported") + + if col_type not in _SQL_TYPES: + col_type = "string" + + return _SQL_TYPES[col_type] + + +class SQLiteDatabase(PandasSQL): + """ + Version of SQLDatabase to support SQLite connections (fallback without + SQLAlchemy). This should only be used internally. 
+ + Parameters + ---------- + con : sqlite connection object + + """ + + def __init__(self, con) -> None: + self.con = con + + @contextmanager + def run_transaction(self): + cur = self.con.cursor() + try: + yield cur + self.con.commit() + except Exception: + self.con.rollback() + raise + finally: + cur.close() + + def execute(self, sql: str | Select | TextClause, params=None): + if not isinstance(sql, str): + raise TypeError("Query must be a string unless using sqlalchemy.") + args = [] if params is None else [params] + cur = self.con.cursor() + try: + cur.execute(sql, *args) + return cur + except Exception as exc: + try: + self.con.rollback() + except Exception as inner_exc: # pragma: no cover + ex = DatabaseError( + f"Execution failed on sql: {sql}\n{exc}\nunable to rollback" + ) + raise ex from inner_exc + + ex = DatabaseError(f"Execution failed on sql '{sql}': {exc}") + raise ex from exc + + @staticmethod + def _query_iterator( + cursor, + chunksize: int, + columns, + index_col=None, + coerce_float: bool = True, + parse_dates=None, + dtype: DtypeArg | None = None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", + ): + """Return generator through chunked result set""" + has_read_data = False + while True: + data = cursor.fetchmany(chunksize) + if type(data) == tuple: + data = list(data) + if not data: + cursor.close() + if not has_read_data: + result = DataFrame.from_records( + [], columns=columns, coerce_float=coerce_float + ) + if dtype: + result = result.astype(dtype) + yield result + break + + has_read_data = True + yield _wrap_result( + data, + columns, + index_col=index_col, + coerce_float=coerce_float, + parse_dates=parse_dates, + dtype=dtype, + dtype_backend=dtype_backend, + ) + + def read_query( + self, + sql, + index_col=None, + coerce_float: bool = True, + parse_dates=None, + params=None, + chunksize: int | None = None, + dtype: DtypeArg | None = None, + dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", + ) -> DataFrame | Iterator[DataFrame]: + cursor = self.execute(sql, params) + columns = [col_desc[0] for col_desc in cursor.description] + + if chunksize is not None: + return self._query_iterator( + cursor, + chunksize, + columns, + index_col=index_col, + coerce_float=coerce_float, + parse_dates=parse_dates, + dtype=dtype, + dtype_backend=dtype_backend, + ) + else: + data = self._fetchall_as_list(cursor) + cursor.close() + + frame = _wrap_result( + data, + columns, + index_col=index_col, + coerce_float=coerce_float, + parse_dates=parse_dates, + dtype=dtype, + dtype_backend=dtype_backend, + ) + return frame + + def _fetchall_as_list(self, cur): + result = cur.fetchall() + if not isinstance(result, list): + result = list(result) + return result + + def to_sql( + self, + frame, + name: str, + if_exists: str = "fail", + index: bool = True, + index_label=None, + schema=None, + chunksize: int | None = None, + dtype: DtypeArg | None = None, + method: Literal["multi"] | Callable | None = None, + engine: str = "auto", + **engine_kwargs, + ) -> int | None: + """ + Write records stored in a DataFrame to a SQL database. + + Parameters + ---------- + frame: DataFrame + name: string + Name of SQL table. + if_exists: {'fail', 'replace', 'append'}, default 'fail' + fail: If table exists, do nothing. + replace: If table exists, drop it, recreate it, and insert data. + append: If table exists, insert data. Create if it does not exist. 
+ index : bool, default True + Write DataFrame index as a column + index_label : string or sequence, default None + Column label for index column(s). If None is given (default) and + `index` is True, then the index names are used. + A sequence should be given if the DataFrame uses MultiIndex. + schema : string, default None + Ignored parameter included for compatibility with SQLAlchemy + version of ``to_sql``. + chunksize : int, default None + If not None, then rows will be written in batches of this + size at a time. If None, all rows will be written at once. + dtype : single type or dict of column name to SQL type, default None + Optional specifying the datatype for columns. The SQL type should + be a string. If all columns are of the same type, one single value + can be used. + method : {None, 'multi', callable}, default None + Controls the SQL insertion clause used: + + * None : Uses standard SQL ``INSERT`` clause (one per row). + * 'multi': Pass multiple values in a single ``INSERT`` clause. + * callable with signature ``(pd_table, conn, keys, data_iter)``. + + Details and a sample callable implementation can be found in the + section :ref:`insert method `. + """ + if dtype: + if not is_dict_like(dtype): + # error: Value expression in dictionary comprehension has incompatible + # type "Union[ExtensionDtype, str, dtype[Any], Type[object], + # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]], + # Type[str], Type[float], Type[int], Type[complex], Type[bool], + # Type[object]]]]"; expected type "Union[ExtensionDtype, str, + # dtype[Any], Type[object]]" + dtype = {col_name: dtype for col_name in frame} # type: ignore[misc] + else: + dtype = cast(dict, dtype) + + for col, my_type in dtype.items(): + if not isinstance(my_type, str): + raise ValueError(f"{col} ({my_type}) not a string") + + table = SQLiteTable( + name, + self, + frame=frame, + index=index, + if_exists=if_exists, + index_label=index_label, + dtype=dtype, + ) + table.create() + return table.insert(chunksize, method) + + def has_table(self, name: str, schema: str | None = None) -> bool: + wld = "?" + query = f""" + SELECT + name + FROM + sqlite_master + WHERE + type IN ('table', 'view') + AND name={wld}; + """ + + return len(self.execute(query, [name]).fetchall()) > 0 + + def get_table(self, table_name: str, schema: str | None = None) -> None: + return None # not supported in fallback mode + + def drop_table(self, name: str, schema: str | None = None) -> None: + drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}" + self.execute(drop_sql) + + def _create_sql_schema( + self, + frame, + table_name: str, + keys=None, + dtype: DtypeArg | None = None, + schema: str | None = None, + ) -> str: + table = SQLiteTable( + table_name, + self, + frame=frame, + index=False, + keys=keys, + dtype=dtype, + schema=schema, + ) + return str(table.sql_schema()) + + +def get_schema( + frame, + name: str, + keys=None, + con=None, + dtype: DtypeArg | None = None, + schema: str | None = None, +) -> str: + """ + Get the SQL db table schema for the given frame. + + Parameters + ---------- + frame : DataFrame + name : str + name of SQL table + keys : string or sequence, default: None + columns to use a primary key + con: ADBC Connection, SQLAlchemy connectable, sqlite3 connection, default: None + ADBC provides high performance I/O with native type support, where available. + Using SQLAlchemy makes it possible to use any DB supported by that + library + If a DBAPI2 object, only sqlite3 is supported. 
+ dtype : dict of column name to SQL type, default None + Optional specifying the datatype for columns. The SQL type should + be a SQLAlchemy type, or a string for sqlite3 fallback connection. + schema: str, default: None + Optional specifying the schema to be used in creating the table. + """ + with pandasSQL_builder(con=con) as pandas_sql: + return pandas_sql._create_sql_schema( + frame, name, keys=keys, dtype=dtype, schema=schema + ) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/io/xml.py b/env-llmeval/lib/python3.10/site-packages/pandas/io/xml.py new file mode 100644 index 0000000000000000000000000000000000000000..ac497cd266027f7af71884996182e1725baba361 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/io/xml.py @@ -0,0 +1,1177 @@ +""" +:mod:``pandas.io.xml`` is a module for reading XML. +""" + +from __future__ import annotations + +import io +from os import PathLike +from typing import ( + TYPE_CHECKING, + Any, + Callable, +) +import warnings + +from pandas._libs import lib +from pandas.compat._optional import import_optional_dependency +from pandas.errors import ( + AbstractMethodError, + ParserError, +) +from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import check_dtype_backend + +from pandas.core.dtypes.common import is_list_like + +from pandas.core.shared_docs import _shared_docs + +from pandas.io.common import ( + file_exists, + get_handle, + infer_compression, + is_file_like, + is_fsspec_url, + is_url, + stringify_path, +) +from pandas.io.parsers import TextParser + +if TYPE_CHECKING: + from collections.abc import Sequence + from xml.etree.ElementTree import Element + + from lxml import etree + + from pandas._typing import ( + CompressionOptions, + ConvertersArg, + DtypeArg, + DtypeBackend, + FilePath, + ParseDatesArg, + ReadBuffer, + StorageOptions, + XMLParsers, + ) + + from pandas import DataFrame + + +@doc( + storage_options=_shared_docs["storage_options"], + decompression_options=_shared_docs["decompression_options"] % "path_or_buffer", +) +class _XMLFrameParser: + """ + Internal subclass to parse XML into DataFrames. + + Parameters + ---------- + path_or_buffer : a valid JSON ``str``, path object or file-like object + Any valid string path is acceptable. The string could be a URL. Valid + URL schemes include http, ftp, s3, and file. + + xpath : str or regex + The ``XPath`` expression to parse required set of nodes for + migration to :class:`~pandas.DataFrame`. ``etree`` supports limited ``XPath``. + + namespaces : dict + The namespaces defined in XML document (``xmlns:namespace='URI'``) + as dicts with key being namespace and value the URI. + + elems_only : bool + Parse only the child elements at the specified ``xpath``. + + attrs_only : bool + Parse only the attributes at the specified ``xpath``. + + names : list + Column names for :class:`~pandas.DataFrame` of parsed XML data. + + dtype : dict + Data type for data or columns. E.g. {{'a': np.float64, + 'b': np.int32, 'c': 'Int64'}} + + .. versionadded:: 1.5.0 + + converters : dict, optional + Dict of functions for converting values in certain columns. Keys can + either be integers or column labels. + + .. versionadded:: 1.5.0 + + parse_dates : bool or list of int or names or list of lists or dict + Converts either index or select columns to datetimes + + .. versionadded:: 1.5.0 + + encoding : str + Encoding of xml object or document. 
+ + stylesheet : str or file-like + URL, file, file-like object, or a raw string containing XSLT, + ``etree`` does not support XSLT but retained for consistency. + + iterparse : dict, optional + Dict with row element as key and list of descendant elements + and/or attributes as value to be retrieved in iterparsing of + XML document. + + .. versionadded:: 1.5.0 + + {decompression_options} + + .. versionchanged:: 1.4.0 Zstandard support. + + {storage_options} + + See also + -------- + pandas.io.xml._EtreeFrameParser + pandas.io.xml._LxmlFrameParser + + Notes + ----- + To subclass this class effectively you must override the following methods:` + * :func:`parse_data` + * :func:`_parse_nodes` + * :func:`_iterparse_nodes` + * :func:`_parse_doc` + * :func:`_validate_names` + * :func:`_validate_path` + + + See each method's respective documentation for details on their + functionality. + """ + + def __init__( + self, + path_or_buffer: FilePath | ReadBuffer[bytes] | ReadBuffer[str], + xpath: str, + namespaces: dict[str, str] | None, + elems_only: bool, + attrs_only: bool, + names: Sequence[str] | None, + dtype: DtypeArg | None, + converters: ConvertersArg | None, + parse_dates: ParseDatesArg | None, + encoding: str | None, + stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None, + iterparse: dict[str, list[str]] | None, + compression: CompressionOptions, + storage_options: StorageOptions, + ) -> None: + self.path_or_buffer = path_or_buffer + self.xpath = xpath + self.namespaces = namespaces + self.elems_only = elems_only + self.attrs_only = attrs_only + self.names = names + self.dtype = dtype + self.converters = converters + self.parse_dates = parse_dates + self.encoding = encoding + self.stylesheet = stylesheet + self.iterparse = iterparse + self.is_style = None + self.compression: CompressionOptions = compression + self.storage_options = storage_options + + def parse_data(self) -> list[dict[str, str | None]]: + """ + Parse xml data. + + This method will call the other internal methods to + validate ``xpath``, names, parse and return specific nodes. + """ + + raise AbstractMethodError(self) + + def _parse_nodes(self, elems: list[Any]) -> list[dict[str, str | None]]: + """ + Parse xml nodes. + + This method will parse the children and attributes of elements + in ``xpath``, conditionally for only elements, only attributes + or both while optionally renaming node names. + + Raises + ------ + ValueError + * If only elements and only attributes are specified. + + Notes + ----- + Namespace URIs will be removed from return node values. Also, + elements with missing children or attributes compared to siblings + will have optional keys filled with None values. 
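+
+        For example (illustrative input, not from the original docstring), a
+        row element ``<row><a>1</a></row>`` parses to ``{"a": "1"}``, and a
+        sibling ``<row/>`` yields ``{"a": None}``.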
+ """ + + dicts: list[dict[str, str | None]] + + if self.elems_only and self.attrs_only: + raise ValueError("Either element or attributes can be parsed not both.") + if self.elems_only: + if self.names: + dicts = [ + { + **( + {el.tag: el.text} + if el.text and not el.text.isspace() + else {} + ), + **{ + nm: ch.text if ch.text else None + for nm, ch in zip(self.names, el.findall("*")) + }, + } + for el in elems + ] + else: + dicts = [ + {ch.tag: ch.text if ch.text else None for ch in el.findall("*")} + for el in elems + ] + + elif self.attrs_only: + dicts = [ + {k: v if v else None for k, v in el.attrib.items()} for el in elems + ] + + elif self.names: + dicts = [ + { + **el.attrib, + **({el.tag: el.text} if el.text and not el.text.isspace() else {}), + **{ + nm: ch.text if ch.text else None + for nm, ch in zip(self.names, el.findall("*")) + }, + } + for el in elems + ] + + else: + dicts = [ + { + **el.attrib, + **({el.tag: el.text} if el.text and not el.text.isspace() else {}), + **{ch.tag: ch.text if ch.text else None for ch in el.findall("*")}, + } + for el in elems + ] + + dicts = [ + {k.split("}")[1] if "}" in k else k: v for k, v in d.items()} for d in dicts + ] + + keys = list(dict.fromkeys([k for d in dicts for k in d.keys()])) + dicts = [{k: d[k] if k in d.keys() else None for k in keys} for d in dicts] + + if self.names: + dicts = [dict(zip(self.names, d.values())) for d in dicts] + + return dicts + + def _iterparse_nodes(self, iterparse: Callable) -> list[dict[str, str | None]]: + """ + Iterparse xml nodes. + + This method will read in local disk, decompressed XML files for elements + and underlying descendants using iterparse, a method to iterate through + an XML tree without holding entire XML tree in memory. + + Raises + ------ + TypeError + * If ``iterparse`` is not a dict or its dict value is not list-like. + ParserError + * If ``path_or_buffer`` is not a physical file on disk or file-like object. + * If no data is returned from selected items in ``iterparse``. + + Notes + ----- + Namespace URIs will be removed from return node values. Also, + elements with missing children or attributes in submitted list + will have optional keys filled with None values. + """ + + dicts: list[dict[str, str | None]] = [] + row: dict[str, str | None] | None = None + + if not isinstance(self.iterparse, dict): + raise TypeError( + f"{type(self.iterparse).__name__} is not a valid type for iterparse" + ) + + row_node = next(iter(self.iterparse.keys())) if self.iterparse else "" + if not is_list_like(self.iterparse[row_node]): + raise TypeError( + f"{type(self.iterparse[row_node])} is not a valid type " + "for value in iterparse" + ) + + if (not hasattr(self.path_or_buffer, "read")) and ( + not isinstance(self.path_or_buffer, (str, PathLike)) + or is_url(self.path_or_buffer) + or is_fsspec_url(self.path_or_buffer) + or ( + isinstance(self.path_or_buffer, str) + and self.path_or_buffer.startswith((" list[Any]: + """ + Validate ``xpath``. + + This method checks for syntax, evaluation, or empty nodes return. + + Raises + ------ + SyntaxError + * If xpah is not supported or issues with namespaces. + + ValueError + * If xpah does not return any nodes. + """ + + raise AbstractMethodError(self) + + def _validate_names(self) -> None: + """ + Validate names. + + This method will check if names is a list-like and aligns + with length of parse nodes. + + Raises + ------ + ValueError + * If value is not a list and less then length of nodes. 
+ """ + raise AbstractMethodError(self) + + def _parse_doc( + self, raw_doc: FilePath | ReadBuffer[bytes] | ReadBuffer[str] + ) -> Element | etree._Element: + """ + Build tree from path_or_buffer. + + This method will parse XML object into tree + either from string/bytes or file location. + """ + raise AbstractMethodError(self) + + +class _EtreeFrameParser(_XMLFrameParser): + """ + Internal class to parse XML into DataFrames with the Python + standard library XML module: `xml.etree.ElementTree`. + """ + + def parse_data(self) -> list[dict[str, str | None]]: + from xml.etree.ElementTree import iterparse + + if self.stylesheet is not None: + raise ValueError( + "To use stylesheet, you need lxml installed and selected as parser." + ) + + if self.iterparse is None: + self.xml_doc = self._parse_doc(self.path_or_buffer) + elems = self._validate_path() + + self._validate_names() + + xml_dicts: list[dict[str, str | None]] = ( + self._parse_nodes(elems) + if self.iterparse is None + else self._iterparse_nodes(iterparse) + ) + + return xml_dicts + + def _validate_path(self) -> list[Any]: + """ + Notes + ----- + ``etree`` supports limited ``XPath``. If user attempts a more complex + expression syntax error will raise. + """ + + msg = ( + "xpath does not return any nodes or attributes. " + "Be sure to specify in `xpath` the parent nodes of " + "children and attributes to parse. " + "If document uses namespaces denoted with " + "xmlns, be sure to define namespaces and " + "use them in xpath." + ) + try: + elems = self.xml_doc.findall(self.xpath, namespaces=self.namespaces) + children = [ch for el in elems for ch in el.findall("*")] + attrs = {k: v for el in elems for k, v in el.attrib.items()} + + if elems is None: + raise ValueError(msg) + + if elems is not None: + if self.elems_only and children == []: + raise ValueError(msg) + if self.attrs_only and attrs == {}: + raise ValueError(msg) + if children == [] and attrs == {}: + raise ValueError(msg) + + except (KeyError, SyntaxError): + raise SyntaxError( + "You have used an incorrect or unsupported XPath " + "expression for etree library or you used an " + "undeclared namespace prefix." + ) + + return elems + + def _validate_names(self) -> None: + children: list[Any] + + if self.names: + if self.iterparse: + children = self.iterparse[next(iter(self.iterparse))] + else: + parent = self.xml_doc.find(self.xpath, namespaces=self.namespaces) + children = parent.findall("*") if parent is not None else [] + + if is_list_like(self.names): + if len(self.names) < len(children): + raise ValueError( + "names does not match length of child elements in xpath." + ) + else: + raise TypeError( + f"{type(self.names).__name__} is not a valid type for names" + ) + + def _parse_doc( + self, raw_doc: FilePath | ReadBuffer[bytes] | ReadBuffer[str] + ) -> Element: + from xml.etree.ElementTree import ( + XMLParser, + parse, + ) + + handle_data = get_data_from_filepath( + filepath_or_buffer=raw_doc, + encoding=self.encoding, + compression=self.compression, + storage_options=self.storage_options, + ) + + with preprocess_data(handle_data) as xml_data: + curr_parser = XMLParser(encoding=self.encoding) + document = parse(xml_data, parser=curr_parser) + + return document.getroot() + + +class _LxmlFrameParser(_XMLFrameParser): + """ + Internal class to parse XML into :class:`~pandas.DataFrame` with third-party + full-featured XML library, ``lxml``, that supports + ``XPath`` 1.0 and XSLT 1.0. + """ + + def parse_data(self) -> list[dict[str, str | None]]: + """ + Parse xml data. 
+ + This method will call the other internal methods to + validate ``xpath``, names, optionally parse and run XSLT, + and parse original or transformed XML and return specific nodes. + """ + from lxml.etree import iterparse + + if self.iterparse is None: + self.xml_doc = self._parse_doc(self.path_or_buffer) + + if self.stylesheet: + self.xsl_doc = self._parse_doc(self.stylesheet) + self.xml_doc = self._transform_doc() + + elems = self._validate_path() + + self._validate_names() + + xml_dicts: list[dict[str, str | None]] = ( + self._parse_nodes(elems) + if self.iterparse is None + else self._iterparse_nodes(iterparse) + ) + + return xml_dicts + + def _validate_path(self) -> list[Any]: + msg = ( + "xpath does not return any nodes or attributes. " + "Be sure to specify in `xpath` the parent nodes of " + "children and attributes to parse. " + "If document uses namespaces denoted with " + "xmlns, be sure to define namespaces and " + "use them in xpath." + ) + + elems = self.xml_doc.xpath(self.xpath, namespaces=self.namespaces) + children = [ch for el in elems for ch in el.xpath("*")] + attrs = {k: v for el in elems for k, v in el.attrib.items()} + + if elems == []: + raise ValueError(msg) + + if elems != []: + if self.elems_only and children == []: + raise ValueError(msg) + if self.attrs_only and attrs == {}: + raise ValueError(msg) + if children == [] and attrs == {}: + raise ValueError(msg) + + return elems + + def _validate_names(self) -> None: + children: list[Any] + + if self.names: + if self.iterparse: + children = self.iterparse[next(iter(self.iterparse))] + else: + children = self.xml_doc.xpath( + self.xpath + "[1]/*", namespaces=self.namespaces + ) + + if is_list_like(self.names): + if len(self.names) < len(children): + raise ValueError( + "names does not match length of child elements in xpath." + ) + else: + raise TypeError( + f"{type(self.names).__name__} is not a valid type for names" + ) + + def _parse_doc( + self, raw_doc: FilePath | ReadBuffer[bytes] | ReadBuffer[str] + ) -> etree._Element: + from lxml.etree import ( + XMLParser, + fromstring, + parse, + ) + + handle_data = get_data_from_filepath( + filepath_or_buffer=raw_doc, + encoding=self.encoding, + compression=self.compression, + storage_options=self.storage_options, + ) + + with preprocess_data(handle_data) as xml_data: + curr_parser = XMLParser(encoding=self.encoding) + + if isinstance(xml_data, io.StringIO): + if self.encoding is None: + raise TypeError( + "Can not pass encoding None when input is StringIO." + ) + + document = fromstring( + xml_data.getvalue().encode(self.encoding), parser=curr_parser + ) + else: + document = parse(xml_data, parser=curr_parser) + + return document + + def _transform_doc(self) -> etree._XSLTResultTree: + """ + Transform original tree using stylesheet. + + This method will transform original xml using XSLT script into + am ideally flatter xml document for easier parsing and migration + to Data Frame. + """ + from lxml.etree import XSLT + + transformer = XSLT(self.xsl_doc) + new_doc = transformer(self.xml_doc) + + return new_doc + + +def get_data_from_filepath( + filepath_or_buffer: FilePath | bytes | ReadBuffer[bytes] | ReadBuffer[str], + encoding: str | None, + compression: CompressionOptions, + storage_options: StorageOptions, +) -> str | bytes | ReadBuffer[bytes] | ReadBuffer[str]: + """ + Extract raw XML data. + + The method accepts three input types: + 1. filepath (string-like) + 2. file-like object (e.g. open file object, StringIO) + 3. 
XML string or bytes
+
+    This method turns (1) into (2) to simplify the rest of the processing.
+    It returns input types (2) and (3) unchanged.
+    """
+    if not isinstance(filepath_or_buffer, bytes):
+        filepath_or_buffer = stringify_path(filepath_or_buffer)
+
+    if (
+        isinstance(filepath_or_buffer, str)
+        and not filepath_or_buffer.startswith(("<?xml", "<"))
+    ) and (
+        not isinstance(filepath_or_buffer, str)
+        or is_url(filepath_or_buffer)
+        or is_fsspec_url(filepath_or_buffer)
+        or file_exists(filepath_or_buffer)
+    ):
+        with get_handle(
+            filepath_or_buffer,
+            "r",
+            encoding=encoding,
+            compression=compression,
+            storage_options=storage_options,
+        ) as handle_obj:
+            filepath_or_buffer = (
+                handle_obj.handle.read()
+                if hasattr(handle_obj.handle, "read")
+                else handle_obj.handle
+            )
+
+    return filepath_or_buffer
+
+
+def preprocess_data(data) -> io.StringIO | io.BytesIO:
+    """
+    Convert extracted raw data.
+
+    This method will return underlying data of extracted XML content.
+    The data either has a `read` attribute (e.g. a file object or a
+    StringIO/BytesIO) or is a string or bytes that is an XML document.
+    """
+
+    if isinstance(data, str):
+        data = io.StringIO(data)
+
+    elif isinstance(data, bytes):
+        data = io.BytesIO(data)
+
+    return data
+
+
+def _data_to_frame(data, **kwargs) -> DataFrame:
+    """
+    Convert parsed data to Data Frame.
+
+    This method will bind xml dictionary data of keys and values
+    into named columns of Data Frame using the built-in TextParser
+    class that builds a Data Frame and infers specific dtypes.
+    """
+
+    tags = next(iter(data))
+    nodes = [list(d.values()) for d in data]
+
+    try:
+        with TextParser(nodes, names=tags, **kwargs) as tp:
+            return tp.read()
+    except ParserError:
+        raise ParserError(
+            "XML document may be too complex for import. "
+            "Try to flatten document and use distinct "
+            "element and attribute names."
+        )
+
+
+def _parse(
+    path_or_buffer: FilePath | ReadBuffer[bytes] | ReadBuffer[str],
+    xpath: str,
+    namespaces: dict[str, str] | None,
+    elems_only: bool,
+    attrs_only: bool,
+    names: Sequence[str] | None,
+    dtype: DtypeArg | None,
+    converters: ConvertersArg | None,
+    parse_dates: ParseDatesArg | None,
+    encoding: str | None,
+    parser: XMLParsers,
+    stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None,
+    iterparse: dict[str, list[str]] | None,
+    compression: CompressionOptions,
+    storage_options: StorageOptions,
+    dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+    **kwargs,
+) -> DataFrame:
+    """
+    Call internal parsers.
+
+    This method will conditionally call internal parsers:
+    LxmlFrameParser and/or EtreeParser.
+
+    Raises
+    ------
+    ImportError
+        * If lxml is selected as parser but is not installed.
+
+    ValueError
+        * If parser is not lxml or etree.
+    """
+
+    p: _EtreeFrameParser | _LxmlFrameParser
+
+    if isinstance(path_or_buffer, str) and not any(
+        [
+            is_file_like(path_or_buffer),
+            file_exists(path_or_buffer),
+            is_url(path_or_buffer),
+            is_fsspec_url(path_or_buffer),
+        ]
+    ):
+        warnings.warn(
+            "Passing literal xml to 'read_xml' is deprecated and "
+            "will be removed in a future version. 
To read from a " + "literal string, wrap it in a 'StringIO' object.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + if parser == "lxml": + lxml = import_optional_dependency("lxml.etree", errors="ignore") + + if lxml is not None: + p = _LxmlFrameParser( + path_or_buffer, + xpath, + namespaces, + elems_only, + attrs_only, + names, + dtype, + converters, + parse_dates, + encoding, + stylesheet, + iterparse, + compression, + storage_options, + ) + else: + raise ImportError("lxml not found, please install or use the etree parser.") + + elif parser == "etree": + p = _EtreeFrameParser( + path_or_buffer, + xpath, + namespaces, + elems_only, + attrs_only, + names, + dtype, + converters, + parse_dates, + encoding, + stylesheet, + iterparse, + compression, + storage_options, + ) + else: + raise ValueError("Values for parser can only be lxml or etree.") + + data_dicts = p.parse_data() + + return _data_to_frame( + data=data_dicts, + dtype=dtype, + converters=converters, + parse_dates=parse_dates, + dtype_backend=dtype_backend, + **kwargs, + ) + + +@doc( + storage_options=_shared_docs["storage_options"], + decompression_options=_shared_docs["decompression_options"] % "path_or_buffer", +) +def read_xml( + path_or_buffer: FilePath | ReadBuffer[bytes] | ReadBuffer[str], + *, + xpath: str = "./*", + namespaces: dict[str, str] | None = None, + elems_only: bool = False, + attrs_only: bool = False, + names: Sequence[str] | None = None, + dtype: DtypeArg | None = None, + converters: ConvertersArg | None = None, + parse_dates: ParseDatesArg | None = None, + # encoding can not be None for lxml and StringIO input + encoding: str | None = "utf-8", + parser: XMLParsers = "lxml", + stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None = None, + iterparse: dict[str, list[str]] | None = None, + compression: CompressionOptions = "infer", + storage_options: StorageOptions | None = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, +) -> DataFrame: + r""" + Read XML document into a :class:`~pandas.DataFrame` object. + + .. versionadded:: 1.3.0 + + Parameters + ---------- + path_or_buffer : str, path object, or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a ``read()`` function. The string can be any valid XML + string or a path. The string can further be a URL. Valid URL schemes + include http, ftp, s3, and file. + + .. deprecated:: 2.1.0 + Passing xml literal strings is deprecated. + Wrap literal xml input in ``io.StringIO`` or ``io.BytesIO`` instead. + + xpath : str, optional, default './\*' + The ``XPath`` to parse required set of nodes for migration to + :class:`~pandas.DataFrame`.``XPath`` should return a collection of elements + and not a single element. Note: The ``etree`` parser supports limited ``XPath`` + expressions. For more complex ``XPath``, use ``lxml`` which requires + installation. + + namespaces : dict, optional + The namespaces defined in XML document as dicts with key being + namespace prefix and value the URI. There is no need to include all + namespaces in XML, only the ones used in ``xpath`` expression. + Note: if XML document uses default namespace denoted as + `xmlns=''` without a prefix, you must assign any temporary + namespace prefix such as 'doc' to the URI in order to parse + underlying nodes and/or attributes. For example, :: + + namespaces = {{"doc": "https://example.com"}} + + elems_only : bool, optional, default False + Parse only the child elements at the specified ``xpath``. 
+    attrs_only : bool, optional, default False
+        Parse only the attributes at the specified ``xpath``.
+        By default, all attributes are returned.
+
+    names : list-like, optional
+        Column names for DataFrame of parsed XML data. Use this parameter to
+        rename original element names and distinguish same-named elements and
+        attributes.
+
+    dtype : Type name or dict of column -> type, optional
+        Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32,
+        'c': 'Int64'}}
+        Use `str` or `object` together with suitable `na_values` settings
+        to preserve and not interpret dtype.
+        If converters are specified, they will be applied INSTEAD
+        of dtype conversion.
+
+        .. versionadded:: 1.5.0
+
+    converters : dict, optional
+        Dict of functions for converting values in certain columns. Keys can either
+        be integers or column labels.
+
+        .. versionadded:: 1.5.0
+
+    parse_dates : bool or list of int or names or list of lists or dict, default False
+        Identifiers to parse index or columns to datetime. The behavior is as follows:
+
+        * boolean. If True -> try parsing the index.
+        * list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
+          each as a separate date column.
+        * list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
+          a single date column.
+        * dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
+          result 'foo'
+
+        .. versionadded:: 1.5.0
+
+    encoding : str, optional, default 'utf-8'
+        Encoding of XML document.
+
+    parser : {{'lxml','etree'}}, default 'lxml'
+        Parser module to use for retrieval of data. Only 'lxml' and
+        'etree' are supported. With 'lxml' more complex ``XPath`` searches
+        and ability to use XSLT stylesheet are supported.
+
+    stylesheet : str, path object or file-like object
+        A URL, file-like object, or a raw string containing an XSLT script.
+        This stylesheet should flatten complex, deeply nested XML documents
+        for easier parsing. To use this feature you must have ``lxml`` module
+        installed and specify 'lxml' as ``parser``. The ``xpath`` must
+        reference nodes of the transformed XML document generated after XSLT
+        transformation and not the original XML document. Only XSLT 1.0
+        scripts, and not later versions, are currently supported.
+
+    iterparse : dict, optional
+        The nodes or attributes to retrieve in iterparsing of XML document
+        as a dict with key being the name of repeating element and value being
+        list of elements or attribute names that are descendants of the repeated
+        element. Note: If this option is used, it will replace ``xpath`` parsing
+        and unlike ``xpath``, descendants do not need to relate to each other but can
+        exist anywhere in the document under the repeating element. This
+        memory-efficient method should be used for very large XML files
+        (500MB, 1GB, or 5GB+). For example, ::
+
+            iterparse = {{"row_element": ["child_elem", "attr", "grandchild_elem"]}}
+
+        .. versionadded:: 1.5.0
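As a fuller sketch of iterparse (the file name and element names here are hypothetical), a large document of repeating row elements could be read without building the whole tree in memory:

    import pandas as pd

    # iterparse requires a local, uncompressed file on disk
    df = pd.read_xml(
        "very_large.xml",
        iterparse={"row": ["shape", "degrees", "sides"]},
    )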
+
+    {decompression_options}
+
+        .. versionchanged:: 1.4.0 Zstandard support.
+
+    {storage_options}
+
+    dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'
+        Back-end data type applied to the resultant :class:`DataFrame`
+        (still experimental). Behaviour is as follows:
+
+        * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+          (default).
+        * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+          DataFrame.
+
+        .. versionadded:: 2.0
+
+    Returns
+    -------
+    df
+        A DataFrame.
+
+    See Also
+    --------
+    read_json : Convert a JSON string to pandas object.
+    read_html : Read HTML tables into a list of DataFrame objects.
+
+    Notes
+    -----
+    This method is best designed to import shallow XML documents in the
+    following format which is the ideal fit for the two-dimensions of a
+    ``DataFrame`` (row by column). ::
+
+            <root>
+                <row>
+                  <column1>data</column1>
+                  <column2>data</column2>
+                  <column3>data</column3>
+                  ...
+               </row>
+               <row>
+                  ...
+               </row>
+               ...
+            </root>
+
+    As a file format, XML documents can be designed any way including
+    layout of elements and attributes as long as it conforms to W3C
+    specifications. Therefore, this method is a convenience handler for
+    a specific flatter design and not all possible XML structures.
+
+    However, for more complex XML documents, ``stylesheet`` allows you to
+    temporarily redesign the original document with XSLT (a special purpose
+    language) for a flatter version for migration to a DataFrame.
+
+    This function will *always* return a single :class:`DataFrame` or raise
+    exceptions due to issues with XML document, ``xpath``, or other
+    parameters.
+
+    See the :ref:`read_xml documentation in the IO section of the docs
+    <io.read_xml>` for more information on using this method to parse XML
+    files to DataFrames.
+
+    Examples
+    --------
+    >>> from io import StringIO
+    >>> xml = '''<?xml version='1.0' encoding='utf-8'?>
+    ... <data xmlns="http://example.com">
+    ...  <row>
+    ...    <shape>square</shape>
+    ...    <degrees>360</degrees>
+    ...    <sides>4.0</sides>
+    ...  </row>
+    ...  <row>
+    ...    <shape>circle</shape>
+    ...    <degrees>360</degrees>
+    ...    <sides/>
+    ...  </row>
+    ...  <row>
+    ...    <shape>triangle</shape>
+    ...    <degrees>180</degrees>
+    ...    <sides>3.0</sides>
+    ...  </row>
+    ... </data>'''
+
+    >>> df = pd.read_xml(StringIO(xml))
+    >>> df
+          shape  degrees  sides
+    0    square      360    4.0
+    1    circle      360    NaN
+    2  triangle      180    3.0
+
+    >>> xml = '''<?xml version='1.0' encoding='utf-8'?>
+    ... <data>
+    ...   <row shape="square" degrees="360" sides="4.0"/>
+    ...   <row shape="circle" degrees="360"/>
+    ...   <row shape="triangle" degrees="180" sides="3.0"/>
+    ... </data>'''
+
+    >>> df = pd.read_xml(StringIO(xml), xpath=".//row")
+    >>> df
+          shape  degrees  sides
+    0    square      360    4.0
+    1    circle      360    NaN
+    2  triangle      180    3.0
+
+    >>> xml = '''<?xml version='1.0' encoding='utf-8'?>
+    ... <doc:data xmlns:doc="https://example.com">
+    ...   <doc:row>
+    ...     <doc:shape>square</doc:shape>
+    ...     <doc:degrees>360</doc:degrees>
+    ...     <doc:sides>4.0</doc:sides>
+    ...   </doc:row>
+    ...   <doc:row>
+    ...     <doc:shape>circle</doc:shape>
+    ...     <doc:degrees>360</doc:degrees>
+    ...     <doc:sides/>
+    ...   </doc:row>
+    ...   <doc:row>
+    ...     <doc:shape>triangle</doc:shape>
+    ...     <doc:degrees>180</doc:degrees>
+    ...     <doc:sides>3.0</doc:sides>
+    ...   </doc:row>
+    ... </doc:data>'''
+
+    >>> df = pd.read_xml(StringIO(xml),
+    ...                  xpath="//doc:row",
+    ...                  namespaces={{"doc": "https://example.com"}})
+    >>> df
+          shape  degrees  sides
+    0    square      360    4.0
+    1    circle      360    NaN
+    2  triangle      180    3.0
+
+    >>> xml_data = '''
+    ...         <data>
+    ...            <row>
+    ...               <index>0</index>
+    ...               <a>1</a>
+    ...               <b>2.5</b>
+    ...               <c>True</c>
+    ...               <d>a</d>
+    ...               <e>2019-12-31 00:00:00</e>
+    ...            </row>
+    ...            <row>
+    ...               <index>1</index>
+    ...               <b>4.5</b>
+    ...               <c>False</c>
+    ...               <d>b</d>
+    ...               <e>2019-12-31 00:00:00</e>
+    ...            </row>
+    ...         </data>'''
+
+    >>> df = pd.read_xml(StringIO(xml_data),
+    ...                  dtype_backend="numpy_nullable",
+    ...                  parse_dates=["e"])
+    >>> df
+       index     a    b      c  d          e
+    0      0     1  2.5   True  a 2019-12-31
+    1      1  <NA>  4.5  False  b 2019-12-31
+    """
+    check_dtype_backend(dtype_backend)
+
+    return _parse(
+        path_or_buffer=path_or_buffer,
+        xpath=xpath,
+        namespaces=namespaces,
+        elems_only=elems_only,
+        attrs_only=attrs_only,
+        names=names,
+        dtype=dtype,
+        converters=converters,
+        parse_dates=parse_dates,
+        encoding=encoding,
+        parser=parser,
+        stylesheet=stylesheet,
+        iterparse=iterparse,
+        compression=compression,
+        storage_options=storage_options,
+        dtype_backend=dtype_backend,
+    )
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/dots.png b/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/dots.png
new file mode 100644
index 0000000000000000000000000000000000000000..2cb593b8e1cf68e429cc8402838c31f70be59afc
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/ndimage/tests/dots.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b20b56fadc7471c0694d3e8148d9e28a83d7967bac16bf8852094afea3950414
+size 2114
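Rounding out the read_xml examples above, the sketch below (file name and element names are hypothetical) shows that compression="infer", the default, resolves the codec from the file extension, so gzipped XML reads with no extra arguments:

    import gzip
    import pandas as pd

    xml = (
        "<data>"
        "<row><shape>square</shape><degrees>360</degrees></row>"
        "<row><shape>circle</shape><degrees>360</degrees></row>"
        "</data>"
    )
    with gzip.open("shapes.xml.gz", "wt", encoding="utf-8") as f:
        f.write(xml)

    # compression inferred as gzip from the .gz suffix;
    # uses the default lxml parser, or pass parser="etree" if lxml is unavailable
    df = pd.read_xml("shapes.xml.gz")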