diff --git a/ckpts/hf_ckpt/global_step40/tokenizer.model b/ckpts/hf_ckpt/global_step40/tokenizer.model new file mode 100644 index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899 --- /dev/null +++ b/ckpts/hf_ckpt/global_step40/tokenizer.model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347 +size 499723 diff --git a/ckpts/hf_ckpt/tokenizer.model b/ckpts/hf_ckpt/tokenizer.model new file mode 100644 index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899 --- /dev/null +++ b/ckpts/hf_ckpt/tokenizer.model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347 +size 499723 diff --git a/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/LICENSE.rst b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/LICENSE.rst new file mode 100644 index 0000000000000000000000000000000000000000..c37cae49ec77ad6ebb25568c1605f1fee5313cfb --- /dev/null +++ b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/LICENSE.rst @@ -0,0 +1,28 @@ +Copyright 2007 Pallets + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/METADATA b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..56e942902a96e7f012479a582c5cf89511219f9a --- /dev/null +++ b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/METADATA @@ -0,0 +1,105 @@ +Metadata-Version: 2.1 +Name: Jinja2 +Version: 3.1.3 +Summary: A very fast and expressive template engine. 
+Home-page: https://palletsprojects.com/p/jinja/ +Maintainer: Pallets +Maintainer-email: contact@palletsprojects.com +License: BSD-3-Clause +Project-URL: Donate, https://palletsprojects.com/donate +Project-URL: Documentation, https://jinja.palletsprojects.com/ +Project-URL: Changes, https://jinja.palletsprojects.com/changes/ +Project-URL: Source Code, https://github.com/pallets/jinja/ +Project-URL: Issue Tracker, https://github.com/pallets/jinja/issues/ +Project-URL: Chat, https://discord.gg/pallets +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content +Classifier: Topic :: Text Processing :: Markup :: HTML +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE.rst +Requires-Dist: MarkupSafe >=2.0 +Provides-Extra: i18n +Requires-Dist: Babel >=2.7 ; extra == 'i18n' + +Jinja +===== + +Jinja is a fast, expressive, extensible templating engine. Special +placeholders in the template allow writing code similar to Python +syntax. Then the template is passed data to render the final document. + +It includes: + +- Template inheritance and inclusion. +- Define and import macros within templates. +- HTML templates can use autoescaping to prevent XSS from untrusted + user input. +- A sandboxed environment can safely render untrusted templates. +- AsyncIO support for generating templates and calling async + functions. +- I18N support with Babel. +- Templates are compiled to optimized Python code just-in-time and + cached, or can be compiled ahead-of-time. +- Exceptions point to the correct line in templates to make debugging + easier. +- Extensible filters, tests, functions, and even syntax. + +Jinja's philosophy is that while application logic belongs in Python if +possible, it shouldn't make the template designer's job difficult by +restricting functionality too much. + + +Installing +---------- + +Install and update using `pip`_: + +.. code-block:: text + + $ pip install -U Jinja2 + +.. _pip: https://pip.pypa.io/en/stable/getting-started/ + + +In A Nutshell +------------- + +.. code-block:: jinja + + {% extends "base.html" %} + {% block title %}Members{% endblock %} + {% block content %} + + {% endblock %} + + +Donate +------ + +The Pallets organization develops and supports Jinja and other popular +packages. In order to grow the community of contributors and users, and +allow the maintainers to devote more time to the projects, `please +donate today`_. + +.. 
_please donate today: https://palletsprojects.com/donate + + +Links +----- + +- Documentation: https://jinja.palletsprojects.com/ +- Changes: https://jinja.palletsprojects.com/changes/ +- PyPI Releases: https://pypi.org/project/Jinja2/ +- Source Code: https://github.com/pallets/jinja/ +- Issue Tracker: https://github.com/pallets/jinja/issues/ +- Chat: https://discord.gg/pallets diff --git a/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/RECORD b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..8a511ac007aa6ef6cabad0bdb17d166d1398c11d --- /dev/null +++ b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/RECORD @@ -0,0 +1,58 @@ +Jinja2-3.1.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +Jinja2-3.1.3.dist-info/LICENSE.rst,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475 +Jinja2-3.1.3.dist-info/METADATA,sha256=0cLNbRCI91jytc7Bzv3XAQfZzFDF2gxkJuH46eF5vew,3301 +Jinja2-3.1.3.dist-info/RECORD,, +Jinja2-3.1.3.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92 +Jinja2-3.1.3.dist-info/entry_points.txt,sha256=zRd62fbqIyfUpsRtU7EVIFyiu1tPwfgO7EvPErnxgTE,59 +Jinja2-3.1.3.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7 +jinja2/__init__.py,sha256=NTBwMwsECrdHmxeXF7seusHLzrh6Ldn1A9qhS5cDuf0,1927 +jinja2/__pycache__/__init__.cpython-310.pyc,, +jinja2/__pycache__/_identifier.cpython-310.pyc,, +jinja2/__pycache__/async_utils.cpython-310.pyc,, +jinja2/__pycache__/bccache.cpython-310.pyc,, +jinja2/__pycache__/compiler.cpython-310.pyc,, +jinja2/__pycache__/constants.cpython-310.pyc,, +jinja2/__pycache__/debug.cpython-310.pyc,, +jinja2/__pycache__/defaults.cpython-310.pyc,, +jinja2/__pycache__/environment.cpython-310.pyc,, +jinja2/__pycache__/exceptions.cpython-310.pyc,, +jinja2/__pycache__/ext.cpython-310.pyc,, +jinja2/__pycache__/filters.cpython-310.pyc,, +jinja2/__pycache__/idtracking.cpython-310.pyc,, +jinja2/__pycache__/lexer.cpython-310.pyc,, +jinja2/__pycache__/loaders.cpython-310.pyc,, +jinja2/__pycache__/meta.cpython-310.pyc,, +jinja2/__pycache__/nativetypes.cpython-310.pyc,, +jinja2/__pycache__/nodes.cpython-310.pyc,, +jinja2/__pycache__/optimizer.cpython-310.pyc,, +jinja2/__pycache__/parser.cpython-310.pyc,, +jinja2/__pycache__/runtime.cpython-310.pyc,, +jinja2/__pycache__/sandbox.cpython-310.pyc,, +jinja2/__pycache__/tests.cpython-310.pyc,, +jinja2/__pycache__/utils.cpython-310.pyc,, +jinja2/__pycache__/visitor.cpython-310.pyc,, +jinja2/_identifier.py,sha256=_zYctNKzRqlk_murTNlzrju1FFJL7Va_Ijqqd7ii2lU,1958 +jinja2/async_utils.py,sha256=dFcmh6lMNfbh7eLKrBio8JqAKLHdZbpCuurFN4OERtY,2447 +jinja2/bccache.py,sha256=mhz5xtLxCcHRAa56azOhphIAe19u1we0ojifNMClDio,14061 +jinja2/compiler.py,sha256=PJzYdRLStlEOqmnQs1YxlizPrJoj3jTZuUleREn6AIQ,72199 +jinja2/constants.py,sha256=GMoFydBF_kdpaRKPoM5cl5MviquVRLVyZtfp5-16jg0,1433 +jinja2/debug.py,sha256=iWJ432RadxJNnaMOPrjIDInz50UEgni3_HKuFXi2vuQ,6299 +jinja2/defaults.py,sha256=boBcSw78h-lp20YbaXSJsqkAI2uN_mD_TtCydpeq5wU,1267 +jinja2/environment.py,sha256=0qldX3VQKZcm6lgn7zHz94oRFow7YPYERiqkquomNjU,61253 +jinja2/exceptions.py,sha256=ioHeHrWwCWNaXX1inHmHVblvc4haO7AXsjCp3GfWvx0,5071 +jinja2/ext.py,sha256=5fnMpllaXkfm2P_93RIvi-OnK7Tk8mCW8Du-GcD12Hc,31844 +jinja2/filters.py,sha256=vYjKb2zaPShvYtn_LpSmqfS8SScbrA_KOanNibsMDIE,53862 +jinja2/idtracking.py,sha256=GfNmadir4oDALVxzn3DL9YInhJDr69ebXeA2ygfuCGA,10704 +jinja2/lexer.py,sha256=DW2nX9zk-6MWp65YR2bqqj0xqCvLtD-u9NWT8AnFRxQ,29726 
+jinja2/loaders.py,sha256=ayAwxfrA1SAffQta0nwSDm3TDT4KYiIGN_D9Z45B310,23085 +jinja2/meta.py,sha256=GNPEvifmSaU3CMxlbheBOZjeZ277HThOPUTf1RkppKQ,4396 +jinja2/nativetypes.py,sha256=7GIGALVJgdyL80oZJdQUaUfwSt5q2lSSZbXt0dNf_M4,4210 +jinja2/nodes.py,sha256=i34GPRAZexXMT6bwuf5SEyvdmS-bRCy9KMjwN5O6pjk,34550 +jinja2/optimizer.py,sha256=tHkMwXxfZkbfA1KmLcqmBMSaz7RLIvvItrJcPoXTyD8,1650 +jinja2/parser.py,sha256=Y199wPL-G67gJoi5G_5sHuu9uEP1PJkjjLEW_xTH8-k,39736 +jinja2/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jinja2/runtime.py,sha256=_6LkKIWFJjQdqlrgA3K39zBFQ-7Orm3wGDm96RwxQoE,33406 +jinja2/sandbox.py,sha256=Y0xZeXQnH6EX5VjaV2YixESxoepnRbW_3UeQosaBU3M,14584 +jinja2/tests.py,sha256=Am5Z6Lmfr2XaH_npIfJJ8MdXtWsbLjMULZJulTAj30E,5905 +jinja2/utils.py,sha256=IMwRIcN1SsTw2-jdQtlH2KzNABsXZBW_-tnFXafQBvY,23933 +jinja2/visitor.py,sha256=MH14C6yq24G_KVtWzjwaI7Wg14PCJIYlWW1kpkxYak0,3568 diff --git a/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/WHEEL b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..98c0d20b7a64f4f998d7913e1d38a05dba20916c --- /dev/null +++ b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.42.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/entry_points.txt b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..7b9666c8ea311ea0f0cfe7bed861aaa5469f92bb --- /dev/null +++ b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[babel.extractors] +jinja2 = jinja2.ext:babel_extract[i18n] diff --git a/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..7f7afbf3bf54b346092be6a72070fcbd305ead1e --- /dev/null +++ b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/top_level.txt @@ -0,0 +1 @@ +jinja2 diff --git a/venv/lib/python3.10/site-packages/filelock/__init__.py b/venv/lib/python3.10/site-packages/filelock/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..006299d2188c5d642cf78cf19305e88c95ee01cb --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock/__init__.py @@ -0,0 +1,52 @@ +""" +A platform independent file lock that supports the with-statement. + +.. autodata:: filelock.__version__ + :no-value: + +""" + +from __future__ import annotations + +import sys +import warnings +from typing import TYPE_CHECKING + +from ._api import AcquireReturnProxy, BaseFileLock +from ._error import Timeout +from ._soft import SoftFileLock +from ._unix import UnixFileLock, has_fcntl +from ._windows import WindowsFileLock +from .version import version + +#: version of the project as a string +__version__: str = version + + +if sys.platform == "win32": # pragma: win32 cover + _FileLock: type[BaseFileLock] = WindowsFileLock +else: # pragma: win32 no cover # noqa: PLR5501 + if has_fcntl: + _FileLock: type[BaseFileLock] = UnixFileLock + else: + _FileLock = SoftFileLock + if warnings is not None: + warnings.warn("only soft file lock is available", stacklevel=2) + +if TYPE_CHECKING: + FileLock = SoftFileLock +else: + #: Alias for the lock, which should be used for the current platform. 
+ FileLock = _FileLock + + +__all__ = [ + "AcquireReturnProxy", + "BaseFileLock", + "FileLock", + "SoftFileLock", + "Timeout", + "UnixFileLock", + "WindowsFileLock", + "__version__", +] diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc75c66dd12c2dfeaa7c8253e92d6efc44ecc28a Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/_api.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5fd221130572761e95b1d9d6a528046e71ff15d Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/_api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/_error.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/_error.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb7e1c1eada0f239f8a9304324581d656accd9ae Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/_error.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/_soft.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/_soft.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d818a4c4aede340a56d0b4563977cc11d6bd8cb6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/_soft.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/_unix.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/_unix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e4d77b3883063203f59ebc5ba32e5e0869b10c4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/_unix.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/_util.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a161399214b1738212de1eec50da1e9c48577933 Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/_util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/_windows.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/_windows.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..887f6191706deb4bdf55e750c831026462ba115d Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/_windows.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/version.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97bb81dd908e18b2e670339bbd005f857b9f66c1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/version.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/filelock/_api.py b/venv/lib/python3.10/site-packages/filelock/_api.py new file mode 100644 index 
0000000000000000000000000000000000000000..210b8c4138d70ac989401d9ca3fd6ab03e36c6eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock/_api.py @@ -0,0 +1,341 @@ +from __future__ import annotations + +import contextlib +import logging +import os +import time +import warnings +from abc import ABC, abstractmethod +from dataclasses import dataclass +from threading import local +from typing import TYPE_CHECKING, Any +from weakref import WeakValueDictionary + +from ._error import Timeout + +if TYPE_CHECKING: + import sys + from types import TracebackType + + if sys.version_info >= (3, 11): # pragma: no cover (py311+) + from typing import Self + else: # pragma: no cover ( None: + self.lock = lock + + def __enter__(self) -> BaseFileLock: + return self.lock + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + self.lock.release() + + +@dataclass +class FileLockContext: + """A dataclass which holds the context for a ``BaseFileLock`` object.""" + + # The context is held in a separate class to allow optional use of thread local storage via the + # ThreadLocalFileContext class. + + #: The path to the lock file. + lock_file: str + + #: The default timeout value. + timeout: float + + #: The mode for the lock files + mode: int + + #: The file descriptor for the *_lock_file* as it is returned by the os.open() function, not None when lock held + lock_file_fd: int | None = None + + #: The lock counter is used for implementing the nested locking mechanism. + lock_counter: int = 0 # When the lock is acquired is increased and the lock is only released, when this value is 0 + + +class ThreadLocalFileContext(FileLockContext, local): + """A thread local version of the ``FileLockContext`` class.""" + + +class BaseFileLock(ABC, contextlib.ContextDecorator): + """Abstract base class for a file lock object.""" + + _instances: WeakValueDictionary[str, BaseFileLock] + + def __new__( # noqa: PLR0913 + cls, + lock_file: str | os.PathLike[str], + timeout: float = -1, + mode: int = 0o644, + thread_local: bool = True, # noqa: ARG003, FBT001, FBT002 + *, + is_singleton: bool = False, + **kwargs: dict[str, Any], # capture remaining kwargs for subclasses # noqa: ARG003 + ) -> Self: + """Create a new lock object or if specified return the singleton instance for the lock file.""" + if not is_singleton: + return super().__new__(cls) + + instance = cls._instances.get(str(lock_file)) + if not instance: + instance = super().__new__(cls) + cls._instances[str(lock_file)] = instance + elif timeout != instance.timeout or mode != instance.mode: + msg = "Singleton lock instances cannot be initialized with differing arguments" + raise ValueError(msg) + + return instance # type: ignore[return-value] # https://github.com/python/mypy/issues/15322 + + def __init_subclass__(cls, **kwargs: dict[str, Any]) -> None: + """Setup unique state for lock subclasses.""" + super().__init_subclass__(**kwargs) + cls._instances = WeakValueDictionary() + + def __init__( # noqa: PLR0913 + self, + lock_file: str | os.PathLike[str], + timeout: float = -1, + mode: int = 0o644, + thread_local: bool = True, # noqa: FBT001, FBT002 + *, + is_singleton: bool = False, + ) -> None: + """ + Create a new lock object. + + :param lock_file: path to the file + :param timeout: default timeout when acquiring the lock, in seconds. It will be used as fallback value in \ + the acquire method, if no timeout value (``None``) is given. 
If you want to disable the timeout, set it \ + to a negative value. A timeout of 0 means that there is exactly one attempt to acquire the file lock. + :param mode: file permissions for the lockfile + :param thread_local: Whether this object's internal context should be thread local or not. If this is set to \ + ``False`` then the lock will be reentrant across threads. + :param is_singleton: If this is set to ``True`` then only one instance of this class will be created \ + per lock file. This is useful if you want to use the lock object for reentrant locking without needing \ + to pass the same object around. + + """ + self._is_thread_local = thread_local + self._is_singleton = is_singleton + + # Create the context. Note that external code should not work with the context directly and should instead use + # properties of this class. + kwargs: dict[str, Any] = { + "lock_file": os.fspath(lock_file), + "timeout": timeout, + "mode": mode, + } + self._context: FileLockContext = (ThreadLocalFileContext if thread_local else FileLockContext)(**kwargs) + + def is_thread_local(self) -> bool: + """:return: a flag indicating if this lock is thread local or not""" + return self._is_thread_local + + @property + def is_singleton(self) -> bool: + """:return: a flag indicating if this lock is singleton or not""" + return self._is_singleton + + @property + def lock_file(self) -> str: + """:return: path to the lock file""" + return self._context.lock_file + + @property + def timeout(self) -> float: + """ + :return: the default timeout value, in seconds + + .. versionadded:: 2.0.0 + """ + return self._context.timeout + + @timeout.setter + def timeout(self, value: float | str) -> None: + """ + Change the default timeout value. + + :param value: the new value, in seconds + + """ + self._context.timeout = float(value) + + @property + def mode(self) -> int: + """:return: the file permissions for the lockfile""" + return self._context.mode + + @abstractmethod + def _acquire(self) -> None: + """If the file lock could be acquired, self._context.lock_file_fd holds the file descriptor of the lock file.""" + raise NotImplementedError + + @abstractmethod + def _release(self) -> None: + """Releases the lock and sets self._context.lock_file_fd to None.""" + raise NotImplementedError + + @property + def is_locked(self) -> bool: + """ + + :return: A boolean indicating if the lock file is holding the lock currently. + + .. versionchanged:: 2.0.0 + + This was previously a method and is now a property. + """ + return self._context.lock_file_fd is not None + + @property + def lock_counter(self) -> int: + """:return: The number of times this lock has been acquired (but not yet released).""" + return self._context.lock_counter + + def acquire( + self, + timeout: float | None = None, + poll_interval: float = 0.05, + *, + poll_intervall: float | None = None, + blocking: bool = True, + ) -> AcquireReturnProxy: + """ + Try to acquire the file lock. + + :param timeout: maximum wait time for acquiring the lock, ``None`` means use the default :attr:`~timeout` is and + if ``timeout < 0``, there is no timeout and this method will block until the lock could be acquired + :param poll_interval: interval of trying to acquire the lock file + :param poll_intervall: deprecated, kept for backwards compatibility, use ``poll_interval`` instead + :param blocking: defaults to True. If False, function will return immediately if it cannot obtain a lock on the + first attempt. 
Otherwise, this method will block until the timeout expires or the lock is acquired. + :raises Timeout: if fails to acquire lock within the timeout period + :return: a context object that will unlock the file when the context is exited + + .. code-block:: python + + # You can use this method in the context manager (recommended) + with lock.acquire(): + pass + + # Or use an equivalent try-finally construct: + lock.acquire() + try: + pass + finally: + lock.release() + + .. versionchanged:: 2.0.0 + + This method returns now a *proxy* object instead of *self*, + so that it can be used in a with statement without side effects. + + """ + # Use the default timeout, if no timeout is provided. + if timeout is None: + timeout = self._context.timeout + + if poll_intervall is not None: + msg = "use poll_interval instead of poll_intervall" + warnings.warn(msg, DeprecationWarning, stacklevel=2) + poll_interval = poll_intervall + + # Increment the number right at the beginning. We can still undo it, if something fails. + self._context.lock_counter += 1 + + lock_id = id(self) + lock_filename = self.lock_file + start_time = time.perf_counter() + try: + while True: + if not self.is_locked: + _LOGGER.debug("Attempting to acquire lock %s on %s", lock_id, lock_filename) + self._acquire() + if self.is_locked: + _LOGGER.debug("Lock %s acquired on %s", lock_id, lock_filename) + break + if blocking is False: + _LOGGER.debug("Failed to immediately acquire lock %s on %s", lock_id, lock_filename) + raise Timeout(lock_filename) # noqa: TRY301 + if 0 <= timeout < time.perf_counter() - start_time: + _LOGGER.debug("Timeout on acquiring lock %s on %s", lock_id, lock_filename) + raise Timeout(lock_filename) # noqa: TRY301 + msg = "Lock %s not acquired on %s, waiting %s seconds ..." + _LOGGER.debug(msg, lock_id, lock_filename, poll_interval) + time.sleep(poll_interval) + except BaseException: # Something did go wrong, so decrement the counter. + self._context.lock_counter = max(0, self._context.lock_counter - 1) + raise + return AcquireReturnProxy(lock=self) + + def release(self, force: bool = False) -> None: # noqa: FBT001, FBT002 + """ + Releases the file lock. Please note, that the lock is only completely released, if the lock counter is 0. + Also note, that the lock file itself is not automatically deleted. + + :param force: If true, the lock counter is ignored and the lock is released in every case/ + + """ + if self.is_locked: + self._context.lock_counter -= 1 + + if self._context.lock_counter == 0 or force: + lock_id, lock_filename = id(self), self.lock_file + + _LOGGER.debug("Attempting to release lock %s on %s", lock_id, lock_filename) + self._release() + self._context.lock_counter = 0 + _LOGGER.debug("Lock %s released on %s", lock_id, lock_filename) + + def __enter__(self) -> Self: + """ + Acquire the lock. + + :return: the lock object + + """ + self.acquire() + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + """ + Release the lock. 
+ + :param exc_type: the exception type if raised + :param exc_value: the exception value if raised + :param traceback: the exception traceback if raised + + """ + self.release() + + def __del__(self) -> None: + """Called when the lock object is deleted.""" + self.release(force=True) + + +__all__ = [ + "AcquireReturnProxy", + "BaseFileLock", +] diff --git a/venv/lib/python3.10/site-packages/filelock/_error.py b/venv/lib/python3.10/site-packages/filelock/_error.py new file mode 100644 index 0000000000000000000000000000000000000000..f7ff08c0f508ad7077eb6ed1990898840c952b3a --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock/_error.py @@ -0,0 +1,30 @@ +from __future__ import annotations + +from typing import Any + + +class Timeout(TimeoutError): # noqa: N818 + """Raised when the lock could not be acquired in *timeout* seconds.""" + + def __init__(self, lock_file: str) -> None: + super().__init__() + self._lock_file = lock_file + + def __reduce__(self) -> str | tuple[Any, ...]: + return self.__class__, (self._lock_file,) # Properly pickle the exception + + def __str__(self) -> str: + return f"The file lock '{self._lock_file}' could not be acquired." + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.lock_file!r})" + + @property + def lock_file(self) -> str: + """:return: The path of the file lock.""" + return self._lock_file + + +__all__ = [ + "Timeout", +] diff --git a/venv/lib/python3.10/site-packages/filelock/_soft.py b/venv/lib/python3.10/site-packages/filelock/_soft.py new file mode 100644 index 0000000000000000000000000000000000000000..28c67f74cc82b8f55e47afd6a71972cc1fb95eb6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock/_soft.py @@ -0,0 +1,47 @@ +from __future__ import annotations + +import os +import sys +from contextlib import suppress +from errno import EACCES, EEXIST +from pathlib import Path + +from ._api import BaseFileLock +from ._util import ensure_directory_exists, raise_on_not_writable_file + + +class SoftFileLock(BaseFileLock): + """Simply watches the existence of the lock file.""" + + def _acquire(self) -> None: + raise_on_not_writable_file(self.lock_file) + ensure_directory_exists(self.lock_file) + # first check for exists and read-only mode as the open will mask this case as EEXIST + flags = ( + os.O_WRONLY # open for writing only + | os.O_CREAT + | os.O_EXCL # together with above raise EEXIST if the file specified by filename exists + | os.O_TRUNC # truncate the file to zero byte + ) + try: + file_handler = os.open(self.lock_file, flags, self._context.mode) + except OSError as exception: # re-raise unless expected exception + if not ( + exception.errno == EEXIST # lock already exist + or (exception.errno == EACCES and sys.platform == "win32") # has no access to this lock + ): # pragma: win32 no cover + raise + else: + self._context.lock_file_fd = file_handler + + def _release(self) -> None: + assert self._context.lock_file_fd is not None # noqa: S101 + os.close(self._context.lock_file_fd) # the lock file is definitely not None + self._context.lock_file_fd = None + with suppress(OSError): # the file is already deleted and that's what we want + Path(self.lock_file).unlink() + + +__all__ = [ + "SoftFileLock", +] diff --git a/venv/lib/python3.10/site-packages/filelock/_unix.py b/venv/lib/python3.10/site-packages/filelock/_unix.py new file mode 100644 index 0000000000000000000000000000000000000000..4ae1fbe916f95762418cd62251f91f74ba35fc8c --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock/_unix.py @@ -0,0 +1,68 @@ 
+from __future__ import annotations + +import os +import sys +from contextlib import suppress +from errno import ENOSYS +from pathlib import Path +from typing import cast + +from ._api import BaseFileLock +from ._util import ensure_directory_exists + +#: a flag to indicate if the fcntl API is available +has_fcntl = False +if sys.platform == "win32": # pragma: win32 cover + + class UnixFileLock(BaseFileLock): + """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems.""" + + def _acquire(self) -> None: + raise NotImplementedError + + def _release(self) -> None: + raise NotImplementedError + +else: # pragma: win32 no cover + try: + import fcntl + except ImportError: + pass + else: + has_fcntl = True + + class UnixFileLock(BaseFileLock): + """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems.""" + + def _acquire(self) -> None: + ensure_directory_exists(self.lock_file) + open_flags = os.O_RDWR | os.O_TRUNC + if not Path(self.lock_file).exists(): + open_flags |= os.O_CREAT + fd = os.open(self.lock_file, open_flags, self._context.mode) + with suppress(PermissionError): # This locked is not owned by this UID + os.fchmod(fd, self._context.mode) + try: + fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) + except OSError as exception: + os.close(fd) + if exception.errno == ENOSYS: # NotImplemented error + msg = "FileSystem does not appear to support flock; use SoftFileLock instead" + raise NotImplementedError(msg) from exception + else: + self._context.lock_file_fd = fd + + def _release(self) -> None: + # Do not remove the lockfile: + # https://github.com/tox-dev/py-filelock/issues/31 + # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition + fd = cast(int, self._context.lock_file_fd) + self._context.lock_file_fd = None + fcntl.flock(fd, fcntl.LOCK_UN) + os.close(fd) + + +__all__ = [ + "UnixFileLock", + "has_fcntl", +] diff --git a/venv/lib/python3.10/site-packages/filelock/_util.py b/venv/lib/python3.10/site-packages/filelock/_util.py new file mode 100644 index 0000000000000000000000000000000000000000..c671e8533873948f0e1b5575ff952c722019f067 --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock/_util.py @@ -0,0 +1,52 @@ +from __future__ import annotations + +import os +import stat +import sys +from errno import EACCES, EISDIR +from pathlib import Path + + +def raise_on_not_writable_file(filename: str) -> None: + """ + Raise an exception if attempting to open the file for writing would fail. + + This is done so files that will never be writable can be separated from files that are writable but currently + locked. + + :param filename: file to check + :raises OSError: as if the file was opened for writing. 
+ + """ + try: # use stat to do exists + can write to check without race condition + file_stat = os.stat(filename) # noqa: PTH116 + except OSError: + return # swallow does not exist or other errors + + if file_stat.st_mtime != 0: # if os.stat returns but modification is zero that's an invalid os.stat - ignore it + if not (file_stat.st_mode & stat.S_IWUSR): + raise PermissionError(EACCES, "Permission denied", filename) + + if stat.S_ISDIR(file_stat.st_mode): + if sys.platform == "win32": # pragma: win32 cover + # On Windows, this is PermissionError + raise PermissionError(EACCES, "Permission denied", filename) + else: # pragma: win32 no cover # noqa: RET506 + # On linux / macOS, this is IsADirectoryError + raise IsADirectoryError(EISDIR, "Is a directory", filename) + + +def ensure_directory_exists(filename: Path | str) -> None: + """ + Ensure the directory containing the file exists (create it if necessary). + + :param filename: file. + + """ + Path(filename).parent.mkdir(parents=True, exist_ok=True) + + +__all__ = [ + "ensure_directory_exists", + "raise_on_not_writable_file", +] diff --git a/venv/lib/python3.10/site-packages/filelock/_windows.py b/venv/lib/python3.10/site-packages/filelock/_windows.py new file mode 100644 index 0000000000000000000000000000000000000000..8db55dcbaa3e7bab091781b17ce22fde1fc239f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock/_windows.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +import os +import sys +from contextlib import suppress +from errno import EACCES +from pathlib import Path +from typing import cast + +from ._api import BaseFileLock +from ._util import ensure_directory_exists, raise_on_not_writable_file + +if sys.platform == "win32": # pragma: win32 cover + import msvcrt + + class WindowsFileLock(BaseFileLock): + """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems.""" + + def _acquire(self) -> None: + raise_on_not_writable_file(self.lock_file) + ensure_directory_exists(self.lock_file) + flags = ( + os.O_RDWR # open for read and write + | os.O_CREAT # create file if not exists + | os.O_TRUNC # truncate file if not empty + ) + try: + fd = os.open(self.lock_file, flags, self._context.mode) + except OSError as exception: + if exception.errno != EACCES: # has no access to this lock + raise + else: + try: + msvcrt.locking(fd, msvcrt.LK_NBLCK, 1) + except OSError as exception: + os.close(fd) # close file first + if exception.errno != EACCES: # file is already locked + raise + else: + self._context.lock_file_fd = fd + + def _release(self) -> None: + fd = cast(int, self._context.lock_file_fd) + self._context.lock_file_fd = None + msvcrt.locking(fd, msvcrt.LK_UNLCK, 1) + os.close(fd) + + with suppress(OSError): # Probably another instance of the application hat acquired the file lock. 
+ Path(self.lock_file).unlink() + +else: # pragma: win32 no cover + + class WindowsFileLock(BaseFileLock): + """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems.""" + + def _acquire(self) -> None: + raise NotImplementedError + + def _release(self) -> None: + raise NotImplementedError + + +__all__ = [ + "WindowsFileLock", +] diff --git a/venv/lib/python3.10/site-packages/filelock/py.typed b/venv/lib/python3.10/site-packages/filelock/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/filelock/version.py b/venv/lib/python3.10/site-packages/filelock/version.py new file mode 100644 index 0000000000000000000000000000000000000000..cf2a247c31b187acc0502a58ec7062029e31f0fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock/version.py @@ -0,0 +1,16 @@ +# file generated by setuptools_scm +# don't change, don't track in version control +TYPE_CHECKING = False +if TYPE_CHECKING: + from typing import Tuple, Union + VERSION_TUPLE = Tuple[Union[int, str], ...] +else: + VERSION_TUPLE = object + +version: str +__version__: str +__version_tuple__: VERSION_TUPLE +version_tuple: VERSION_TUPLE + +__version__ = version = '3.13.4' +__version_tuple__ = version_tuple = (3, 13, 4) diff --git a/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45a18228196a239eab3c305dd6aa14b0ca177712 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__main__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a536e66d65b7311bf7800741d5d1fa4b8250a48 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__main__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/base.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f8c81f74eb7014f6d08734ad96b2c874d81d689 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/fake_sgml.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/fake_sgml.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef97c7494dc1d6012523a36bb5c40c399cc8aae2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/fake_sgml.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/iwslt_xml.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/iwslt_xml.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..408ab7cda695581e42c8504283a4d2feae7fa620 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/iwslt_xml.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/plain_text.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/plain_text.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69569150df4a2bc986ee08cb1d58c7e6d1dd57d6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/plain_text.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/tsv.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/tsv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8ec5b4b3254ea8841067d235558747c2998b187 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/tsv.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/wmt_xml.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/wmt_xml.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2683a6323de9cb1da82ee86650e5e8a13fa0213e Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/wmt_xml.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sacrebleu/dataset/base.py b/venv/lib/python3.10/site-packages/sacrebleu/dataset/base.py new file mode 100644 index 0000000000000000000000000000000000000000..cf3c092fae7ae206d2606680f0313dae65d7bccb --- /dev/null +++ b/venv/lib/python3.10/site-packages/sacrebleu/dataset/base.py @@ -0,0 +1,195 @@ +""" +The base class for all types of datasets. +""" +import os +import re +from abc import ABCMeta, abstractmethod +from typing import Dict, List, Optional + +from ..utils import SACREBLEU_DIR, download_file, smart_open + + +class Dataset(metaclass=ABCMeta): + def __init__( + self, + name: str, + data: Optional[List[str]] = None, + description: Optional[str] = None, + citation: Optional[str] = None, + md5: Optional[List[str]] = None, + langpairs=Dict[str, List[str]], + **kwargs, + ): + """ + Params come from the values in DATASETS. + + :param name: Name of the dataset. + :param data: URL of the raw data of the dataset. + :param description: Description of the dataset. + :param citation: Citation for the dataset. + :param md5: MD5 checksum of the dataset. + :param langpairs: List of available language pairs. + """ + self.name = name + self.data = data + self.description = description + self.citation = citation + self.md5 = md5 + self.langpairs = langpairs + self.kwargs = kwargs + + # Don't do any downloading or further processing now. + # Only do that lazily, when asked. + + # where to store the dataset + self._outdir = os.path.join(SACREBLEU_DIR, self.name) + self._rawdir = os.path.join(self._outdir, "raw") + + def maybe_download(self): + """ + If the dataset isn't downloaded, use utils/download_file() + This can be implemented here in the base class. It should write + to ~/.sacreleu/DATASET/raw exactly as it does now. + """ + os.makedirs(self._rawdir, exist_ok=True) + + expected_checksums = self.md5 if self.md5 else [None] * len(self.data) + + for url, expected_md5 in zip(self.data, expected_checksums): + tarball = os.path.join(self._rawdir, self._get_tarball_filename(url)) + + download_file( + url, tarball, extract_to=self._rawdir, expected_md5=expected_md5 + ) + + @staticmethod + def _clean(s): + """ + Removes trailing and leading spaces and collapses multiple consecutive internal spaces to a single one. 
+ + :param s: The string. + :return: A cleaned-up string. + """ + return re.sub(r"\s+", " ", s.strip()) + + def _get_tarball_filename(self, url): + """ + Produces a local filename for tarball. + :param url: The url to download. + :return: A name produced from the dataset identifier and the URL basename. + """ + return self.name.replace("/", "_") + "." + os.path.basename(url) + + def _get_txt_file_path(self, langpair, fieldname): + """ + Given the language pair and fieldname, return the path to the text file. + The format is: ~/.sacrebleu/DATASET/DATASET.LANGPAIR.FIELDNAME + + :param langpair: The language pair. + :param fieldname: The fieldname. + :return: The path to the text file. + """ + # handle the special case of subsets. e.g. "wmt21/dev" > "wmt21_dev" + name = self.name.replace("/", "_") + # Colons are used to distinguish multiple references, but are not supported in Windows filenames + fieldname = fieldname.replace(":", "-") + return os.path.join(self._outdir, f"{name}.{langpair}.{fieldname}") + + def _get_langpair_metadata(self, langpair): + """ + Given a language pair, return the metadata for that language pair. + Deal with errors if the language pair is not available. + + :param langpair: The language pair. e.g. "en-de" + :return: Dict format which is same as self.langpairs. + """ + if langpair is None: + langpairs = self.langpairs + elif langpair not in self.langpairs: + raise Exception(f"No such language pair {self.name}/{langpair}") + else: + langpairs = {langpair: self.langpairs[langpair]} + + return langpairs + + @abstractmethod + def process_to_text(self, langpair=None) -> None: + """Processes raw files to plain text files. + + :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed. + """ + pass + + def fieldnames(self, langpair) -> List[str]: + """ + Return a list of all the field names. For most source, this is just + the source and the reference. For others, it might include the document + ID for each line, or the original language (origLang). + + get_files() should return the same number of items as this. + + :param langpair: The language pair (e.g., "de-en") + :return: a list of field names + """ + return ["src", "ref"] + + def __iter__(self, langpair): + """ + Iterates over all fields (source, references, and other metadata) defined + by the dataset. + """ + all_files = self.get_files(langpair) + all_fins = [smart_open(f) for f in all_files] + + for item in zip(*all_fins): + yield item + + def source(self, langpair): + """ + Return an iterable over the source lines. + """ + source_file = self.get_source_file(langpair) + with smart_open(source_file) as fin: + for line in fin: + yield line.strip() + + def references(self, langpair): + """ + Return an iterable over the references. + """ + ref_files = self.get_reference_files(langpair) + ref_fins = [smart_open(f) for f in ref_files] + + for item in zip(*ref_fins): + yield item + + def get_source_file(self, langpair): + all_files = self.get_files(langpair) + all_fields = self.fieldnames(langpair) + index = all_fields.index("src") + return all_files[index] + + def get_reference_files(self, langpair): + all_files = self.get_files(langpair) + all_fields = self.fieldnames(langpair) + ref_files = [ + f for f, field in zip(all_files, all_fields) if field.startswith("ref") + ] + return ref_files + + def get_files(self, langpair): + """ + Returns the path of the source file and all reference files for + the provided test set / language pair. 
+ Downloads the references first if they are not already local. + + :param langpair: The language pair (e.g., "de-en") + :return: a list of the source file and all reference files + """ + fields = self.fieldnames(langpair) + files = [self._get_txt_file_path(langpair, field) for field in fields] + + for file in files: + if not os.path.exists(file): + self.process_to_text(langpair) + return files diff --git a/venv/lib/python3.10/site-packages/sacrebleu/dataset/fake_sgml.py b/venv/lib/python3.10/site-packages/sacrebleu/dataset/fake_sgml.py new file mode 100644 index 0000000000000000000000000000000000000000..d1f638123e8742bb22e2f671c4af5bd0e556f685 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sacrebleu/dataset/fake_sgml.py @@ -0,0 +1,116 @@ +import os +import re + +from ..utils import smart_open +from .base import Dataset + + +class FakeSGMLDataset(Dataset): + """ + The fake SGML format used by WMT prior to 2021. Can't be properly parsed. + Source and reference(s) in separate files. + """ + + def _convert_format(self, input_file_path, output_filep_path): + """ + Extract data from raw file and convert to raw txt format. + """ + with smart_open(input_file_path) as fin, smart_open( + output_filep_path, "wt" + ) as fout: + for line in fin: + if line.startswith("(.*).*?", "\\1", line)) + print(line, file=fout) + + def _convert_meta(self, input_file_path, field, output_filep_path): + """ + Extract metadata from document tags, projects across segments. + """ + with smart_open(input_file_path) as fin, smart_open( + output_filep_path, "wt" + ) as fout: + value = "" + for line in fin: + if line.startswith("= 2 + ), f"Each language pair in {self.name} must have at least 2 fields." + + fields = ["src"] + + if length == 2: + fields.append("ref") + else: + for i, _ in enumerate(meta[langpair][1:]): + fields.append(f"ref:{i}") + + if not self.name.startswith("wmt08"): + fields += ["docid", "genre", "origlang"] + + return fields + + +class WMTAdditionDataset(FakeSGMLDataset): + """ + Handle special case of WMT Google addition dataset. + """ + + def _convert_format(self, input_file_path, output_filep_path): + if input_file_path.endswith(".sgm"): + return super()._convert_format(input_file_path, output_filep_path) + else: + with smart_open(input_file_path) as fin: + with smart_open(output_filep_path, "wt") as fout: + for line in fin: + print(line.rstrip(), file=fout) diff --git a/venv/lib/python3.10/site-packages/sacrebleu/dataset/wmt_xml.py b/venv/lib/python3.10/site-packages/sacrebleu/dataset/wmt_xml.py new file mode 100644 index 0000000000000000000000000000000000000000..4f78bcc3ccb3447c178c1cf2e7d3aebe4e50ed1a --- /dev/null +++ b/venv/lib/python3.10/site-packages/sacrebleu/dataset/wmt_xml.py @@ -0,0 +1,207 @@ +import os + +import lxml.etree as ET + +from ..utils import smart_open +from .base import Dataset + +from collections import defaultdict + + +def _get_field_by_translator(translator): + if not translator: + return "ref" + else: + return f"ref:{translator}" + +class WMTXMLDataset(Dataset): + """ + The 2021+ WMT dataset format. Everything is contained in a single file. + Can be parsed with the lxml parser. + """ + @staticmethod + def _unwrap_wmt21_or_later(raw_file): + """ + Unwraps the XML file from wmt21 or later. + This script is adapted from https://github.com/wmt-conference/wmt-format-tools + + :param raw_file: The raw xml file to unwrap. + :return: Dictionary which contains the following fields: + - `src`: The source sentences. 
+ - `docid`: ID indicating which document the sentences belong to. + - `origlang`: The original language of the document. + - `ref:{translator}`: The references produced by each translator. + - `ref`: An alias for the references from the first translator. + """ + tree = ET.parse(raw_file) + # Find and check the documents (src, ref, hyp) + src_langs, ref_langs, translators = set(), set(), set() + for src_doc in tree.getroot().findall(".//src"): + src_langs.add(src_doc.get("lang")) + + for ref_doc in tree.getroot().findall(".//ref"): + ref_langs.add(ref_doc.get("lang")) + translator = ref_doc.get("translator") + translators.add(translator) + + assert ( + len(src_langs) == 1 + ), f"Multiple source languages found in the file: {raw_file}" + assert ( + len(ref_langs) == 1 + ), f"Found {len(ref_langs)} reference languages found in the file: {raw_file}" + + src = [] + docids = [] + orig_langs = [] + domains = [] + + refs = { _get_field_by_translator(translator): [] for translator in translators } + + systems = defaultdict(list) + + src_sent_count, doc_count = 0, 0 + for doc in tree.getroot().findall(".//doc"): + docid = doc.attrib["id"] + origlang = doc.attrib["origlang"] + # present wmt22++ + domain = doc.attrib.get("domain", None) + + # Skip the testsuite + if "testsuite" in doc.attrib: + continue + + doc_count += 1 + src_sents = { + int(seg.get("id")): seg.text for seg in doc.findall(".//src//seg") + } + + def get_sents(doc): + return { + int(seg.get("id")): seg.text if seg.text else "" + for seg in doc.findall(".//seg") + } + + ref_docs = doc.findall(".//ref") + + trans_to_ref = { + ref_doc.get("translator"): get_sents(ref_doc) for ref_doc in ref_docs + } + + hyp_docs = doc.findall(".//hyp") + hyps = { + hyp_doc.get("system"): get_sents(hyp_doc) for hyp_doc in hyp_docs + } + + for seg_id in sorted(src_sents.keys()): + # no ref translation is available for this segment + if not any([value.get(seg_id, "") for value in trans_to_ref.values()]): + continue + for translator in translators: + refs[_get_field_by_translator(translator)].append( + trans_to_ref.get(translator, {translator: {}}).get(seg_id, "") + ) + src.append(src_sents[seg_id]) + for system_name in hyps.keys(): + systems[system_name].append(hyps[system_name][seg_id]) + docids.append(docid) + orig_langs.append(origlang) + if domain is not None: + domains.append(domain) + src_sent_count += 1 + + data = {"src": src, **refs, "docid": docids, "origlang": orig_langs, **systems} + if len(domains): + data["domain"] = domains + + return data + + def _get_langpair_path(self, langpair): + """ + Returns the path for this language pair. + This is useful because in WMT22, the language-pair data structure can be a dict, + in order to allow for overriding which test set to use. + """ + langpair_data = self._get_langpair_metadata(langpair)[langpair] + rel_path = langpair_data["path"] if isinstance(langpair_data, dict) else langpair_data[0] + return os.path.join(self._rawdir, rel_path) + + def process_to_text(self, langpair=None): + """Processes raw files to plain text files. + + :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed. 
+ """ + # ensure that the dataset is downloaded + self.maybe_download() + + for langpair in sorted(self._get_langpair_metadata(langpair).keys()): + # The data type can be a list of paths, or a dict, containing the "path" + # and an override on which labeled reference to use (key "refs") + rawfile = self._get_langpair_path(langpair) + + with smart_open(rawfile) as fin: + fields = self._unwrap_wmt21_or_later(fin) + + for fieldname in fields: + textfile = self._get_txt_file_path(langpair, fieldname) + + # skip if the file already exists + if os.path.exists(textfile) and os.path.getsize(textfile) > 0: + continue + + with smart_open(textfile, "w") as fout: + for line in fields[fieldname]: + print(self._clean(line), file=fout) + + def _get_langpair_allowed_refs(self, langpair): + """ + Returns the preferred references for this language pair. + This can be set in the language pair block (as in WMT22), and backs off to the + test-set-level default, or nothing. + + There is one exception. In the metadata, sometimes there is no translator field + listed (e.g., wmt22:liv-en). In this case, the reference is set to "", and the + field "ref" is returned. + """ + defaults = self.kwargs.get("refs", []) + langpair_data = self._get_langpair_metadata(langpair)[langpair] + if isinstance(langpair_data, dict): + allowed_refs = langpair_data.get("refs", defaults) + else: + allowed_refs = defaults + allowed_refs = [_get_field_by_translator(ref) for ref in allowed_refs] + + return allowed_refs + + def get_reference_files(self, langpair): + """ + Returns the requested reference files. + This is defined as a default at the test-set level, and can be overridden per language. + """ + # Iterate through the (label, file path) pairs, looking for permitted labels + allowed_refs = self._get_langpair_allowed_refs(langpair) + all_files = self.get_files(langpair) + all_fields = self.fieldnames(langpair) + ref_files = [ + f for f, field in zip(all_files, all_fields) if field in allowed_refs + ] + return ref_files + + def fieldnames(self, langpair): + """ + Return a list of all the field names. For most source, this is just + the source and the reference. For others, it might include the document + ID for each line, or the original language (origLang). + + get_files() should return the same number of items as this. 
+ + :param langpair: The language pair (e.g., "de-en") + :return: a list of field names + """ + self.maybe_download() + rawfile = self._get_langpair_path(langpair) + + with smart_open(rawfile) as fin: + fields = self._unwrap_wmt21_or_later(fin) + + return list(fields.keys()) diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/__init__.py b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a18c227748fd59cb1848539ea88b045b75e5dc64 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__init__.py @@ -0,0 +1,11 @@ +"""The implementation of various metrics.""" + +from .bleu import BLEU, BLEUScore # noqa: F401 +from .chrf import CHRF, CHRFScore # noqa: F401 +from .ter import TER, TERScore # noqa: F401 + +METRICS = { + 'BLEU': BLEU, + 'CHRF': CHRF, + 'TER': TER, +} diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5b97682f821a31b0282aaecf1b650f88655ea33 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/base.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1371a0247c69703a8c36f84d89e3a8533757413 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/bleu.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/bleu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59313666c3e56bb4e633e5687d6d2c04e2fe8fa9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/bleu.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/chrf.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/chrf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9155b92e53a475406de68d1dd808bca618751a86 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/chrf.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/helpers.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c22bf0d6a0bef2bb26df8db8f944c602e7e96490 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/helpers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/lib_ter.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/lib_ter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c499cdd68090594315af10613eef88a7e371071 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/lib_ter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/ter.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/ter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aeb5ac0d8d8c115eca2bd31a2ce00fbf3e458f40 Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/ter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/base.py b/venv/lib/python3.10/site-packages/sacrebleu/metrics/base.py new file mode 100644 index 0000000000000000000000000000000000000000..93fb10815a1a8b08c69bad19d2cbed58e251afc7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sacrebleu/metrics/base.py @@ -0,0 +1,438 @@ +"""The base `Score`, `Metric` and `Signature` classes to derive from. + +`Metric` is an abstract class that enforces the implementation of a set +of abstract methods. This way, a correctly implemented metric will work +seamlessly with the rest of the codebase. +""" + +import json +import logging +import statistics +from typing import List, Sequence, Any, Optional, Dict +from abc import ABCMeta, abstractmethod + +from .. import __version__ + +sacrelogger = logging.getLogger('sacrebleu') + + +class Score: + """A base score class to derive from. + + :param name: The name of the underlying metric. + :param score: A floating point number for the final metric. + """ + def __init__(self, name: str, score: float): + """`Score` initializer.""" + self.name = name + self.score = score + + # Statistical test related fields + self._mean = -1.0 + self._ci = -1.0 + + # More info can be added right after the score + self._verbose = '' + + def format(self, width: int = 2, score_only: bool = False, + signature: str = '', is_json: bool = False) -> str: + """Returns a pretty representation of the score. + :param width: Floating point decimal precision width. + :param score_only: If `True`, and the format is not `json`, + returns a single score string. + :param signature: A string representation of the given `Signature` + instance. + :param is_json: If `True`, will output the score in JSON string. + :return: A plain or JSON-formatted string representation. + """ + d = { + 'name': self.name, + 'score': float(f'{self.score:.{width}f}'), + 'signature': signature, + } + + sc = f'{self.score:.{width}f}' + + if self._mean > 0: + confidence_mean = f'{self._mean:.{width}f}' + confidence_var = f'{self._ci:.{width}f}' + confidence_str = f'μ = {confidence_mean} ± {confidence_var}' + + sc += f' ({confidence_str})' + if is_json: + d['confidence_mean'] = float(confidence_mean) + d['confidence_var'] = float(confidence_var) + d['confidence'] = confidence_str + + # Construct full score line + full_score = f"{self.name}|{signature}" if signature else self.name + full_score = f"{full_score} = {sc}" + if self._verbose: + full_score += f' {self._verbose}' + d['verbose_score'] = self._verbose + + if score_only: + return sc + + if is_json: + for param in signature.split('|'): + key, value = param.split(':') + d[key] = value + return json.dumps(d, indent=1, ensure_ascii=False) + + return full_score + + def estimate_ci(self, scores: List['Score']): + """Takes a list of scores and stores mean, stdev and 95% confidence + interval around the mean. + + :param scores: A list of `Score` objects obtained from bootstrap + resampling for example. + """ + # Sort the scores + raw_scores = sorted([x.score for x in scores]) + n = len(raw_scores) + + # Get CI bounds (95%, i.e. 
1/40 from left) + lower_idx = n // 40 + upper_idx = n - lower_idx - 1 + lower, upper = raw_scores[lower_idx], raw_scores[upper_idx] + self._ci = 0.5 * (upper - lower) + self._mean = statistics.mean(raw_scores) + + def __repr__(self): + """Returns a human readable score string.""" + return self.format() + + +class Signature: + """A convenience class to represent sacreBLEU reproducibility signatures. + + :param args: key-value dictionary passed from the actual metric instance. + """ + def __init__(self, args: dict): + """`Signature` initializer.""" + # Global items that are shared across all metrics + self._abbr = { + 'version': 'v', + 'nrefs': '#', + 'test': 't', + 'lang': 'l', + 'subset': 'S', + 'origlang': 'o', + 'bs': 'bs', # Bootstrap resampling trials + 'ar': 'ar', # Approximate randomization trials + 'seed': 'rs', # RNG's seed + } + + if 'num_refs' not in args: + raise ValueError( + 'Number of references unknown, please evaluate the metric first.') + + num_refs = args['num_refs'] + if num_refs == -1: + # Detect variable number of refs + num_refs = 'var' + + # Global items that are shared across all metrics + # None's will be ignored + self.info = { + 'version': __version__, + 'nrefs': num_refs, + 'bs': args.get('n_bootstrap', None), + 'ar': None, + 'seed': args.get('seed', None), + 'test': args.get('test_set', None), + 'lang': args.get('langpair', None), + 'origlang': args.get('origlang', None), + 'subset': args.get('subset', None), + } + + def format(self, short: bool = False) -> str: + """Returns a string representation of the signature. + + :param short: If True, shortened signature is produced. + :return: A string representation of the signature. + """ + pairs = [] + keys = list(self.info.keys()) + # keep version always at end + keys.remove('version') + for name in keys + ['version']: + value = self.info[name] + if value is not None: + if isinstance(value, bool): + # Replace True/False with yes/no + value = 'yes' if value else 'no' + final_name = self._abbr[name] if short else name + pairs.append(f'{final_name}:{value}') + + return '|'.join(pairs) + + def update(self, key: str, value: Any): + """Add a new item or update an existing one. + + :param key: The key to use in the dictionary. + :param value: The associated value for the `key`. + """ + self.info[key] = value + + def __str__(self): + """Returns a human-readable signature string.""" + return self.format() + + def __repr__(self): + """Returns a human-readable signature string.""" + return self.format() + + +class Metric(metaclass=ABCMeta): + """A base class for all metrics that ensures the implementation of some + methods. Much of the common functionality is moved to this base class + from other metrics.""" + + # Each metric should define its Signature class' name here + _SIGNATURE_TYPE = Signature + + def __init__(self): + """`Metric` initializer.""" + # The pre-computed reference cache + self._ref_cache = None + + # only useful for BLEU tokenized warnings. Set to True so that + # warnings are not issued for other metrics. + self._force = True + + # Will be used by the signature when bootstrap resampling + self.n_bootstrap = None + self.seed = None + + def _check_sentence_score_args(self, hyp: str, refs: Sequence[str]): + """Performs sanity checks on `sentence_score` method's arguments. + + :param hyp: A single hypothesis string. + :param refs: A sequence of reference strings. + """ + prefix = self.__class__.__name__ + err_msg = None + + if not isinstance(hyp, str): + err_msg = 'The argument `hyp` should be a string.' 
+ elif isinstance(refs, str) or not isinstance(refs, Sequence): + err_msg = 'The argument `refs` should be a sequence of strings.' + elif not isinstance(refs[0], str) and refs[0] is not None: + err_msg = 'Each element of `refs` should be a string.' + + if err_msg: + raise TypeError(f'{prefix}: {err_msg}') + + def _check_corpus_score_args(self, hyps: Sequence[str], + refs: Optional[Sequence[Sequence[str]]]): + """Performs sanity checks on `corpus_score` method's arguments. + + :param hypses: A sequence of hypothesis strings. + :param refs: A sequence of reference documents with document being + defined as a sequence of reference strings. If `None`, cached references + will be used. + """ + + prefix = self.__class__.__name__ + err_msg = None + + if not isinstance(hyps, Sequence): + err_msg = "`hyps` should be a sequence of strings." + elif not isinstance(hyps[0], str): + err_msg = 'Each element of `hyps` should be a string.' + elif any(line is None for line in hyps): + err_msg = "Undefined line in hypotheses stream!" + + if refs is not None: + if not isinstance(refs, Sequence): + err_msg = "`refs` should be a sequence of sequence of strings." + elif not isinstance(refs[0], Sequence): + err_msg = "Each element of `refs` should be a sequence of strings." + elif not isinstance(refs[0][0], str) and refs[0][0] is not None: + err_msg = "`refs` should be a sequence of sequence of strings." + + if err_msg: + raise TypeError(f'{prefix}: {err_msg}') + + @abstractmethod + def _aggregate_and_compute(self, stats: List[List[Any]]) -> Any: + """Computes the final score given the pre-computed match statistics. + + :param stats: A list of segment-level statistics. + :return: A `Score` instance. + """ + pass + + @abstractmethod + def _compute_score_from_stats(self, stats: List[Any]) -> Any: + """Computes the final score from already aggregated statistics. + + :param stats: A list or numpy array of segment-level statistics. + :return: A `Score` object. + """ + pass + + @abstractmethod + def _preprocess_segment(self, sent: str) -> str: + """A wrapper around the metric's tokenization and pre-processing logic. + This should be implemented for reference caching to work correctly. + + :param sent: The input sentence. + :return: The pre-processed output sentence. + """ + pass + + @abstractmethod + def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, Any]: + """Given a list of reference segments, extract the required + information (such as n-grams for BLEU and chrF). This should be implemented + for the generic `_cache_references()` to work across all metrics. + + :param refs: A sequence of strings. + """ + pass + + @abstractmethod + def _compute_segment_statistics(self, hypothesis: str, ref_kwargs: Dict) -> List[Any]: + """Given a (pre-processed) hypothesis sentence and already computed + reference info, returns the best match statistics across the + references. The return type is usually a List of ints or floats. + + :param hypothesis: A pre-processed hypothesis sentence. + :param ref_kwargs: A dictionary with reference-related information + within. This is formulated as a dictionary as different metrics may + require different information regarding a reference segment. + """ + pass + + def _cache_references(self, references: Sequence[Sequence[str]]) -> List[Any]: + """Given the full set of document references, extract segment n-grams + (or other necessary information) for caching purposes. 
+ + :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. A particular reference + segment can be '' or `None` to allow the use of variable number + of references per segment. + :return: A list where each element is a tuple of segment n-grams and + reference lengths, as returned by `_extract_reference_info()`. + """ + ref_cache = [] + + # Decide on final number of refs here as well + num_refs = set() + + for refs in zip(*references): + # Remove undefined references + lines = [x for x in refs if x is not None] + + # Keep track of reference counts to allow variable reference + # info in the signature + num_refs.add(len(lines)) + + lines = [self._preprocess_segment(x) for x in lines] + + # Get n-grams + ref_cache.append(self._extract_reference_info(lines)) + + if len(num_refs) == 1: + self.num_refs = list(num_refs)[0] + else: + # A variable number of refs exist + self.num_refs = -1 + + return ref_cache + + def _extract_corpus_statistics(self, hypotheses: Sequence[str], + references: Optional[Sequence[Sequence[str]]]) -> Any: + """Reads the corpus and returns sentence-level match statistics for + faster re-computations esp. during statistical tests. + + :param hypotheses: A sequence of hypothesis strings. + :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. If `None`, cached references + will be used. + :return: A list where each sublist corresponds to segment statistics. + """ + # Pre-compute references + # Don't store the cache as the user is explicitly passing refs + if references: + ref_cache = self._cache_references(references) + elif self._ref_cache: + ref_cache = self._ref_cache + else: + raise RuntimeError('No references provided and the cache is empty.') + + stats = [] + tok_count = 0 + + for hyp, ref_kwargs in zip(hypotheses, ref_cache): + # Check for already-tokenized input problem (only for BLEU) + if not self._force and hyp.endswith(' .'): + tok_count += 1 + + hyp = self._preprocess_segment(hyp) + + # Collect stats + stats.append(self._compute_segment_statistics(hyp, ref_kwargs)) + + if tok_count >= 100: + sacrelogger.warning("That's 100 lines that end in a tokenized period ('.')") + sacrelogger.warning("It looks like you forgot to detokenize your test data, which may hurt your score.") + sacrelogger.warning("If you insist your data is detokenized, or don't care, you can suppress this message with the `force` parameter.") + + return stats + + def sentence_score(self, hypothesis: str, references: Sequence[str]) -> Any: + """Compute the metric for a single sentence against a single (or multiple) reference(s). + + :param hypothesis: A single hypothesis string. + :param references: A sequence of reference strings. + :return: A `Score` object. + """ + self._check_sentence_score_args(hypothesis, references) + + stats = self._extract_corpus_statistics( + [hypothesis], [[refs] for refs in references]) + return self._aggregate_and_compute(stats) + + def corpus_score(self, hypotheses: Sequence[str], + references: Optional[Sequence[Sequence[str]]], + n_bootstrap: int = 1) -> Any: + """Compute the metric for a corpus against a single (or multiple) reference(s). + + :param hypotheses: A sequence of hypothesis strings. + :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. If `None`, cached references + will be used. 
+ :param n_bootstrap: If > 1, provides 95% confidence interval around true mean + using bootstrap resampling with `n_bootstrap` samples. + :return: A `Score` object. + """ + self._check_corpus_score_args(hypotheses, references) + + # Collect corpus stats + stats = self._extract_corpus_statistics(hypotheses, references) + + # Compute the actual system score + actual_score = self._aggregate_and_compute(stats) + + if n_bootstrap > 1: + # Compute bootstrap estimate as well + # Delayed import is to escape from numpy import if bootstrap + # is not requested. + from ..significance import _bootstrap_resample + + self.n_bootstrap = n_bootstrap + self.seed, bs_scores = _bootstrap_resample(stats, self, n_bootstrap) + actual_score.estimate_ci(bs_scores) + + return actual_score + + def get_signature(self) -> Signature: + """Creates and returns the signature for the metric. The creation + of signatures is delayed as the number of references is resolved + only at the point of reference caching.""" + return self._SIGNATURE_TYPE(self.__dict__) diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/bleu.py b/venv/lib/python3.10/site-packages/sacrebleu/metrics/bleu.py new file mode 100644 index 0000000000000000000000000000000000000000..5ca77b9af5c4ecc77acde3b7816607d11cd4bc7f --- /dev/null +++ b/venv/lib/python3.10/site-packages/sacrebleu/metrics/bleu.py @@ -0,0 +1,420 @@ +"""The implementation of the BLEU metric (Papineni et al., 2002).""" + +import math +import logging +from importlib import import_module +from typing import List, Sequence, Optional, Dict, Any + +from ..utils import my_log, sum_of_lists + +from .base import Score, Signature, Metric +from .helpers import extract_all_word_ngrams + +sacrelogger = logging.getLogger('sacrebleu') + +# The default for the maximum n-gram order when computing precisions +MAX_NGRAM_ORDER = 4 + +_TOKENIZERS = { + 'none': 'tokenizer_none.NoneTokenizer', + 'zh': 'tokenizer_zh.TokenizerZh', + '13a': 'tokenizer_13a.Tokenizer13a', + 'intl': 'tokenizer_intl.TokenizerV14International', + 'char': 'tokenizer_char.TokenizerChar', + 'ja-mecab': 'tokenizer_ja_mecab.TokenizerJaMecab', + 'ko-mecab': 'tokenizer_ko_mecab.TokenizerKoMecab', + 'spm': 'tokenizer_spm.TokenizerSPM', + 'flores101': 'tokenizer_spm.Flores101Tokenizer', + 'flores200': 'tokenizer_spm.Flores200Tokenizer', +} + + +def _get_tokenizer(name: str): + """Dynamically import tokenizer as importing all is slow.""" + module_name, class_name = _TOKENIZERS[name].rsplit('.', 1) + return getattr( + import_module(f'.tokenizers.{module_name}', 'sacrebleu'), + class_name) + + +class BLEUSignature(Signature): + """A convenience class to represent the reproducibility signature for BLEU. + + :param args: key-value dictionary passed from the actual metric instance. 
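+
+     A rendered signature is a `|`-joined list of `key:value` pairs, for example
+     (illustrative only; the exact fields and version depend on the configuration):
+
+         nrefs:1|case:mixed|eff:no|tok:13a|smooth:exp|version:2.x.x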
+ """ + def __init__(self, args: dict): + """`BLEUSignature` initializer.""" + super().__init__(args) + + self._abbr.update({ + 'case': 'c', + 'eff': 'e', + 'tok': 'tok', + 'smooth': 's', + }) + + # Construct a combined string for smoothing method and value + smooth_str = args['smooth_method'] + smooth_def = BLEU.SMOOTH_DEFAULTS[smooth_str] + + # If the method requires a parameter, add it within brackets + if smooth_def is not None: + # the following can be None if the user wants to use the default + smooth_val = args['smooth_value'] + + if smooth_val is None: + smooth_val = smooth_def + + smooth_str += f'[{smooth_val:.2f}]' + + self.info.update({ + 'case': 'lc' if args['lowercase'] else 'mixed', + 'eff': 'yes' if args['effective_order'] else 'no', + 'tok': args['tokenizer_signature'], + 'smooth': smooth_str, + }) + + +class BLEUScore(Score): + """A convenience class to represent BLEU scores. + + :param score: The BLEU score. + :param counts: List of counts of correct ngrams, 1 <= n <= max_ngram_order + :param totals: List of counts of total ngrams, 1 <= n <= max_ngram_order + :param precisions: List of precisions, 1 <= n <= max_ngram_order + :param bp: The brevity penalty. + :param sys_len: The cumulative system length. + :param ref_len: The cumulative reference length. + """ + def __init__(self, score: float, counts: List[int], totals: List[int], + precisions: List[float], bp: float, + sys_len: int, ref_len: int): + """`BLEUScore` initializer.""" + super().__init__('BLEU', score) + self.bp = bp + self.counts = counts + self.totals = totals + self.sys_len = sys_len + self.ref_len = ref_len + self.precisions = precisions + + self.prec_str = "/".join([f"{p:.1f}" for p in self.precisions]) + self.ratio = self.sys_len / self.ref_len if self.ref_len else 0 + + # The verbose part of BLEU + self._verbose = f"{self.prec_str} (BP = {self.bp:.3f} " + self._verbose += f"ratio = {self.ratio:.3f} hyp_len = {self.sys_len:d} " + self._verbose += f"ref_len = {self.ref_len:d})" + + +class BLEU(Metric): + """Computes the BLEU metric given hypotheses and references. + + :param lowercase: If True, lowercased BLEU is computed. + :param force: Ignore data that looks already tokenized. + :param tokenize: The tokenizer to use. If None, defaults to language-specific tokenizers with '13a' as the fallback default. + :param smooth_method: The smoothing method to use ('floor', 'add-k', 'exp' or 'none'). + :param smooth_value: The smoothing value for `floor` and `add-k` methods. `None` falls back to default value. + :param max_ngram_order: If given, it overrides the maximum n-gram order (default: 4) when computing precisions. + :param effective_order: If `True`, stop including n-gram orders for which precision is 0. This should be + `True`, if sentence-level BLEU will be computed. + :param trg_lang: An optional language code to raise potential tokenizer warnings. + :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. If given, the reference n-grams + and lengths will be pre-computed and cached for faster BLEU computation + across many systems. 
+ """ + + SMOOTH_DEFAULTS: Dict[str, Optional[float]] = { + # The defaults for `floor` and `add-k` are obtained from the following paper + # A Systematic Comparison of Smoothing Techniques for Sentence-Level BLEU + # Boxing Chen and Colin Cherry + # http://aclweb.org/anthology/W14-3346 + 'none': None, # No value is required + 'floor': 0.1, + 'add-k': 1, + 'exp': None, # No value is required + } + + TOKENIZERS = _TOKENIZERS.keys() + + # mteval-v13a.pl tokenizer unless Chinese or Japanese is provided + TOKENIZER_DEFAULT = '13a' + + # Some language specific mappings to use if `trg_lang` is given + # and the tokenizer is not explicitly specified + _TOKENIZER_MAP = { + 'zh': 'zh', + 'ja': 'ja-mecab', + 'ko': 'ko-mecab', + } + + _SIGNATURE_TYPE = BLEUSignature + + def __init__(self, lowercase: bool = False, + force: bool = False, + tokenize: Optional[str] = None, + smooth_method: str = 'exp', + smooth_value: Optional[float] = None, + max_ngram_order: int = MAX_NGRAM_ORDER, + effective_order: bool = False, + trg_lang: str = '', + references: Optional[Sequence[Sequence[str]]] = None): + """`BLEU` initializer.""" + super().__init__() + + self._force = force + self.trg_lang = trg_lang + self.lowercase = lowercase + self.smooth_value = smooth_value + self.smooth_method = smooth_method + self.max_ngram_order = max_ngram_order + self.effective_order = effective_order + + # Sanity check + assert self.smooth_method in self.SMOOTH_DEFAULTS.keys(), \ + "Unknown smooth_method {self.smooth_method!r}" + + # If the tokenizer wasn't specified, choose it according to the + # following logic. We use 'v13a' except for ZH and JA. Note that + # this logic can only be applied when sacrebleu knows the target + # language, which is only the case for builtin datasets. + if tokenize is None: + best_tokenizer = self.TOKENIZER_DEFAULT + + # Set `zh` or `ja-mecab` or `ko-mecab` if target language is provided + if self.trg_lang in self._TOKENIZER_MAP: + best_tokenizer = self._TOKENIZER_MAP[self.trg_lang] + else: + best_tokenizer = tokenize + if self.trg_lang == 'zh' and best_tokenizer != 'zh': + sacrelogger.warning( + "Consider using the 'zh' or 'spm' tokenizer for Chinese.") + if self.trg_lang == 'ja' and best_tokenizer != 'ja-mecab': + sacrelogger.warning( + "Consider using the 'ja-mecab' or 'spm' tokenizer for Japanese.") + if self.trg_lang == 'ko' and best_tokenizer != 'ko-mecab': + sacrelogger.warning( + "Consider using the 'ko-mecab' or 'spm' tokenizer for Korean.") + + # Create the tokenizer + self.tokenizer = _get_tokenizer(best_tokenizer)() + + # Build the signature + self.tokenizer_signature = self.tokenizer.signature() + + if references is not None: + # Pre-compute reference ngrams and lengths + self._ref_cache = self._cache_references(references) + + @staticmethod + def compute_bleu(correct: List[int], + total: List[int], + sys_len: int, + ref_len: int, + smooth_method: str = 'none', + smooth_value=None, + effective_order: bool = False, + max_ngram_order: int = MAX_NGRAM_ORDER) -> BLEUScore: + """Computes BLEU score from its sufficient statistics with smoothing. + + Smoothing methods (citing "A Systematic Comparison of Smoothing Techniques for Sentence-Level BLEU", + Boxing Chen and Colin Cherry, WMT 2014: http://aclweb.org/anthology/W14-3346) + + - none: No smoothing. + - floor: Method 1 (requires small positive value (0.1 in the paper) to be set) + - add-k: Method 2 (Generalizing Lin and Och, 2004) + - exp: Method 3 (NIST smoothing method i.e. 
in use with mteval-v13a.pl) + + :param correct: List of counts of correct ngrams, 1 <= n <= max_ngram_order + :param total: List of counts of total ngrams, 1 <= n <= max_ngram_order + :param sys_len: The cumulative system length + :param ref_len: The cumulative reference length + :param smooth_method: The smoothing method to use ('floor', 'add-k', 'exp' or 'none') + :param smooth_value: The smoothing value for `floor` and `add-k` methods. `None` falls back to default value. + :param effective_order: If `True`, stop including n-gram orders for which precision is 0. This should be + `True`, if sentence-level BLEU will be computed. + :param max_ngram_order: If given, it overrides the maximum n-gram order (default: 4) when computing precisions. + :return: A `BLEUScore` instance. + """ + assert smooth_method in BLEU.SMOOTH_DEFAULTS.keys(), \ + "Unknown smooth_method {smooth_method!r}" + + # Fetch the default value for floor and add-k + if smooth_value is None: + smooth_value = BLEU.SMOOTH_DEFAULTS[smooth_method] + + # Compute brevity penalty + if sys_len < ref_len: + bp = math.exp(1 - ref_len / sys_len) if sys_len > 0 else 0.0 + else: + bp = 1.0 + + # n-gram precisions + precisions = [0.0 for x in range(max_ngram_order)] + + # Early stop if there are no matches (#141) + if not any(correct): + return BLEUScore(0.0, correct, total, precisions, bp, sys_len, ref_len) + + smooth_mteval = 1. + eff_order = max_ngram_order + for n in range(1, len(precisions) + 1): + if smooth_method == 'add-k' and n > 1: + correct[n - 1] += smooth_value + total[n - 1] += smooth_value + + if total[n - 1] == 0: + break + + # If the system guesses no i-grams, 1 <= i <= max_ngram_order, + # the BLEU score is 0 (technically undefined). This is a problem for sentence + # level BLEU or a corpus of short sentences, where systems will get + # no credit if sentence lengths fall under the max_ngram_order threshold. + # This fix scales max_ngram_order to the observed maximum order. + # It is only available through the API and off by default + if effective_order: + eff_order = n + + if correct[n - 1] == 0: + if smooth_method == 'exp': + smooth_mteval *= 2 + precisions[n - 1] = 100. / (smooth_mteval * total[n - 1]) + elif smooth_method == 'floor': + precisions[n - 1] = 100. * smooth_value / total[n - 1] + else: + precisions[n - 1] = 100. * correct[n - 1] / total[n - 1] + + # Compute BLEU score + score = bp * math.exp( + sum([my_log(p) for p in precisions[:eff_order]]) / eff_order) + + return BLEUScore(score, correct, total, precisions, bp, sys_len, ref_len) + + def _preprocess_segment(self, sent: str) -> str: + """Given a sentence, lowercases (optionally) and tokenizes it + :param sent: The input sentence string. + :return: The pre-processed output string. + """ + if self.lowercase: + sent = sent.lower() + return self.tokenizer(sent.rstrip()) + + def _compute_score_from_stats(self, stats: List[int]) -> BLEUScore: + """Computes the final score from already aggregated statistics. + + :param stats: A list or numpy array of segment-level statistics. + :return: A `BLEUScore` object. 
+ """ + return self.compute_bleu( + correct=stats[2: 2 + self.max_ngram_order], + total=stats[2 + self.max_ngram_order:], + sys_len=int(stats[0]), ref_len=int(stats[1]), + smooth_method=self.smooth_method, smooth_value=self.smooth_value, + effective_order=self.effective_order, + max_ngram_order=self.max_ngram_order + ) + + def _aggregate_and_compute(self, stats: List[List[int]]) -> BLEUScore: + """Computes the final BLEU score given the pre-computed corpus statistics. + + :param stats: A list of segment-level statistics + :return: A `BLEUScore` instance. + """ + return self._compute_score_from_stats(sum_of_lists(stats)) + + def _get_closest_ref_len(self, hyp_len: int, ref_lens: List[int]) -> int: + """Given a hypothesis length and a list of reference lengths, returns + the closest reference length to be used by BLEU. + + :param hyp_len: The hypothesis length. + :param ref_lens: A list of reference lengths. + :return: The closest reference length. + """ + closest_diff, closest_len = -1, -1 + + for ref_len in ref_lens: + diff = abs(hyp_len - ref_len) + if closest_diff == -1 or diff < closest_diff: + closest_diff = diff + closest_len = ref_len + elif diff == closest_diff and ref_len < closest_len: + closest_len = ref_len + + return closest_len + + def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, Any]: + """Given a list of reference segments, extract the n-grams and reference lengths. + The latter will be useful when comparing hypothesis and reference lengths for BLEU. + + :param refs: A sequence of strings. + :return: A dictionary that will be passed to `_compute_segment_statistics()` + through keyword arguments. + """ + ngrams = None + ref_lens = [] + + for ref in refs: + # extract n-grams for this ref + this_ngrams, ref_len = extract_all_word_ngrams(ref, 1, self.max_ngram_order) + ref_lens.append(ref_len) + + if ngrams is None: + # Set it directly for first set of refs + ngrams = this_ngrams + else: + # Merge counts across multiple references + # The below loop is faster than `ngrams |= this_ngrams` + for ngram, count in this_ngrams.items(): + ngrams[ngram] = max(ngrams[ngram], count) + + return {'ref_ngrams': ngrams, 'ref_lens': ref_lens} + + def _compute_segment_statistics(self, hypothesis: str, + ref_kwargs: Dict) -> List[int]: + """Given a (pre-processed) hypothesis sentence and already computed + reference n-grams & lengths, returns the best match statistics across the + references. + + :param hypothesis: Hypothesis sentence. + :param ref_kwargs: A dictionary with `refs_ngrams`and `ref_lens` keys + that denote the counter containing all n-gram counts and reference lengths, + respectively. + :return: A list of integers with match statistics. 
+ """ + + ref_ngrams, ref_lens = ref_kwargs['ref_ngrams'], ref_kwargs['ref_lens'] + + # Extract n-grams for the hypothesis + hyp_ngrams, hyp_len = extract_all_word_ngrams( + hypothesis, 1, self.max_ngram_order) + + ref_len = self._get_closest_ref_len(hyp_len, ref_lens) + + # Count the stats + # Although counter has its internal & and | operators, this is faster + correct = [0 for i in range(self.max_ngram_order)] + total = correct[:] + for hyp_ngram, hyp_count in hyp_ngrams.items(): + # n-gram order + n = len(hyp_ngram) - 1 + # count hypothesis n-grams + total[n] += hyp_count + # count matched n-grams + if hyp_ngram in ref_ngrams: + correct[n] += min(hyp_count, ref_ngrams[hyp_ngram]) + + # Return a flattened list for efficient computation + return [hyp_len, ref_len] + correct + total + + def sentence_score(self, hypothesis: str, references: Sequence[str]) -> BLEUScore: + """Compute the metric for a single sentence against a single (or multiple) reference(s). + + :param hypothesis: A single hypothesis string. + :param references: A sequence of reference strings. + :return: a `BLEUScore` object. + """ + if not self.effective_order: + sacrelogger.warning( + 'It is recommended to enable `effective_order` for sentence-level BLEU.') + return super().sentence_score(hypothesis, references) diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/chrf.py b/venv/lib/python3.10/site-packages/sacrebleu/metrics/chrf.py new file mode 100644 index 0000000000000000000000000000000000000000..f7d4f6858d0c6005f97ad8011a0b17bd97c2bcea --- /dev/null +++ b/venv/lib/python3.10/site-packages/sacrebleu/metrics/chrf.py @@ -0,0 +1,284 @@ +"""The implementation of chrF (Popović 2015) and chrF++ (Popović 2017) metrics.""" + +from typing import List, Sequence, Optional, Dict +from collections import Counter + +from ..utils import sum_of_lists +from .base import Score, Signature, Metric +from .helpers import extract_all_char_ngrams, extract_word_ngrams + + +class CHRFSignature(Signature): + """A convenience class to represent the reproducibility signature for chrF. + + :param args: key-value dictionary passed from the actual metric instance. + """ + def __init__(self, args: dict): + """`CHRFSignature` initializer.""" + super().__init__(args) + self._abbr.update({ + 'case': 'c', + 'eff': 'e', + 'nc': 'nc', + 'nw': 'nw', + 'space': 's', + }) + + self.info.update({ + 'case': 'lc' if args['lowercase'] else 'mixed', + 'eff': 'yes' if not args['eps_smoothing'] else 'no', + 'nc': args['char_order'], + 'nw': args['word_order'], + 'space': 'yes' if args['whitespace'] else 'no', + }) + + +class CHRFScore(Score): + """A convenience class to represent chrF scores. + + :param score: The chrF (chrF++) score. + :param char_order: The character n-gram order. + :param word_order: The word n-gram order. If equals to 2, the metric is referred to as chrF++. + :param beta: Determine the importance of recall w.r.t precision. + """ + def __init__(self, score: float, char_order: int, word_order: int, beta: int): + """`CHRFScore` initializer.""" + self.beta = beta + self.char_order = char_order + self.word_order = word_order + + # Add + signs to denote chrF+ variant + name = f'chrF{self.beta}' + '+' * self.word_order + + super().__init__(name, score) + + +class CHRF(Metric): + """Computes the chrF(++) metric given hypotheses and references. + + :param char_order: Character n-gram order. + :param word_order: Word n-gram order. If equals to 2, the metric is referred to as chrF++. 
+ :param beta: Determine the importance of recall w.r.t precision. + :param lowercase: Enable case-insensitivity. + :param whitespace: If `True`, include whitespaces when extracting character n-grams. + :param eps_smoothing: If `True`, applies epsilon smoothing similar + to reference chrF++.py, NLTK and Moses implementations. Otherwise, + it takes into account effective match order similar to sacreBLEU < 2.0.0. + :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. If given, the reference n-grams + will be pre-computed and cached for faster re-computation across many systems. + """ + + # Maximum character n-gram order to take into account + CHAR_ORDER = 6 + + # chrF+ additionally takes into account some of the word n-grams + WORD_ORDER = 0 + + # Defaults to 2 (per http://www.aclweb.org/anthology/W16-2341) + BETA = 2 + + # Cache string.punctuation for chrF+' punctuation stripper + _PUNCTS = set('!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~') + + _SIGNATURE_TYPE = CHRFSignature + + def __init__(self, char_order: int = CHAR_ORDER, + word_order: int = WORD_ORDER, + beta: int = BETA, + lowercase: bool = False, + whitespace: bool = False, + eps_smoothing: bool = False, + references: Optional[Sequence[Sequence[str]]] = None): + """`CHRF` initializer.""" + super().__init__() + + self.beta = beta + self.char_order = char_order + self.word_order = word_order + self.order = self.char_order + self.word_order + self.lowercase = lowercase + self.whitespace = whitespace + self.eps_smoothing = eps_smoothing + + if references is not None: + # Pre-compute reference ngrams + self._ref_cache = self._cache_references(references) + + @staticmethod + def _get_match_statistics(hyp_ngrams: Counter, ref_ngrams: Counter) -> List[int]: + """Computes the match statistics between hypothesis and reference n-grams. + + :param hyp_ngrams: A `Counter` holding hypothesis n-grams. + :param ref_ngrams: A `Counter` holding reference n-grams. + :return: A list of three numbers denoting hypothesis n-gram count, + reference n-gram count and the intersection count. + """ + # Counter's internal intersection is not that fast, count manually + match_count, hyp_count = 0, 0 + for ng, count in hyp_ngrams.items(): + hyp_count += count + if ng in ref_ngrams: + match_count += min(count, ref_ngrams[ng]) + + return [ + # Don't count hits if no reference exists for that n-gram + hyp_count if ref_ngrams else 0, + sum(ref_ngrams.values()), + match_count, + ] + + def _remove_punctuation(self, sent: str) -> List[str]: + """Separates out punctuations from beginning and end of words for chrF. + Adapted from https://github.com/m-popovic/chrF + + :param sent: A string. + :return: A list of words. + """ + tokenized = [] + for w in sent.split(): + if len(w) == 1: + tokenized.append(w) + else: + # NOTE: This splits '(hi)' to '(hi' and ')' (issue #124) + if w[-1] in self._PUNCTS: + tokenized += [w[:-1], w[-1]] + elif w[0] in self._PUNCTS: + tokenized += [w[0], w[1:]] + else: + tokenized.append(w) + return tokenized + + def _preprocess_segment(self, sent: str) -> str: + """Given a sentence, apply optional lowercasing. + + :param sent: The input sentence string. + :return: The pre-processed output string. + """ + return sent.lower() if self.lowercase else sent + + def _compute_f_score(self, statistics: List[int]) -> float: + """Compute the chrF score given the n-gram match statistics. 
+ + :param statistics: A flattened list of 3 * (`char_order` + `word_order`) + elements giving the [hyp, ref, match] counts for each order. + :return: The final f_beta score between [0, 100]. + """ + eps = 1e-16 + score = 0.0 + effective_order = 0 + factor = self.beta ** 2 + avg_prec, avg_rec = 0.0, 0.0 + + for i in range(self.order): + n_hyp, n_ref, n_match = statistics[3 * i: 3 * i + 3] + + # chrF++.py style EPS smoothing (also used by Moses and NLTK) + prec = n_match / n_hyp if n_hyp > 0 else eps + rec = n_match / n_ref if n_ref > 0 else eps + + denom = factor * prec + rec + score += ((1 + factor) * prec * rec / denom) if denom > 0 else eps + + # sacreBLEU <2.0.0 style effective order smoothing + if n_hyp > 0 and n_ref > 0: + avg_prec += prec + avg_rec += rec + effective_order += 1 + + if self.eps_smoothing: + return 100 * score / self.order + + if effective_order == 0: + avg_prec = avg_rec = 0.0 + else: + avg_prec /= effective_order + avg_rec /= effective_order + + if avg_prec + avg_rec: + score = (1 + factor) * avg_prec * avg_rec + score /= ((factor * avg_prec) + avg_rec) + return 100 * score + else: + return 0.0 + + def _compute_score_from_stats(self, stats: List[int]) -> CHRFScore: + """Computes the final score from already aggregated statistics. + + :param stats: A list or numpy array of segment-level statistics. + :return: A `CHRFScore` object. + """ + return CHRFScore( + self._compute_f_score(stats), self.char_order, + self.word_order, self.beta) + + def _aggregate_and_compute(self, stats: List[List[int]]) -> CHRFScore: + """Computes the final score given the pre-computed corpus statistics. + + :param stats: A list of segment-level statistics + :return: A `CHRFScore` object. + """ + return self._compute_score_from_stats(sum_of_lists(stats)) + + def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, List[List[Counter]]]: + """Given a list of reference segments, extract the character and word n-grams. + + :param refs: A sequence of reference segments. + :return: A list where each element contains n-grams per reference segment. + """ + ngrams = [] + + for ref in refs: + # extract character n-grams + stats = extract_all_char_ngrams(ref, self.char_order, self.whitespace) + + # Check chrF+ mode + if self.word_order > 0: + ref_words = self._remove_punctuation(ref) + + for n in range(self.word_order): + stats.append(extract_word_ngrams(ref_words, n + 1)) + + ngrams.append(stats) + + return {'ref_ngrams': ngrams} + + def _compute_segment_statistics( + self, hypothesis: str, ref_kwargs: Dict) -> List[int]: + """Given a (pre-processed) hypothesis sentence and already computed + reference n-grams, returns the best match statistics across the + references. + + :param hypothesis: Hypothesis sentence. + :param ref_kwargs: A dictionary with key `ref_ngrams` which is a list + where each sublist contains n-gram counters for a particular reference sentence. + :return: A list of integers where each triplet denotes [hyp, ref, match] + statistics. 
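+
+     For example, with the default `char_order` of 6 and `word_order` of 0, the
+     returned list has 6 * 3 = 18 elements:
+
+         [hyp_1, ref_1, match_1, hyp_2, ref_2, match_2, ..., hyp_6, ref_6, match_6]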
+ """ + best_stats = [] + best_f_score = -1.0 + + # extract character n-grams + all_hyp_ngrams = extract_all_char_ngrams( + hypothesis, self.char_order, self.whitespace) + + # Check chrF+ mode to see if we'll add word n-grams as well + if self.word_order > 0: + # Primitive tokenization: separate out punctuations + hwords = self._remove_punctuation(hypothesis) + _range = range(1, self.word_order + 1) + all_hyp_ngrams.extend([extract_word_ngrams(hwords, n) for n in _range]) + + # Iterate over multiple references, pick the one with best F score + for _ref_ngrams in ref_kwargs['ref_ngrams']: + stats = [] + # Traverse all orders + for h, r in zip(all_hyp_ngrams, _ref_ngrams): + stats.extend(self._get_match_statistics(h, r)) + f_score = self._compute_f_score(stats) + + if f_score > best_f_score: + best_f_score = f_score + best_stats = stats + + return best_stats diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/helpers.py b/venv/lib/python3.10/site-packages/sacrebleu/metrics/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..72ec14461658249fcd63a139623f3ead9a4aa057 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sacrebleu/metrics/helpers.py @@ -0,0 +1,69 @@ +"""Various utility functions for word and character n-gram extraction.""" + +from collections import Counter +from typing import List, Tuple + + +def extract_all_word_ngrams(line: str, min_order: int, max_order: int) -> Tuple[Counter, int]: + """Extracts all ngrams (min_order <= n <= max_order) from a sentence. + + :param line: A string sentence. + :param min_order: Minimum n-gram order. + :param max_order: Maximum n-gram order. + :return: a Counter object with n-grams counts and the sequence length. + """ + + ngrams = [] + tokens = line.split() + + for n in range(min_order, max_order + 1): + for i in range(0, len(tokens) - n + 1): + ngrams.append(tuple(tokens[i: i + n])) + + return Counter(ngrams), len(tokens) + + +def extract_word_ngrams(tokens: List[str], n: int) -> Counter: + """Extracts n-grams with order `n` from a list of tokens. + + :param tokens: A list of tokens. + :param n: The order of n-grams. + :return: a Counter object with n-grams counts. + """ + return Counter([' '.join(tokens[i:i + n]) for i in range(len(tokens) - n + 1)]) + + +def extract_char_ngrams(line: str, n: int, include_whitespace: bool = False) -> Counter: + """Yields counts of character n-grams from a sentence. + + :param line: A segment containing a sequence of words. + :param n: The order of the n-grams. + :param include_whitespace: If given, will not strip whitespaces from the line. + :return: a dictionary containing ngrams and counts + """ + if not include_whitespace: + line = ''.join(line.split()) + + return Counter([line[i:i + n] for i in range(len(line) - n + 1)]) + + +def extract_all_char_ngrams( + line: str, max_order: int, include_whitespace: bool = False) -> List[Counter]: + """Extracts all character n-grams at once for convenience. + + :param line: A segment containing a sequence of words. + :param max_order: The maximum order of the n-grams. + :param include_whitespace: If given, will not strip whitespaces from the line. + :return: a list of Counter objects containing ngrams and counts. 
+ """ + + counters = [] + + if not include_whitespace: + line = ''.join(line.split()) + + for n in range(1, max_order + 1): + ngrams = Counter([line[i:i + n] for i in range(len(line) - n + 1)]) + counters.append(ngrams) + + return counters diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/lib_ter.py b/venv/lib/python3.10/site-packages/sacrebleu/metrics/lib_ter.py new file mode 100644 index 0000000000000000000000000000000000000000..2d2de4944c955ebf0c8b37fce7f04eb16f79c026 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sacrebleu/metrics/lib_ter.py @@ -0,0 +1,478 @@ +"""This module implements various utility functions for the TER metric.""" + +# Copyright 2020 Memsource +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import math +from typing import List, Tuple, Dict + + +_COST_INS = 1 +_COST_DEL = 1 +_COST_SUB = 1 + +# Tercom-inspired limits +_MAX_SHIFT_SIZE = 10 +_MAX_SHIFT_DIST = 50 +_BEAM_WIDTH = 25 + +# Our own limits +_MAX_CACHE_SIZE = 10000 +_MAX_SHIFT_CANDIDATES = 1000 +_INT_INFINITY = int(1e16) + +_OP_INS = 'i' +_OP_DEL = 'd' +_OP_NOP = ' ' +_OP_SUB = 's' +_OP_UNDEF = 'x' + +_FLIP_OPS = str.maketrans(_OP_INS + _OP_DEL, _OP_DEL + _OP_INS) + + +def translation_edit_rate(words_hyp: List[str], words_ref: List[str]) -> Tuple[int, int]: + """Calculate the translation edit rate. + + :param words_hyp: Tokenized translation hypothesis. + :param words_ref: Tokenized reference translation. + :return: tuple (number of edits, length) + """ + n_words_ref = len(words_ref) + n_words_hyp = len(words_hyp) + if n_words_ref == 0: + # FIXME: This trace here is not used? + trace = _OP_DEL * n_words_hyp + # special treatment of empty refs + return n_words_hyp, 0 + + cached_ed = BeamEditDistance(words_ref) + shifts = 0 + + input_words = words_hyp + checked_candidates = 0 + while True: + # do shifts until they stop reducing the edit distance + delta, new_input_words, checked_candidates = _shift( + input_words, words_ref, cached_ed, checked_candidates) + + if checked_candidates >= _MAX_SHIFT_CANDIDATES: + break + + if delta <= 0: + break + shifts += 1 + input_words = new_input_words + + edit_distance, trace = cached_ed(input_words) + total_edits = shifts + edit_distance + + return total_edits, n_words_ref + + +def _shift(words_h: List[str], words_r: List[str], cached_ed, + checked_candidates: int) -> Tuple[int, List[str], int]: + """Attempt to shift words in hypothesis to match reference. + + Returns the shift that reduces the edit distance the most. + + Note that the filtering of possible shifts and shift selection are heavily + based on somewhat arbitrary heuristics. The code here follows as closely + as possible the logic in Tercom, not always justifying the particular design + choices. + + :param words_h: Hypothesis. + :param words_r: Reference. + :param cached_ed: Cached edit distance. + :param checked_candidates: Number of shift candidates that were already + evaluated. + :return: (score, shifted_words, checked_candidates). 
Best shift and updated + number of evaluated shift candidates. + """ + pre_score, inv_trace = cached_ed(words_h) + + # to get alignment, we pretend we are rewriting reference into hypothesis, + # so we need to flip the trace of edit operations + trace = _flip_trace(inv_trace) + align, ref_err, hyp_err = trace_to_alignment(trace) + + best = None + + for start_h, start_r, length in _find_shifted_pairs(words_h, words_r): + # don't do the shift unless both the hypothesis was wrong and the + # reference doesn't match hypothesis at the target position + if sum(hyp_err[start_h: start_h + length]) == 0: + continue + + if sum(ref_err[start_r: start_r + length]) == 0: + continue + + # don't try to shift within the subsequence + if start_h <= align[start_r] < start_h + length: + continue + + prev_idx = -1 + for offset in range(-1, length): + if start_r + offset == -1: + idx = 0 # insert before the beginning + elif start_r + offset in align: + # Unlike Tercom which inserts *after* the index, we insert + # *before* the index. + idx = align[start_r + offset] + 1 + else: + break # offset is out of bounds => aims past reference + + if idx == prev_idx: + continue # skip idx if already tried + + prev_idx = idx + + shifted_words = _perform_shift(words_h, start_h, length, idx) + assert(len(shifted_words) == len(words_h)) + + # Elements of the tuple are designed to replicate Tercom ranking + # of shifts: + candidate = ( + pre_score - cached_ed(shifted_words)[0], # highest score first + length, # then, longest match first + -start_h, # then, earliest match first + -idx, # then, earliest target position first + shifted_words, + ) + + checked_candidates += 1 + + if not best or candidate > best: + best = candidate + + if checked_candidates >= _MAX_SHIFT_CANDIDATES: + break + + if not best: + return 0, words_h, checked_candidates + else: + best_score, _, _, _, shifted_words = best + return best_score, shifted_words, checked_candidates + + +def _perform_shift(words: List[str], start: int, length: int, target: int) -> List[str]: + """Perform a shift in `words` from `start` to `target`. + + :param words: Words to shift. + :param start: Where from. + :param length: How many words. + :param target: Where to. + :return: Shifted words. + """ + if target < start: + # shift before previous position + return words[:target] + words[start: start + length] \ + + words[target: start] + words[start + length:] + elif target > start + length: + # shift after previous position + return words[:start] + words[start + length: target] \ + + words[start: start + length] + words[target:] + else: + # shift within the shifted string + return words[:start] + words[start + length: length + target] \ + + words[start: start + length] + words[length + target:] + + +def _find_shifted_pairs(words_h: List[str], words_r: List[str]): + """Find matching word sub-sequences in two lists of words. + + Ignores sub-sequences starting at the same position. + + :param words_h: First word list. + :param words_r: Second word list. 
+ :return: Yields tuples of (h_start, r_start, length) such that: + words_h[h_start:h_start+length] = words_r[r_start:r_start+length] + """ + n_words_h = len(words_h) + n_words_r = len(words_r) + for start_h in range(n_words_h): + for start_r in range(n_words_r): + # this is slightly different from what tercom does but this should + # really only kick in in degenerate cases + if abs(start_r - start_h) > _MAX_SHIFT_DIST: + continue + + length = 0 + while words_h[start_h + length] == words_r[start_r + length] and length < _MAX_SHIFT_SIZE: + length += 1 + + yield start_h, start_r, length + + # If one sequence is consumed, stop processing + if n_words_h == start_h + length or n_words_r == start_r + length: + break + + +def _flip_trace(trace): + """Flip the trace of edit operations. + + Instead of rewriting a->b, get a recipe for rewriting b->a. + + Simply flips insertions and deletions. + """ + return trace.translate(_FLIP_OPS) + + +def trace_to_alignment(trace: str) -> Tuple[Dict, List, List]: + """Transform trace of edit operations into an alignment of the sequences. + + :param trace: Trace of edit operations (' '=no change or 's'/'i'/'d'). + :return: Alignment, error positions in reference, error positions in hypothesis. + """ + pos_hyp = -1 + pos_ref = -1 + hyp_err = [] + ref_err = [] + align = {} + + # we are rewriting a into b + for op in trace: + if op == _OP_NOP: + pos_hyp += 1 + pos_ref += 1 + align[pos_ref] = pos_hyp + hyp_err.append(0) + ref_err.append(0) + elif op == _OP_SUB: + pos_hyp += 1 + pos_ref += 1 + align[pos_ref] = pos_hyp + hyp_err.append(1) + ref_err.append(1) + elif op == _OP_INS: + pos_hyp += 1 + hyp_err.append(1) + elif op == _OP_DEL: + pos_ref += 1 + align[pos_ref] = pos_hyp + ref_err.append(1) + else: + raise Exception(f"unknown operation {op!r}") + + return align, ref_err, hyp_err + + +class BeamEditDistance: + """Edit distance with several features required for TER calculation. + + * internal cache + * "beam" search + * tracking of edit operations + + The internal self._cache works like this: + + Keys are words of the hypothesis. Values are tuples (next_node, row) where: + + * next_node is the cache for the next word in the sequence + * row is the stored row of the edit distance matrix + + Effectively, caching allows to skip several rows in the edit distance + matrix calculation and instead, to initialize the computation with the last + matching matrix row. + + Beam search, as implemented here, only explores a fixed-size sub-row of + candidates around the matrix diagonal (more precisely, it's a + "pseudo"-diagonal since we take the ratio of sequence lengths into account). + + Tracking allows to reconstruct the optimal sequence of edit operations. + + :param words_ref: A list of reference tokens. + """ + def __init__(self, words_ref: List[str]): + """`BeamEditDistance` initializer.""" + self._words_ref = words_ref + self._n_words_ref = len(self._words_ref) + + # first row corresponds to insertion operations of the reference, + # so we do 1 edit operation per reference word + self._initial_row = [(i * _COST_INS, _OP_INS) + for i in range(self._n_words_ref + 1)] + + self._cache = {} # type: Dict[str, Tuple] + self._cache_size = 0 + + # Precomputed empty matrix row. Contains infinities so that beam search + # avoids using the uninitialized cells. + self._empty_row = [(_INT_INFINITY, _OP_UNDEF)] * (self._n_words_ref + 1) + + def __call__(self, words_hyp: List[str]) -> Tuple[int, str]: + """Calculate edit distance between self._words_ref and the hypothesis. 
+ + Uses cache to skip some of the computation. + + :param words_hyp: Words in translation hypothesis. + :return: Edit distance score. + """ + + # skip initial words in the hypothesis for which we already know the + # edit distance + start_position, dist = self._find_cache(words_hyp) + + # calculate the rest of the edit distance matrix + edit_distance, newly_created_matrix, trace = self._edit_distance( + words_hyp, start_position, dist) + + # update our cache with the newly calculated rows + self._add_cache(words_hyp, newly_created_matrix) + + return edit_distance, trace + + def _edit_distance(self, words_h: List[str], start_h: int, + cache: List[List[Tuple[int, str]]]) -> Tuple[int, List, str]: + """Actual edit distance calculation. + + Can be initialized with the last cached row and a start position in + the hypothesis that it corresponds to. + + :param words_h: Words in translation hypothesis. + :param start_h: Position from which to start the calculation. + (This is zero if no cache match was found.) + :param cache: Precomputed rows corresponding to edit distance matrix + before `start_h`. + :return: Edit distance value, newly computed rows to update the + cache, trace. + """ + + n_words_h = len(words_h) + + # initialize the rest of the matrix with infinite edit distances + rest_empty = [list(self._empty_row) + for _ in range(n_words_h - start_h)] + + dist = cache + rest_empty + + assert len(dist) == n_words_h + 1 + + length_ratio = self._n_words_ref / n_words_h if words_h else 1 + + # in some crazy sentences, the difference in length is so large that + # we may end up with zero overlap with previous row + if _BEAM_WIDTH < length_ratio / 2: + beam_width = math.ceil(length_ratio / 2 + _BEAM_WIDTH) + else: + beam_width = _BEAM_WIDTH + + # calculate the Levenshtein distance + for i in range(start_h + 1, n_words_h + 1): + pseudo_diag = math.floor(i * length_ratio) + min_j = max(0, pseudo_diag - beam_width) + max_j = min(self._n_words_ref + 1, pseudo_diag + beam_width) + + if i == n_words_h: + max_j = self._n_words_ref + 1 + + for j in range(min_j, max_j): + if j == 0: + dist[i][j] = (dist[i - 1][j][0] + _COST_DEL, _OP_DEL) + else: + if words_h[i - 1] == self._words_ref[j - 1]: + cost_sub = 0 + op_sub = _OP_NOP + else: + cost_sub = _COST_SUB + op_sub = _OP_SUB + + # Tercom prefers no-op/sub, then insertion, then deletion. + # But since we flip the trace and compute the alignment from + # the inverse, we need to swap order of insertion and + # deletion in the preference. + ops = ( + (dist[i - 1][j - 1][0] + cost_sub, op_sub), + (dist[i - 1][j][0] + _COST_DEL, _OP_DEL), + (dist[i][j - 1][0] + _COST_INS, _OP_INS), + ) + + for op_cost, op_name in ops: + if dist[i][j][0] > op_cost: + dist[i][j] = op_cost, op_name + + # get the trace + trace = "" + i = n_words_h + j = self._n_words_ref + + while i > 0 or j > 0: + op = dist[i][j][1] + trace = op + trace + if op in (_OP_SUB, _OP_NOP): + i -= 1 + j -= 1 + elif op == _OP_INS: + j -= 1 + elif op == _OP_DEL: + i -= 1 + else: + raise Exception(f"unknown operation {op!r}") + + return dist[-1][-1][0], dist[len(cache):], trace + + def _add_cache(self, words_hyp: List[str], mat: List[List[Tuple]]): + """Add newly computed rows to cache. + + Since edit distance is only calculated on the hypothesis suffix that + was not in cache, the number of rows in `mat` may be shorter than + hypothesis length. In that case, we skip over these initial words. + + :param words_hyp: Hypothesis words. + :param mat: Edit distance matrix rows for each position. 
+ """ + if self._cache_size >= _MAX_CACHE_SIZE: + return + + node = self._cache + + n_mat = len(mat) + + # how many initial words to skip + skip_num = len(words_hyp) - n_mat + + # jump through the cache to the current position + for i in range(skip_num): + node = node[words_hyp[i]][0] + + assert len(words_hyp[skip_num:]) == n_mat + + # update cache with newly computed rows + for word, row in zip(words_hyp[skip_num:], mat): + if word not in node: + node[word] = ({}, tuple(row)) + self._cache_size += 1 + value = node[word] + node = value[0] + + def _find_cache(self, words_hyp: List[str]) -> Tuple[int, List[List]]: + """Find the already computed rows of the edit distance matrix in cache. + + Returns a partially computed edit distance matrix. + + :param words_hyp: Translation hypothesis. + :return: Tuple (start position, dist). + """ + node = self._cache + start_position = 0 + dist = [self._initial_row] + for word in words_hyp: + if word in node: + start_position += 1 + node, row = node[word] + dist.append(row) + else: + break + + return start_position, dist diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/ter.py b/venv/lib/python3.10/site-packages/sacrebleu/metrics/ter.py new file mode 100644 index 0000000000000000000000000000000000000000..40f8221853ac651502435fae3efd9db6a7f7aa04 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sacrebleu/metrics/ter.py @@ -0,0 +1,195 @@ +"""The implementation of the TER metric (Snover et al., 2006).""" + +# Copyright 2020 Memsource +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import List, Dict, Sequence, Optional, Any + +from ..tokenizers.tokenizer_ter import TercomTokenizer +from ..utils import sum_of_lists +from .base import Score, Signature, Metric +from .lib_ter import translation_edit_rate + + +class TERSignature(Signature): + """A convenience class to represent the reproducibility signature for TER. + + :param args: key-value dictionary passed from the actual metric instance. + """ + def __init__(self, args: dict): + """`TERSignature` initializer.""" + super().__init__(args) + self._abbr.update({ + 'case': 'c', + 'tok': 't', + 'norm': 'nr', + 'punct': 'pn', + 'asian': 'as', + }) + + self.info.update({ + 'case': 'mixed' if args['case_sensitive'] else 'lc', + 'tok': args['tokenizer_signature'], + 'norm': args['normalized'], + 'punct': not args['no_punct'], + 'asian': args['asian_support'], + }) + + +class TERScore(Score): + """A convenience class to represent TER scores. + + :param score: The TER score. + :param num_edits: The cumulative number of edits. + :param ref_length: The cumulative average reference length. + """ + def __init__(self, score: float, num_edits: float, ref_length: float): + """`TERScore` initializer.""" + super().__init__('TER', score) + self.num_edits = int(num_edits) + self.ref_length = ref_length + + +class TER(Metric): + """Translation edit rate (TER). A near-exact reimplementation of the Tercom + algorithm, produces identical results on all "sane" outputs. 
+ + Tercom original implementation: https://github.com/jhclark/tercom + + The beam edit distance algorithm uses a slightly different approach (we stay + around the diagonal which is faster, at least in Python) so in some + (extreme) corner cases, the output could differ. + + Caching in the edit distance is based partly on the PyTer package by Hiroyuki + Tanaka (MIT license). (https://github.com/aflc/pyter) + + :param normalized: Enable character normalization. By default, normalizes a couple of things such as + newlines being stripped, retrieving XML encoded characters, and fixing tokenization for punctuation. When + 'asian_support' is enabled, also normalizes specific Asian (CJK) character sequences, i.e. + split them down to the character level. + :param no_punct: Remove punctuation. Can be used in conjunction with 'asian_support' to also remove typical + punctuation markers in Asian languages (CJK). + :param asian_support: Enable special treatment of Asian characters. This option only has an effect when + 'normalized' and/or 'no_punct' is enabled. If 'normalized' is also enabled, then Asian (CJK) + characters are split down to the character level. If 'no_punct' is enabled alongside 'asian_support', + specific unicode ranges for CJK and full-width punctuations are also removed. + :param case_sensitive: If `True`, does not lowercase sentences. + :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. If given, the reference info + will be pre-computed and cached for faster re-computation across many systems. + """ + + _SIGNATURE_TYPE = TERSignature + + def __init__(self, normalized: bool = False, + no_punct: bool = False, + asian_support: bool = False, + case_sensitive: bool = False, + references: Optional[Sequence[Sequence[str]]] = None): + """`TER` initializer.""" + super().__init__() + + self.no_punct = no_punct + self.normalized = normalized + self.asian_support = asian_support + self.case_sensitive = case_sensitive + + self.tokenizer = TercomTokenizer( + normalized=self.normalized, + no_punct=self.no_punct, + asian_support=self.asian_support, + case_sensitive=self.case_sensitive, + ) + self.tokenizer_signature = self.tokenizer.signature() + + if references is not None: + self._ref_cache = self._cache_references(references) + + def _preprocess_segment(self, sent: str) -> str: + """Given a sentence, apply tokenization if enabled. + + :param sent: The input sentence string. + :return: The pre-processed output string. + """ + return self.tokenizer(sent.rstrip()) + + def _compute_score_from_stats(self, stats: List[float]) -> TERScore: + """Computes the final score from already aggregated statistics. + + :param stats: A list or numpy array of segment-level statistics. + :return: A `TERScore` object. + """ + total_edits, sum_ref_lengths = stats[0], stats[1] + + if sum_ref_lengths > 0: + score = total_edits / sum_ref_lengths + elif total_edits > 0: + score = 1.0 # empty reference(s) and non-empty hypothesis + else: + score = 0.0 # both reference(s) and hypothesis are empty + + return TERScore(100 * score, total_edits, sum_ref_lengths) + + def _aggregate_and_compute(self, stats: List[List[float]]) -> TERScore: + """Computes the final TER score given the pre-computed corpus statistics. + + :param stats: A list of segment-level statistics + :return: A `TERScore` instance. 
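The corpus-level score is simply accumulated edits divided by accumulated reference length, scaled to a percentage, with two edge cases. A small illustration that mirrors the rule in `_compute_score_from_stats` above:

def ter_percent(total_edits: float, sum_ref_lengths: float) -> float:
    if sum_ref_lengths > 0:
        score = total_edits / sum_ref_lengths
    elif total_edits > 0:
        score = 1.0   # empty reference(s), non-empty hypothesis
    else:
        score = 0.0   # both reference(s) and hypothesis are empty
    return 100 * score

print(ter_percent(3, 12))   # 25.0
print(ter_percent(2, 0))    # 100.0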
+ """ + return self._compute_score_from_stats(sum_of_lists(stats)) + + def _compute_segment_statistics( + self, hypothesis: str, ref_kwargs: Dict) -> List[float]: + """Given a (pre-processed) hypothesis sentence and already computed + reference words, returns the segment statistics required to compute + the full TER score. + + :param hypothesis: Hypothesis sentence. + :param ref_kwargs: A dictionary with `ref_words` key which is a list + where each sublist contains reference words. + :return: A two-element list that contains the 'minimum number of edits' + and 'the average reference length'. + """ + + ref_lengths = 0 + best_num_edits = int(1e16) + + words_hyp = hypothesis.split() + + # Iterate the references + ref_words = ref_kwargs['ref_words'] + for words_ref in ref_words: + num_edits, ref_len = translation_edit_rate(words_hyp, words_ref) + ref_lengths += ref_len + if num_edits < best_num_edits: + best_num_edits = num_edits + + avg_ref_len = ref_lengths / len(ref_words) + return [best_num_edits, avg_ref_len] + + def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, Any]: + """Given a list of reference segments, applies pre-processing & tokenization + and returns list of tokens for each reference. + + :param refs: A sequence of strings. + :return: A dictionary that will be passed to `_compute_segment_statistics()` + through keyword arguments. + """ + ref_words = [] + + for ref in refs: + ref_words.append(self._preprocess_segment(ref).split()) + + return {'ref_words': ref_words} diff --git a/venv/lib/python3.10/site-packages/tabledata/__init__.py b/venv/lib/python3.10/site-packages/tabledata/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f41c368069a1c7002ad98d98095aff52c9d1e82c --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata/__init__.py @@ -0,0 +1,24 @@ +""" +.. 
codeauthor:: Tsuyoshi Hombashi +""" + +from .__version__ import __author__, __copyright__, __email__, __license__, __version__ +from ._common import convert_idx_to_alphabet +from ._constant import PatternMatch +from ._converter import to_value_matrix +from ._core import TableData +from ._logger import set_log_level, set_logger +from .error import DataError, InvalidHeaderNameError, InvalidTableNameError, NameValidationError + + +__all__ = ( + "convert_idx_to_alphabet", + "set_logger", + "to_value_matrix", + "PatternMatch", + "TableData", + "DataError", + "InvalidHeaderNameError", + "InvalidTableNameError", + "NameValidationError", +) diff --git a/venv/lib/python3.10/site-packages/tabledata/__pycache__/_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/tabledata/__pycache__/_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a5b75b56dc05eeefc698ad4929d72cf3880fe9d Binary files /dev/null and b/venv/lib/python3.10/site-packages/tabledata/__pycache__/_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/tabledata/__pycache__/_constant.cpython-310.pyc b/venv/lib/python3.10/site-packages/tabledata/__pycache__/_constant.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fc88f446f02f28ed74a7a92a163411d8de4326a Binary files /dev/null and b/venv/lib/python3.10/site-packages/tabledata/__pycache__/_constant.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/tabledata/__version__.py b/venv/lib/python3.10/site-packages/tabledata/__version__.py new file mode 100644 index 0000000000000000000000000000000000000000..fefee19a86f0a0eaf8f4d0e6e3c11e3af4ae6a83 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata/__version__.py @@ -0,0 +1,6 @@ +__author__ = "Tsuyoshi Hombashi" +__copyright__ = f"Copyright 2017, {__author__}" +__license__ = "MIT License" +__version__ = "1.3.3" +__maintainer__ = __author__ +__email__ = "tsuyoshi.hombashi@gmail.com" diff --git a/venv/lib/python3.10/site-packages/tabledata/_common.py b/venv/lib/python3.10/site-packages/tabledata/_common.py new file mode 100644 index 0000000000000000000000000000000000000000..944e9474385d5ac4cace526f532564308a1fd13a --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata/_common.py @@ -0,0 +1,12 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + + +def convert_idx_to_alphabet(idx: int) -> str: + if idx < 26: + return chr(65 + idx) + + div, mod = divmod(idx, 26) + + return convert_idx_to_alphabet(div - 1) + convert_idx_to_alphabet(mod) diff --git a/venv/lib/python3.10/site-packages/tabledata/_constant.py b/venv/lib/python3.10/site-packages/tabledata/_constant.py new file mode 100644 index 0000000000000000000000000000000000000000..722f1372ff8416da2a9c5733c11d8351e87c792f --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata/_constant.py @@ -0,0 +1,11 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +import enum + + +@enum.unique +class PatternMatch(enum.Enum): + OR = 0 + AND = 1 diff --git a/venv/lib/python3.10/site-packages/tabledata/_converter.py b/venv/lib/python3.10/site-packages/tabledata/_converter.py new file mode 100644 index 0000000000000000000000000000000000000000..ce0799f5298220aa225739c2eb4825706bf827b1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata/_converter.py @@ -0,0 +1,35 @@ +""" +.. 
codeauthor:: Tsuyoshi Hombashi +""" + +from typing import Any, List, Sequence, Tuple + +from .error import DataError + + +Row = Tuple[int, Any] + + +def to_value_matrix(headers: Sequence[str], value_matrix: Sequence[Any]) -> List[Row]: + if not value_matrix: + return [] + + return [_to_row(headers, values, row_idx)[1] for row_idx, values in enumerate(value_matrix)] + + +def _to_row(headers: Sequence[str], values: Any, row_idx: int) -> Row: + if headers: + try: + values = values._asdict() + except AttributeError: + pass + + try: + return (row_idx, [values.get(header) for header in headers]) + except (TypeError, AttributeError): + pass + + if not isinstance(values, (tuple, list)): + raise DataError(f"row must be a list or tuple: actual={type(values)}") + + return (row_idx, values) diff --git a/venv/lib/python3.10/site-packages/tabledata/_core.py b/venv/lib/python3.10/site-packages/tabledata/_core.py new file mode 100644 index 0000000000000000000000000000000000000000..1d16517eefdafae0ab12e555fd287242352e7968 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata/_core.py @@ -0,0 +1,510 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +import copy +import re +from collections import OrderedDict, namedtuple +from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Sequence, Tuple, Union + +import dataproperty as dp +import typepy +from dataproperty import DataPropertyMatrix +from dataproperty.typing import TypeHint +from typepy import Nan + +from ._constant import PatternMatch +from ._converter import to_value_matrix +from ._logger import logger + + +if TYPE_CHECKING: + import pandas + + +class TableData: + """ + Class to represent a table data structure. + + :param table_name: Name of the table. + :param headers: Table header names. + :param rows: Data of the table. 
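The converter above accepts dict-like rows, namedtuples, or plain sequences and maps them onto the header order. A self-contained sketch of that dispatch under the same rules (the headers here are illustrative):

from collections import namedtuple

headers = ["a", "b"]

def to_row(values):
    if hasattr(values, "_asdict"):          # namedtuple support
        values = values._asdict()
    if isinstance(values, dict):
        return [values.get(h) for h in headers]
    if not isinstance(values, (tuple, list)):
        raise ValueError(f"row must be a list or tuple: actual={type(values)}")
    return list(values)

Row = namedtuple("Row", ["a", "b"])
print(to_row({"b": 2, "a": 1}))   # [1, 2]
print(to_row(Row(5, 6)))          # [5, 6]
print(to_row((3, 4)))             # [3, 4]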
+ """ + + def __init__( + self, + table_name: Optional[str], + headers: Sequence[str], + rows: Sequence, + dp_extractor: Optional[dp.DataPropertyExtractor] = None, + type_hints: Optional[Sequence[Union[str, TypeHint]]] = None, + max_workers: Optional[int] = None, + max_precision: Optional[int] = None, + ) -> None: + self.__table_name = table_name + self.__value_matrix: List[List[Any]] = [] + self.__value_dp_matrix: Optional[DataPropertyMatrix] = None + + if rows: + self.__rows = rows + else: + self.__rows = [] + + if dp_extractor: + self.__dp_extractor = copy.deepcopy(dp_extractor) + else: + self.__dp_extractor = dp.DataPropertyExtractor(max_precision=max_precision) + + if type_hints: + self.__dp_extractor.column_type_hints = type_hints + + self.__dp_extractor.strip_str_header = '"' + + if max_workers: + self.__dp_extractor.max_workers = max_workers + + if not headers: + self.__dp_extractor.headers = [] + else: + self.__dp_extractor.headers = headers + + def __repr__(self) -> str: + element_list = [f"table_name={self.table_name}"] + + try: + element_list.append("headers=[{}]".format(", ".join(self.headers))) + except TypeError: + element_list.append("headers=None") + + element_list.extend([f"cols={self.num_columns}", f"rows={self.num_rows}"]) + + return ", ".join(element_list) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, TableData): + return False + + return self.equals(other, cmp_by_dp=False) + + def __ne__(self, other: Any) -> bool: + if not isinstance(other, TableData): + return True + + return not self.equals(other, cmp_by_dp=False) + + @property + def table_name(self) -> Optional[str]: + """str: Name of the table.""" + + return self.__table_name + + @table_name.setter + def table_name(self, value: Optional[str]) -> None: + self.__table_name = value + + @property + def headers(self) -> Sequence[str]: + """Sequence[str]: Table header names.""" + + return self.__dp_extractor.headers + + @property + def rows(self) -> Sequence: + """Sequence: Original rows of tabular data.""" + + return self.__rows + + @property + def value_matrix(self) -> DataPropertyMatrix: + """DataPropertyMatrix: Converted rows of tabular data.""" + + if self.__value_matrix: + return self.__value_matrix + + self.__value_matrix = [ + [value_dp.data for value_dp in value_dp_list] for value_dp_list in self.value_dp_matrix + ] + + return self.__value_matrix + + @property + def has_value_dp_matrix(self) -> bool: + return self.__value_dp_matrix is not None + + @property + def max_workers(self) -> int: + return self.__dp_extractor.max_workers + + @max_workers.setter + def max_workers(self, value: Optional[int]) -> None: + self.__dp_extractor.max_workers = value + + @property + def num_rows(self) -> Optional[int]: + """Optional[int]: + Number of rows in the tabular data. + |None| if the ``rows`` is neither list nor tuple. 
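Assuming the tabledata package and its dataproperty dependency are installed, a minimal usage sketch of the constructor and properties defined above (mirroring the sample data used later in `as_dict`):

from tabledata import TableData

td = TableData("sample", ["a", "b"], [[1, 2], [3.3, 4.4]])
print(td.table_name)    # 'sample'
print(td.num_rows)      # 2
print(td.num_columns)   # 2 (taken from the headers when they are non-empty)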
+ """ + + try: + return len(self.rows) + except TypeError: + return None + + @property + def num_columns(self) -> Optional[int]: + if typepy.is_not_empty_sequence(self.headers): + return len(self.headers) + + try: + return len(self.rows[0]) + except TypeError: + return None + except IndexError: + return 0 + + @property + def value_dp_matrix(self) -> DataPropertyMatrix: + """DataPropertyMatrix: DataProperty for table data.""" + + if self.__value_dp_matrix is None: + self.__value_dp_matrix = self.__dp_extractor.to_dp_matrix( + to_value_matrix(self.headers, self.rows) + ) + + return self.__value_dp_matrix + + @property + def header_dp_list(self) -> List[dp.DataProperty]: + return self.__dp_extractor.to_header_dp_list() + + @property + def column_dp_list(self) -> List[dp.ColumnDataProperty]: + return self.__dp_extractor.to_column_dp_list(self.value_dp_matrix) + + @property + def dp_extractor(self) -> dp.DataPropertyExtractor: + return self.__dp_extractor + + def is_empty_header(self) -> bool: + """bool: |True| if the data :py:attr:`.headers` is empty.""" + + return typepy.is_empty_sequence(self.headers) + + def is_empty_rows(self) -> bool: + """ + :return: |True| if the tabular data has no rows. + :rtype: bool + """ + + return self.num_rows == 0 + + def is_empty(self) -> bool: + """ + :return: + |True| if the data :py:attr:`.headers` or + :py:attr:`.value_matrix` is empty. + :rtype: bool + """ + + return any([self.is_empty_header(), self.is_empty_rows()]) + + def equals(self, other: "TableData", cmp_by_dp: bool = True) -> bool: + if cmp_by_dp: + return self.__equals_dp(other) + + return self.__equals_raw(other) + + def __equals_base(self, other: "TableData") -> bool: + compare_item_list = [self.table_name == other.table_name] + + if self.num_rows is not None: + compare_item_list.append(self.num_rows == other.num_rows) + + return all(compare_item_list) + + def __equals_raw(self, other: "TableData") -> bool: + if not self.__equals_base(other): + return False + + if self.headers != other.headers: + return False + + for lhs_row, rhs_row in zip(self.rows, other.rows): + if len(lhs_row) != len(rhs_row): + return False + + if not all( + [ + lhs == rhs + for lhs, rhs in zip(lhs_row, rhs_row) + if not Nan(lhs).is_type() and not Nan(rhs).is_type() + ] + ): + return False + + return True + + def __equals_dp(self, other: "TableData") -> bool: + if not self.__equals_base(other): + return False + + if self.header_dp_list != other.header_dp_list: + return False + + if self.value_dp_matrix is None or other.value_dp_matrix is None: + return False + + for lhs_list, rhs_list in zip(self.value_dp_matrix, other.value_dp_matrix): + if len(lhs_list) != len(rhs_list): + return False + + if any([lhs != rhs for lhs, rhs in zip(lhs_list, rhs_list)]): + return False + + return True + + def in_tabledata_list(self, other: Sequence["TableData"], cmp_by_dp: bool = True) -> bool: + for table_data in other: + if self.equals(table_data, cmp_by_dp=cmp_by_dp): + return True + + return False + + def validate_rows(self) -> None: + """ + :raises ValueError: + """ + + invalid_row_idx_list = [] + + for row_idx, row in enumerate(self.rows): + if isinstance(row, (list, tuple)) and len(self.headers) != len(row): + invalid_row_idx_list.append(row_idx) + + if isinstance(row, dict): + if not all([header in row for header in self.headers]): + invalid_row_idx_list.append(row_idx) + + if not invalid_row_idx_list: + return + + for invalid_row_idx in invalid_row_idx_list: + logger.debug(f"invalid row (line={invalid_row_idx}): 
{self.rows[invalid_row_idx]}") + + raise ValueError( + "table header length and row length are mismatch:\n" + + f" header(len={len(self.headers)}): {self.headers}\n" + + " # of miss match rows: {} ouf of {}\n".format( + len(invalid_row_idx_list), self.num_rows + ) + ) + + def as_dict(self, default_key: str = "table") -> Dict[str, List["OrderedDict[str, Any]"]]: + """ + Args: + default_key: + Key of a returning dictionary when the ``table_name`` is empty. + + Returns: + dict: Table data as a |dict| instance. + + Sample Code: + .. code:: python + + from tabledata import TableData + + TableData( + "sample", + ["a", "b"], + [[1, 2], [3.3, 4.4]] + ).as_dict() + + Output: + .. code:: json + + {'sample': [OrderedDict([('a', 1), ('b', 2)]), OrderedDict([('a', 3.3), ('b', 4.4)])]} + """ # noqa + + dict_body = [] + for row in self.value_matrix: + if not row: + continue + + values = [ + (header, value) for header, value in zip(self.headers, row) if value is not None + ] + + if not values: + continue + + dict_body.append(OrderedDict(values)) + + table_name = self.table_name + if not table_name: + table_name = default_key + + return {table_name: dict_body} + + def as_tuple(self) -> Iterator[Tuple]: + """ + :return: Rows of the tuple. + :rtype: list of |namedtuple| + + :Sample Code: + .. code:: python + + from tabledata import TableData + + records = TableData( + "sample", + ["a", "b"], + [[1, 2], [3.3, 4.4]] + ).as_tuple() + for record in records: + print(record) + + :Output: + .. code-block:: none + + Row(a=1, b=2) + Row(a=Decimal('3.3'), b=Decimal('4.4')) + """ + + Row = namedtuple("Row", self.headers) # type: ignore + + for value_dp_list in self.value_dp_matrix: + if typepy.is_empty_sequence(value_dp_list): + continue + + row = Row(*(value_dp.data for value_dp in value_dp_list)) + + yield row + + def as_dataframe(self) -> "pandas.DataFrame": + """ + :return: Table data as a ``pandas.DataFrame`` instance. + :rtype: pandas.DataFrame + + :Sample Code: + .. code-block:: python + + from tabledata import TableData + + TableData( + "sample", + ["a", "b"], + [[1, 2], [3.3, 4.4]] + ).as_dataframe() + + :Output: + .. 
code-block:: none + + a b + 0 1 2 + 1 3.3 4.4 + + :Dependency Packages: + - `pandas `__ + """ + + try: + from pandas import DataFrame + except ImportError: + raise RuntimeError("required 'pandas' package to execute as_dataframe method") + + dataframe = DataFrame(self.value_matrix) + if not self.is_empty_header(): + dataframe.columns = self.headers + + return dataframe + + def transpose(self) -> "TableData": + return TableData( + self.table_name, + self.headers, + [row for row in zip(*self.rows)], + max_workers=self.max_workers, + ) + + def filter_column( + self, + patterns: Optional[str] = None, + is_invert_match: bool = False, + is_re_match: bool = False, + pattern_match: PatternMatch = PatternMatch.OR, + ) -> "TableData": + logger.debug( + "filter_column: patterns={}, is_invert_match={}, " + "is_re_match={}, pattern_match={}".format( + patterns, is_invert_match, is_re_match, pattern_match + ) + ) + + if not patterns: + return self + + match_header_list = [] + match_column_matrix = [] + + if pattern_match == PatternMatch.OR: + match_method = any + elif pattern_match == PatternMatch.AND: + match_method = all + else: + raise ValueError(f"unknown matching: {pattern_match}") + + for header, column in zip(self.headers, zip(*self.rows)): + is_match_list = [] + for pattern in patterns: + is_match = self.__is_match(header, pattern, is_re_match) + + is_match_list.append( + any([is_match and not is_invert_match, not is_match and is_invert_match]) + ) + + if match_method(is_match_list): + match_header_list.append(header) + match_column_matrix.append(column) + + logger.debug( + "filter_column: table={}, match_header_list={}".format( + self.table_name, match_header_list + ) + ) + + return TableData( + self.table_name, + match_header_list, + list(zip(*match_column_matrix)), + max_workers=self.max_workers, + ) + + @staticmethod + def from_dataframe( + dataframe: "pandas.DataFrame", + table_name: str = "", + type_hints: Optional[Sequence[TypeHint]] = None, + max_workers: Optional[int] = None, + ) -> "TableData": + """ + Initialize TableData instance from a pandas.DataFrame instance. + + :param pandas.DataFrame dataframe: + :param str table_name: Table name to create. + """ + + return TableData( + table_name, + list(dataframe.columns.values), + dataframe.values.tolist(), + type_hints=type_hints, + max_workers=max_workers, + ) + + @staticmethod + def __is_match(header: str, pattern: str, is_re_match: bool) -> bool: + if is_re_match: + return re.search(pattern, header) is not None + + return header == pattern diff --git a/venv/lib/python3.10/site-packages/tabledata/error.py b/venv/lib/python3.10/site-packages/tabledata/error.py new file mode 100644 index 0000000000000000000000000000000000000000..35084f8b1af8fa41a12f4fcaf5f0710771019f41 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata/error.py @@ -0,0 +1,27 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + + +class NameValidationError(ValueError): + """ + Exception raised when a name is invalid. + """ + + +class InvalidTableNameError(NameValidationError): + """ + Exception raised when a table name is invalid. + """ + + +class InvalidHeaderNameError(NameValidationError): + """ + Exception raised when a table header name is invalid. + """ + + +class DataError(ValueError): + """ + Exception raised when data is invalid as tabular data. 
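A hedged usage sketch of `filter_column` defined above; `patterns` is assumed to be an iterable of header names (exact match unless `is_re_match=True`), and the filtered rows come back as tuples because the method rebuilds them from the matched columns:

from tabledata import TableData

td = TableData("t", ["id", "name", "score"], [[1, "a", 0.5], [2, "b", 0.9]])
only_scores = td.filter_column(patterns=["score"])
print(only_scores.headers)   # expected: ['score']
print(only_scores.rows)      # expected: [(0.5,), (0.9,)]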
+ """ diff --git a/venv/lib/python3.10/site-packages/tabledata/normalizer.py b/venv/lib/python3.10/site-packages/tabledata/normalizer.py new file mode 100644 index 0000000000000000000000000000000000000000..5f5c383f51c57c49eeeb611679d3b5c8fe90ff52 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabledata/normalizer.py @@ -0,0 +1,207 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +import abc +import warnings +from typing import List, Sequence + +import typepy +from dataproperty.typing import TypeHint + +from ._core import TableData +from ._logger import logger +from .error import InvalidHeaderNameError, InvalidTableNameError + + +class TableDataNormalizerInterface(metaclass=abc.ABCMeta): + """ + Interface class to validate and normalize data of |TableData|. + """ + + @abc.abstractmethod + def validate(self) -> None: # pragma: no cover + pass + + @abc.abstractmethod + def normalize(self) -> TableData: # pragma: no cover + pass + + +class AbstractTableDataNormalizer(TableDataNormalizerInterface): + @property + def _type_hints(self) -> List[TypeHint]: + return self._tabledata.dp_extractor.column_type_hints + + def __init__(self, tabledata: TableData) -> None: + self._tabledata = tabledata + + def validate(self) -> None: + if not self._tabledata.table_name: + raise ValueError("table_name must not be empty") + + self._validate_table_name(self._tabledata.table_name) + self._validate_headers() + + def sanitize(self): # type: ignore + warnings.warn( + "'sanitize' method is deprecated and will be removed in the future." + " use 'normalize' method instead.", + DeprecationWarning, + ) + + return self.normalize() + + def normalize(self) -> TableData: + """ + :return: Sanitized table data. + :rtype: tabledata.TableData + """ + + logger.debug(f"normalize: {type(self).__name__}") + + normalize_headers = self._normalize_headers() + + return TableData( + self.__normalize_table_name(), + normalize_headers, + self._normalize_rows(normalize_headers), + dp_extractor=self._tabledata.dp_extractor, + type_hints=self._type_hints, + max_workers=self._tabledata.max_workers, + ) + + @abc.abstractmethod + def _preprocess_table_name(self) -> str: + """ + This method is always called before table name validation. + You must return preprocessed table name. + """ + + @abc.abstractmethod + def _validate_table_name(self, table_name: str) -> None: + """ + Must raise :py:class:`~.InvalidTableNameError` + when you consider the table name invalid. + + :param str header: Table name to validate. + :raises tabledata.InvalidTableNameError: + If the table name is invalid. + |raises_validate_table_name| + """ + + @abc.abstractmethod + def _normalize_table_name(self, table_name: str) -> str: + """ + Must return a valid table name. + The table name must be considered to be a valid name by + :py:meth:`~._validate_table_name` method. + + This method called when :py:meth:`~._validate_table_name` method raise + :py:class:`~.InvalidTableNameError`. + + :param str table_name: Table name to normalize. + :return: Sanitized table name. + :rtype: str + """ + + @abc.abstractmethod + def _preprocess_header(self, col_idx: int, header: str) -> str: + """ + This method is always called before a header validation. + You must return preprocessed header. + """ + + @abc.abstractmethod + def _validate_header(self, header: str) -> None: + """ + No operation. + + This method called for each table header. Override this method + in a subclass if you want to detect invalid table header elements. 
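The hooks above define a validate-first contract: the `_normalize_*` method is only called when the corresponding `_validate_*` raises, and its result must itself pass validation. A standalone sketch of that contract with illustrative stand-in rules (not the package's actual validators):

class InvalidName(ValueError):
    pass

def validate(name: str) -> None:
    if not name.isidentifier():
        raise InvalidName(name)

def normalize(name: str) -> str:
    return "".join(c if c.isalnum() else "_" for c in name) or "col"

def normalized_name(raw: str) -> str:
    try:
        validate(raw)          # accept the name as-is when it is valid
        return raw
    except InvalidName:
        fixed = normalize(raw)
        validate(fixed)        # a normalized name must pass validation
        return fixed

print(normalized_name("price"))    # price
print(normalized_name("unit $"))   # unit__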
+ Raise :py:class:`~.InvalidHeaderNameError` if an invalid + header element found. + + :param str header: Table header name. + :raises tabledata.InvalidHeaderNameError: + If the ``header`` is invalid. + """ + + @abc.abstractmethod + def _normalize_header(self, header: str) -> str: + """ + Must return a valid header name. + This method called when :py:meth:`~._validate_header` method raise + :py:class:`~.InvalidHeaderNameError`. + Override this method in subclass if you want to rename invalid + table header element. + + :param str header: Header name to normalize. + :return: Renamed header name. + :rtype: str + """ + + def _normalize_rows(self, normalize_headers: Sequence[str]) -> List: + return list(self._tabledata.rows) + + def _validate_headers(self) -> None: + for header in self._tabledata.headers: + self._validate_header(header) + + def __normalize_table_name(self) -> str: + preprocessed_table_name = self._preprocess_table_name() + + try: + self._validate_table_name(preprocessed_table_name) + new_table_name = preprocessed_table_name + except InvalidTableNameError: + new_table_name = self._normalize_table_name(preprocessed_table_name) + self._validate_table_name(new_table_name) + + return new_table_name + + def _normalize_headers(self) -> List[str]: + new_header_list = [] + + for col_idx, header in enumerate(self._tabledata.headers): + header = self._preprocess_header(col_idx, header) + + try: + self._validate_header(header) + new_header = header + except InvalidHeaderNameError: + new_header = self._normalize_header(header) + self._validate_header(new_header) + + new_header_list.append(new_header) + + return new_header_list + + +class TableDataNormalizer(AbstractTableDataNormalizer): + def _preprocess_table_name(self) -> str: + if not self._tabledata.table_name: + return "" + + return self._tabledata.table_name + + def _validate_table_name(self, table_name: str) -> None: + try: + typepy.String(table_name).validate() + except TypeError as e: + raise InvalidTableNameError(e) + + def _normalize_table_name(self, table_name: str) -> str: + return str(typepy.String(table_name).force_convert()) + + def _preprocess_header(self, col_idx: int, header: str) -> str: + return header + + def _validate_header(self, header: str) -> None: + try: + typepy.String(header).validate() + except TypeError as e: + raise InvalidHeaderNameError(e) + + def _normalize_header(self, header: str) -> str: + return str(typepy.String(header).force_convert()) diff --git a/venv/lib/python3.10/site-packages/tabledata/py.typed b/venv/lib/python3.10/site-packages/tabledata/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/tabulate/__init__.py b/venv/lib/python3.10/site-packages/tabulate/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..503df34848d710b62099e514dee3202de29c3f2a --- /dev/null +++ b/venv/lib/python3.10/site-packages/tabulate/__init__.py @@ -0,0 +1,2716 @@ +"""Pretty-print tabular data.""" + +from collections import namedtuple +from collections.abc import Iterable, Sized +from html import escape as htmlescape +from itertools import chain, zip_longest as izip_longest +from functools import reduce, partial +import io +import re +import math +import textwrap +import dataclasses + +try: + import wcwidth # optional wide-character (CJK) support +except ImportError: + wcwidth = None + + +def _is_file(f): + return isinstance(f, io.IOBase) + + +__all__ = ["tabulate", 
"tabulate_formats", "simple_separated_format"] +try: + from .version import version as __version__ # noqa: F401 +except ImportError: + pass # running __init__.py as a script, AppVeyor pytests + + +# minimum extra space in headers +MIN_PADDING = 2 + +# Whether or not to preserve leading/trailing whitespace in data. +PRESERVE_WHITESPACE = False + +_DEFAULT_FLOATFMT = "g" +_DEFAULT_INTFMT = "" +_DEFAULT_MISSINGVAL = "" +# default align will be overwritten by "left", "center" or "decimal" +# depending on the formatter +_DEFAULT_ALIGN = "default" + + +# if True, enable wide-character (CJK) support +WIDE_CHARS_MODE = wcwidth is not None + +# Constant that can be used as part of passed rows to generate a separating line +# It is purposely an unprintable character, very unlikely to be used in a table +SEPARATING_LINE = "\001" + +Line = namedtuple("Line", ["begin", "hline", "sep", "end"]) + + +DataRow = namedtuple("DataRow", ["begin", "sep", "end"]) + + +# A table structure is supposed to be: +# +# --- lineabove --------- +# headerrow +# --- linebelowheader --- +# datarow +# --- linebetweenrows --- +# ... (more datarows) ... +# --- linebetweenrows --- +# last datarow +# --- linebelow --------- +# +# TableFormat's line* elements can be +# +# - either None, if the element is not used, +# - or a Line tuple, +# - or a function: [col_widths], [col_alignments] -> string. +# +# TableFormat's *row elements can be +# +# - either None, if the element is not used, +# - or a DataRow tuple, +# - or a function: [cell_values], [col_widths], [col_alignments] -> string. +# +# padding (an integer) is the amount of white space around data values. +# +# with_header_hide: +# +# - either None, to display all table elements unconditionally, +# - or a list of elements not to be displayed if the table has column headers. +# +TableFormat = namedtuple( + "TableFormat", + [ + "lineabove", + "linebelowheader", + "linebetweenrows", + "linebelow", + "headerrow", + "datarow", + "padding", + "with_header_hide", + ], +) + + +def _is_separating_line(row): + row_type = type(row) + is_sl = (row_type == list or row_type == str) and ( + (len(row) >= 1 and row[0] == SEPARATING_LINE) + or (len(row) >= 2 and row[1] == SEPARATING_LINE) + ) + return is_sl + + +def _pipe_segment_with_colons(align, colwidth): + """Return a segment of a horizontal line with optional colons which + indicate column's alignment (as in `pipe` output format).""" + w = colwidth + if align in ["right", "decimal"]: + return ("-" * (w - 1)) + ":" + elif align == "center": + return ":" + ("-" * (w - 2)) + ":" + elif align == "left": + return ":" + ("-" * (w - 1)) + else: + return "-" * w + + +def _pipe_line_with_colons(colwidths, colaligns): + """Return a horizontal line with optional colons to indicate column's + alignment (as in `pipe` output format).""" + if not colaligns: # e.g. 
printing an empty data frame (github issue #15) + colaligns = [""] * len(colwidths) + segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)] + return "|" + "|".join(segments) + "|" + + +def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns): + alignment = { + "left": "", + "right": 'align="right"| ', + "center": 'align="center"| ', + "decimal": 'align="right"| ', + } + # hard-coded padding _around_ align attribute and value together + # rather than padding parameter which affects only the value + values_with_attrs = [ + " " + alignment.get(a, "") + c + " " for c, a in zip(cell_values, colaligns) + ] + colsep = separator * 2 + return (separator + colsep.join(values_with_attrs)).rstrip() + + +def _textile_row_with_attrs(cell_values, colwidths, colaligns): + cell_values[0] += " " + alignment = {"left": "<.", "right": ">.", "center": "=.", "decimal": ">."} + values = (alignment.get(a, "") + v for a, v in zip(colaligns, cell_values)) + return "|" + "|".join(values) + "|" + + +def _html_begin_table_without_header(colwidths_ignore, colaligns_ignore): + # this table header will be suppressed if there is a header row + return "\n" + + +def _html_row_with_attrs(celltag, unsafe, cell_values, colwidths, colaligns): + alignment = { + "left": "", + "right": ' style="text-align: right;"', + "center": ' style="text-align: center;"', + "decimal": ' style="text-align: right;"', + } + if unsafe: + values_with_attrs = [ + "<{0}{1}>{2}".format(celltag, alignment.get(a, ""), c) + for c, a in zip(cell_values, colaligns) + ] + else: + values_with_attrs = [ + "<{0}{1}>{2}".format(celltag, alignment.get(a, ""), htmlescape(c)) + for c, a in zip(cell_values, colaligns) + ] + rowhtml = "{}".format("".join(values_with_attrs).rstrip()) + if celltag == "th": # it's a header row, create a new table header + rowhtml = f"
\n\n{rowhtml}\n\n" + return rowhtml + + +def _moin_row_with_attrs(celltag, cell_values, colwidths, colaligns, header=""): + alignment = { + "left": "", + "right": '', + "center": '', + "decimal": '', + } + values_with_attrs = [ + "{}{} {} ".format(celltag, alignment.get(a, ""), header + c + header) + for c, a in zip(cell_values, colaligns) + ] + return "".join(values_with_attrs) + "||" + + +def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False, longtable=False): + alignment = {"left": "l", "right": "r", "center": "c", "decimal": "r"} + tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns]) + return "\n".join( + [ + ("\\begin{tabular}{" if not longtable else "\\begin{longtable}{") + + tabular_columns_fmt + + "}", + "\\toprule" if booktabs else "\\hline", + ] + ) + + +def _asciidoc_row(is_header, *args): + """handle header and data rows for asciidoc format""" + + def make_header_line(is_header, colwidths, colaligns): + # generate the column specifiers + + alignment = {"left": "<", "right": ">", "center": "^", "decimal": ">"} + # use the column widths generated by tabulate for the asciidoc column width specifiers + asciidoc_alignments = zip( + colwidths, [alignment[colalign] for colalign in colaligns] + ) + asciidoc_column_specifiers = [ + "{:d}{}".format(width, align) for width, align in asciidoc_alignments + ] + header_list = ['cols="' + (",".join(asciidoc_column_specifiers)) + '"'] + + # generate the list of options (currently only "header") + options_list = [] + + if is_header: + options_list.append("header") + + if options_list: + header_list += ['options="' + ",".join(options_list) + '"'] + + # generate the list of entries in the table header field + + return "[{}]\n|====".format(",".join(header_list)) + + if len(args) == 2: + # two arguments are passed if called in the context of aboveline + # print the table header with column widths and optional header tag + return make_header_line(False, *args) + + elif len(args) == 3: + # three arguments are passed if called in the context of dataline or headerline + # print the table line and make the aboveline if it is a header + + cell_values, colwidths, colaligns = args + data_line = "|" + "|".join(cell_values) + + if is_header: + return make_header_line(True, colwidths, colaligns) + "\n" + data_line + else: + return data_line + + else: + raise ValueError( + " _asciidoc_row() requires two (colwidths, colaligns) " + + "or three (cell_values, colwidths, colaligns) arguments) " + ) + + +LATEX_ESCAPE_RULES = { + r"&": r"\&", + r"%": r"\%", + r"$": r"\$", + r"#": r"\#", + r"_": r"\_", + r"^": r"\^{}", + r"{": r"\{", + r"}": r"\}", + r"~": r"\textasciitilde{}", + "\\": r"\textbackslash{}", + r"<": r"\ensuremath{<}", + r">": r"\ensuremath{>}", +} + + +def _latex_row(cell_values, colwidths, colaligns, escrules=LATEX_ESCAPE_RULES): + def escape_char(c): + return escrules.get(c, c) + + escaped_values = ["".join(map(escape_char, cell)) for cell in cell_values] + rowfmt = DataRow("", "&", "\\\\") + return _build_simple_row(escaped_values, rowfmt) + + +def _rst_escape_first_column(rows, headers): + def escape_empty(val): + if isinstance(val, (str, bytes)) and not val.strip(): + return ".." 
+ else: + return val + + new_headers = list(headers) + new_rows = [] + if headers: + new_headers[0] = escape_empty(headers[0]) + for row in rows: + new_row = list(row) + if new_row: + new_row[0] = escape_empty(row[0]) + new_rows.append(new_row) + return new_rows, new_headers + + +_table_formats = { + "simple": TableFormat( + lineabove=Line("", "-", " ", ""), + linebelowheader=Line("", "-", " ", ""), + linebetweenrows=None, + linebelow=Line("", "-", " ", ""), + headerrow=DataRow("", " ", ""), + datarow=DataRow("", " ", ""), + padding=0, + with_header_hide=["lineabove", "linebelow"], + ), + "plain": TableFormat( + lineabove=None, + linebelowheader=None, + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("", " ", ""), + datarow=DataRow("", " ", ""), + padding=0, + with_header_hide=None, + ), + "grid": TableFormat( + lineabove=Line("+", "-", "+", "+"), + linebelowheader=Line("+", "=", "+", "+"), + linebetweenrows=Line("+", "-", "+", "+"), + linebelow=Line("+", "-", "+", "+"), + headerrow=DataRow("|", "|", "|"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=None, + ), + "simple_grid": TableFormat( + lineabove=Line("┌", "─", "┬", "┐"), + linebelowheader=Line("├", "─", "┼", "┤"), + linebetweenrows=Line("├", "─", "┼", "┤"), + linebelow=Line("└", "─", "┴", "┘"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "rounded_grid": TableFormat( + lineabove=Line("╭", "─", "┬", "╮"), + linebelowheader=Line("├", "─", "┼", "┤"), + linebetweenrows=Line("├", "─", "┼", "┤"), + linebelow=Line("╰", "─", "┴", "╯"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "heavy_grid": TableFormat( + lineabove=Line("┏", "━", "┳", "┓"), + linebelowheader=Line("┣", "━", "╋", "┫"), + linebetweenrows=Line("┣", "━", "╋", "┫"), + linebelow=Line("┗", "━", "┻", "┛"), + headerrow=DataRow("┃", "┃", "┃"), + datarow=DataRow("┃", "┃", "┃"), + padding=1, + with_header_hide=None, + ), + "mixed_grid": TableFormat( + lineabove=Line("┍", "━", "┯", "┑"), + linebelowheader=Line("┝", "━", "┿", "┥"), + linebetweenrows=Line("├", "─", "┼", "┤"), + linebelow=Line("┕", "━", "┷", "┙"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "double_grid": TableFormat( + lineabove=Line("╔", "═", "╦", "╗"), + linebelowheader=Line("╠", "═", "╬", "╣"), + linebetweenrows=Line("╠", "═", "╬", "╣"), + linebelow=Line("╚", "═", "╩", "╝"), + headerrow=DataRow("║", "║", "║"), + datarow=DataRow("║", "║", "║"), + padding=1, + with_header_hide=None, + ), + "fancy_grid": TableFormat( + lineabove=Line("╒", "═", "╤", "╕"), + linebelowheader=Line("╞", "═", "╪", "╡"), + linebetweenrows=Line("├", "─", "┼", "┤"), + linebelow=Line("╘", "═", "╧", "╛"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "outline": TableFormat( + lineabove=Line("+", "-", "+", "+"), + linebelowheader=Line("+", "=", "+", "+"), + linebetweenrows=None, + linebelow=Line("+", "-", "+", "+"), + headerrow=DataRow("|", "|", "|"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=None, + ), + "simple_outline": TableFormat( + lineabove=Line("┌", "─", "┬", "┐"), + linebelowheader=Line("├", "─", "┼", "┤"), + linebetweenrows=None, + linebelow=Line("└", "─", "┴", "┘"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "rounded_outline": 
TableFormat( + lineabove=Line("╭", "─", "┬", "╮"), + linebelowheader=Line("├", "─", "┼", "┤"), + linebetweenrows=None, + linebelow=Line("╰", "─", "┴", "╯"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "heavy_outline": TableFormat( + lineabove=Line("┏", "━", "┳", "┓"), + linebelowheader=Line("┣", "━", "╋", "┫"), + linebetweenrows=None, + linebelow=Line("┗", "━", "┻", "┛"), + headerrow=DataRow("┃", "┃", "┃"), + datarow=DataRow("┃", "┃", "┃"), + padding=1, + with_header_hide=None, + ), + "mixed_outline": TableFormat( + lineabove=Line("┍", "━", "┯", "┑"), + linebelowheader=Line("┝", "━", "┿", "┥"), + linebetweenrows=None, + linebelow=Line("┕", "━", "┷", "┙"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "double_outline": TableFormat( + lineabove=Line("╔", "═", "╦", "╗"), + linebelowheader=Line("╠", "═", "╬", "╣"), + linebetweenrows=None, + linebelow=Line("╚", "═", "╩", "╝"), + headerrow=DataRow("║", "║", "║"), + datarow=DataRow("║", "║", "║"), + padding=1, + with_header_hide=None, + ), + "fancy_outline": TableFormat( + lineabove=Line("╒", "═", "╤", "╕"), + linebelowheader=Line("╞", "═", "╪", "╡"), + linebetweenrows=None, + linebelow=Line("╘", "═", "╧", "╛"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "github": TableFormat( + lineabove=Line("|", "-", "|", "|"), + linebelowheader=Line("|", "-", "|", "|"), + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("|", "|", "|"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=["lineabove"], + ), + "pipe": TableFormat( + lineabove=_pipe_line_with_colons, + linebelowheader=_pipe_line_with_colons, + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("|", "|", "|"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=["lineabove"], + ), + "orgtbl": TableFormat( + lineabove=None, + linebelowheader=Line("|", "-", "+", "|"), + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("|", "|", "|"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=None, + ), + "jira": TableFormat( + lineabove=None, + linebelowheader=None, + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("||", "||", "||"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=None, + ), + "presto": TableFormat( + lineabove=None, + linebelowheader=Line("", "-", "+", ""), + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("", "|", ""), + datarow=DataRow("", "|", ""), + padding=1, + with_header_hide=None, + ), + "pretty": TableFormat( + lineabove=Line("+", "-", "+", "+"), + linebelowheader=Line("+", "-", "+", "+"), + linebetweenrows=None, + linebelow=Line("+", "-", "+", "+"), + headerrow=DataRow("|", "|", "|"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=None, + ), + "psql": TableFormat( + lineabove=Line("+", "-", "+", "+"), + linebelowheader=Line("|", "-", "+", "|"), + linebetweenrows=None, + linebelow=Line("+", "-", "+", "+"), + headerrow=DataRow("|", "|", "|"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=None, + ), + "rst": TableFormat( + lineabove=Line("", "=", " ", ""), + linebelowheader=Line("", "=", " ", ""), + linebetweenrows=None, + linebelow=Line("", "=", " ", ""), + headerrow=DataRow("", " ", ""), + datarow=DataRow("", " ", ""), + padding=0, + with_header_hide=None, + ), + "mediawiki": TableFormat( + lineabove=Line( + '{| 
class="wikitable" style="text-align: left;"', + "", + "", + "\n|+ \n|-", + ), + linebelowheader=Line("|-", "", "", ""), + linebetweenrows=Line("|-", "", "", ""), + linebelow=Line("|}", "", "", ""), + headerrow=partial(_mediawiki_row_with_attrs, "!"), + datarow=partial(_mediawiki_row_with_attrs, "|"), + padding=0, + with_header_hide=None, + ), + "moinmoin": TableFormat( + lineabove=None, + linebelowheader=None, + linebetweenrows=None, + linebelow=None, + headerrow=partial(_moin_row_with_attrs, "||", header="'''"), + datarow=partial(_moin_row_with_attrs, "||"), + padding=1, + with_header_hide=None, + ), + "youtrack": TableFormat( + lineabove=None, + linebelowheader=None, + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("|| ", " || ", " || "), + datarow=DataRow("| ", " | ", " |"), + padding=1, + with_header_hide=None, + ), + "html": TableFormat( + lineabove=_html_begin_table_without_header, + linebelowheader="", + linebetweenrows=None, + linebelow=Line("\n
", "", "", ""), + headerrow=partial(_html_row_with_attrs, "th", False), + datarow=partial(_html_row_with_attrs, "td", False), + padding=0, + with_header_hide=["lineabove"], + ), + "unsafehtml": TableFormat( + lineabove=_html_begin_table_without_header, + linebelowheader="", + linebetweenrows=None, + linebelow=Line("\n", "", "", ""), + headerrow=partial(_html_row_with_attrs, "th", True), + datarow=partial(_html_row_with_attrs, "td", True), + padding=0, + with_header_hide=["lineabove"], + ), + "latex": TableFormat( + lineabove=_latex_line_begin_tabular, + linebelowheader=Line("\\hline", "", "", ""), + linebetweenrows=None, + linebelow=Line("\\hline\n\\end{tabular}", "", "", ""), + headerrow=_latex_row, + datarow=_latex_row, + padding=1, + with_header_hide=None, + ), + "latex_raw": TableFormat( + lineabove=_latex_line_begin_tabular, + linebelowheader=Line("\\hline", "", "", ""), + linebetweenrows=None, + linebelow=Line("\\hline\n\\end{tabular}", "", "", ""), + headerrow=partial(_latex_row, escrules={}), + datarow=partial(_latex_row, escrules={}), + padding=1, + with_header_hide=None, + ), + "latex_booktabs": TableFormat( + lineabove=partial(_latex_line_begin_tabular, booktabs=True), + linebelowheader=Line("\\midrule", "", "", ""), + linebetweenrows=None, + linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""), + headerrow=_latex_row, + datarow=_latex_row, + padding=1, + with_header_hide=None, + ), + "latex_longtable": TableFormat( + lineabove=partial(_latex_line_begin_tabular, longtable=True), + linebelowheader=Line("\\hline\n\\endhead", "", "", ""), + linebetweenrows=None, + linebelow=Line("\\hline\n\\end{longtable}", "", "", ""), + headerrow=_latex_row, + datarow=_latex_row, + padding=1, + with_header_hide=None, + ), + "tsv": TableFormat( + lineabove=None, + linebelowheader=None, + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("", "\t", ""), + datarow=DataRow("", "\t", ""), + padding=0, + with_header_hide=None, + ), + "textile": TableFormat( + lineabove=None, + linebelowheader=None, + linebetweenrows=None, + linebelow=None, + headerrow=DataRow("|_. ", "|_.", "|"), + datarow=_textile_row_with_attrs, + padding=1, + with_header_hide=None, + ), + "asciidoc": TableFormat( + lineabove=partial(_asciidoc_row, False), + linebelowheader=None, + linebetweenrows=None, + linebelow=Line("|====", "", "", ""), + headerrow=partial(_asciidoc_row, True), + datarow=partial(_asciidoc_row, False), + padding=1, + with_header_hide=["lineabove"], + ), +} + + +tabulate_formats = list(sorted(_table_formats.keys())) + +# The table formats for which multiline cells will be folded into subsequent +# table rows. The key is the original format specified at the API. The value is +# the format that will be used to represent the original format. +multiline_formats = { + "plain": "plain", + "simple": "simple", + "grid": "grid", + "simple_grid": "simple_grid", + "rounded_grid": "rounded_grid", + "heavy_grid": "heavy_grid", + "mixed_grid": "mixed_grid", + "double_grid": "double_grid", + "fancy_grid": "fancy_grid", + "pipe": "pipe", + "orgtbl": "orgtbl", + "jira": "jira", + "presto": "presto", + "pretty": "pretty", + "psql": "psql", + "rst": "rst", +} + +# TODO: Add multiline support for the remaining table formats: +# - mediawiki: Replace \n with
+# - moinmoin: TBD +# - youtrack: TBD +# - html: Replace \n with
+# - latex*: Use "makecell" package: In header, replace X\nY with +# \thead{X\\Y} and in data row, replace X\nY with \makecell{X\\Y} +# - tsv: TBD +# - textile: Replace \n with
(must be well-formed XML) + +_multiline_codes = re.compile(r"\r|\n|\r\n") +_multiline_codes_bytes = re.compile(b"\r|\n|\r\n") + +# Handle ANSI escape sequences for both control sequence introducer (CSI) and +# operating system command (OSC). Both of these begin with 0x1b (or octal 033), +# which will be shown below as ESC. +# +# CSI ANSI escape codes have the following format, defined in section 5.4 of ECMA-48: +# +# CSI: ESC followed by the '[' character (0x5b) +# Parameter Bytes: 0..n bytes in the range 0x30-0x3f +# Intermediate Bytes: 0..n bytes in the range 0x20-0x2f +# Final Byte: a single byte in the range 0x40-0x7e +# +# Also include the terminal hyperlink sequences as described here: +# https://gist.github.com/egmontkob/eb114294efbcd5adb1944c9f3cb5feda +# +# OSC 8 ; params ; uri ST display_text OSC 8 ;; ST +# +# Example: \x1b]8;;https://example.com\x5ctext to show\x1b]8;;\x5c +# +# Where: +# OSC: ESC followed by the ']' character (0x5d) +# params: 0..n optional key value pairs separated by ':' (e.g. foo=bar:baz=qux:abc=123) +# URI: the actual URI with protocol scheme (e.g. https://, file://, ftp://) +# ST: ESC followed by the '\' character (0x5c) +_esc = r"\x1b" +_csi = rf"{_esc}\[" +_osc = rf"{_esc}\]" +_st = rf"{_esc}\\" + +_ansi_escape_pat = rf""" + ( + # terminal colors, etc + {_csi} # CSI + [\x30-\x3f]* # parameter bytes + [\x20-\x2f]* # intermediate bytes + [\x40-\x7e] # final byte + | + # terminal hyperlinks + {_osc}8; # OSC opening + (\w+=\w+:?)* # key=value params list (submatch 2) + ; # delimiter + ([^{_esc}]+) # URI - anything but ESC (submatch 3) + {_st} # ST + ([^{_esc}]+) # link text - anything but ESC (submatch 4) + {_osc}8;;{_st} # "closing" OSC sequence + ) +""" +_ansi_codes = re.compile(_ansi_escape_pat, re.VERBOSE) +_ansi_codes_bytes = re.compile(_ansi_escape_pat.encode("utf8"), re.VERBOSE) +_ansi_color_reset_code = "\033[0m" + +_float_with_thousands_separators = re.compile( + r"^(([+-]?[0-9]{1,3})(?:,([0-9]{3}))*)?(?(1)\.[0-9]*|\.[0-9]+)?$" +) + + +def simple_separated_format(separator): + """Construct a simple TableFormat with columns separated by a separator. 
+ + >>> tsv = simple_separated_format("\\t") ; \ + tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23' + True + + """ + return TableFormat( + None, + None, + None, + None, + headerrow=DataRow("", separator, ""), + datarow=DataRow("", separator, ""), + padding=0, + with_header_hide=None, + ) + + +def _isnumber_with_thousands_separator(string): + """ + >>> _isnumber_with_thousands_separator(".") + False + >>> _isnumber_with_thousands_separator("1") + True + >>> _isnumber_with_thousands_separator("1.") + True + >>> _isnumber_with_thousands_separator(".1") + True + >>> _isnumber_with_thousands_separator("1000") + False + >>> _isnumber_with_thousands_separator("1,000") + True + >>> _isnumber_with_thousands_separator("1,0000") + False + >>> _isnumber_with_thousands_separator("1,000.1234") + True + >>> _isnumber_with_thousands_separator(b"1,000.1234") + True + >>> _isnumber_with_thousands_separator("+1,000.1234") + True + >>> _isnumber_with_thousands_separator("-1,000.1234") + True + """ + try: + string = string.decode() + except (UnicodeDecodeError, AttributeError): + pass + + return bool(re.match(_float_with_thousands_separators, string)) + + +def _isconvertible(conv, string): + try: + conv(string) + return True + except (ValueError, TypeError): + return False + + +def _isnumber(string): + """ + >>> _isnumber("123.45") + True + >>> _isnumber("123") + True + >>> _isnumber("spam") + False + >>> _isnumber("123e45678") + False + >>> _isnumber("inf") + True + """ + if not _isconvertible(float, string): + return False + elif isinstance(string, (str, bytes)) and ( + math.isinf(float(string)) or math.isnan(float(string)) + ): + return string.lower() in ["inf", "-inf", "nan"] + return True + + +def _isint(string, inttype=int): + """ + >>> _isint("123") + True + >>> _isint("123.45") + False + """ + return ( + type(string) is inttype + or isinstance(string, (bytes, str)) + and _isconvertible(inttype, string) + ) + + +def _isbool(string): + """ + >>> _isbool(True) + True + >>> _isbool("False") + True + >>> _isbool(1) + False + """ + return type(string) is bool or ( + isinstance(string, (bytes, str)) and string in ("True", "False") + ) + + +def _type(string, has_invisible=True, numparse=True): + """The least generic type (type(None), int, float, str, unicode). + + >>> _type(None) is type(None) + True + >>> _type("foo") is type("") + True + >>> _type("1") is type(1) + True + >>> _type('\x1b[31m42\x1b[0m') is type(42) + True + >>> _type('\x1b[31m42\x1b[0m') is type(42) + True + + """ + + if has_invisible and isinstance(string, (str, bytes)): + string = _strip_ansi(string) + + if string is None: + return type(None) + elif hasattr(string, "isoformat"): # datetime.datetime, date, and time + return str + elif _isbool(string): + return bool + elif _isint(string) and numparse: + return int + elif _isnumber(string) and numparse: + return float + elif isinstance(string, bytes): + return bytes + else: + return str + + +def _afterpoint(string): + """Symbols after a decimal point, -1 if the string lacks the decimal point. 
+ + >>> _afterpoint("123.45") + 2 + >>> _afterpoint("1001") + -1 + >>> _afterpoint("eggs") + -1 + >>> _afterpoint("123e45") + 2 + >>> _afterpoint("123,456.78") + 2 + + """ + if _isnumber(string) or _isnumber_with_thousands_separator(string): + if _isint(string): + return -1 + else: + pos = string.rfind(".") + pos = string.lower().rfind("e") if pos < 0 else pos + if pos >= 0: + return len(string) - pos - 1 + else: + return -1 # no point + else: + return -1 # not a number + + +def _padleft(width, s): + """Flush right. + + >>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430' + True + + """ + fmt = "{0:>%ds}" % width + return fmt.format(s) + + +def _padright(width, s): + """Flush left. + + >>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 ' + True + + """ + fmt = "{0:<%ds}" % width + return fmt.format(s) + + +def _padboth(width, s): + """Center string. + + >>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 ' + True + + """ + fmt = "{0:^%ds}" % width + return fmt.format(s) + + +def _padnone(ignore_width, s): + return s + + +def _strip_ansi(s): + r"""Remove ANSI escape sequences, both CSI (color codes, etc) and OSC hyperlinks. + + CSI sequences are simply removed from the output, while OSC hyperlinks are replaced + with the link text. Note: it may be desirable to show the URI instead but this is not + supported. + + >>> repr(_strip_ansi('\x1B]8;;https://example.com\x1B\\This is a link\x1B]8;;\x1B\\')) + "'This is a link'" + + >>> repr(_strip_ansi('\x1b[31mred\x1b[0m text')) + "'red text'" + + """ + if isinstance(s, str): + return _ansi_codes.sub(r"\4", s) + else: # a bytestring + return _ansi_codes_bytes.sub(r"\4", s) + + +def _visible_width(s): + """Visible width of a printed string. ANSI color codes are removed. 
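`_afterpoint` above counts the digits after a decimal point; the "decimal" column alignment later in this module pads values so those points line up before flushing the column right. A simplified sketch of that padding rule (ignoring ANSI codes, exponents, and the PRESERVE_WHITESPACE switch):

def afterpoint(s):
    pos = s.rfind(".")
    return len(s) - pos - 1 if pos >= 0 else -1

def align_decimal(strings):
    decimals = [afterpoint(s) for s in strings]
    maxdec = max(decimals)
    # pad on the right so every value has the same number of post-point chars
    strings = [s + " " * (maxdec - d) for s, d in zip(strings, decimals)]
    width = max(len(s) for s in strings)
    return [s.rjust(width) for s in strings]

for line in align_decimal(["1.25", "300", "0.5"]):
    print(repr(line))
# '  1.25'
# '300   '
# '  0.5 '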
+ + >>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world") + (5, 5) + + """ + # optional wide-character support + if wcwidth is not None and WIDE_CHARS_MODE: + len_fn = wcwidth.wcswidth + else: + len_fn = len + if isinstance(s, (str, bytes)): + return len_fn(_strip_ansi(s)) + else: + return len_fn(str(s)) + + +def _is_multiline(s): + if isinstance(s, str): + return bool(re.search(_multiline_codes, s)) + else: # a bytestring + return bool(re.search(_multiline_codes_bytes, s)) + + +def _multiline_width(multiline_s, line_width_fn=len): + """Visible width of a potentially multiline content.""" + return max(map(line_width_fn, re.split("[\r\n]", multiline_s))) + + +def _choose_width_fn(has_invisible, enable_widechars, is_multiline): + """Return a function to calculate visible cell width.""" + if has_invisible: + line_width_fn = _visible_width + elif enable_widechars: # optional wide-character support if available + line_width_fn = wcwidth.wcswidth + else: + line_width_fn = len + if is_multiline: + width_fn = lambda s: _multiline_width(s, line_width_fn) # noqa + else: + width_fn = line_width_fn + return width_fn + + +def _align_column_choose_padfn(strings, alignment, has_invisible): + if alignment == "right": + if not PRESERVE_WHITESPACE: + strings = [s.strip() for s in strings] + padfn = _padleft + elif alignment == "center": + if not PRESERVE_WHITESPACE: + strings = [s.strip() for s in strings] + padfn = _padboth + elif alignment == "decimal": + if has_invisible: + decimals = [_afterpoint(_strip_ansi(s)) for s in strings] + else: + decimals = [_afterpoint(s) for s in strings] + maxdecimals = max(decimals) + strings = [s + (maxdecimals - decs) * " " for s, decs in zip(strings, decimals)] + padfn = _padleft + elif not alignment: + padfn = _padnone + else: + if not PRESERVE_WHITESPACE: + strings = [s.strip() for s in strings] + padfn = _padright + return strings, padfn + + +def _align_column_choose_width_fn(has_invisible, enable_widechars, is_multiline): + if has_invisible: + line_width_fn = _visible_width + elif enable_widechars: # optional wide-character support if available + line_width_fn = wcwidth.wcswidth + else: + line_width_fn = len + if is_multiline: + width_fn = lambda s: _align_column_multiline_width(s, line_width_fn) # noqa + else: + width_fn = line_width_fn + return width_fn + + +def _align_column_multiline_width(multiline_s, line_width_fn=len): + """Visible width of a potentially multiline content.""" + return list(map(line_width_fn, re.split("[\r\n]", multiline_s))) + + +def _flat_list(nested_list): + ret = [] + for item in nested_list: + if isinstance(item, list): + for subitem in item: + ret.append(subitem) + else: + ret.append(item) + return ret + + +def _align_column( + strings, + alignment, + minwidth=0, + has_invisible=True, + enable_widechars=False, + is_multiline=False, +): + """[string] -> [padded_string]""" + strings, padfn = _align_column_choose_padfn(strings, alignment, has_invisible) + width_fn = _align_column_choose_width_fn( + has_invisible, enable_widechars, is_multiline + ) + + s_widths = list(map(width_fn, strings)) + maxwidth = max(max(_flat_list(s_widths)), minwidth) + # TODO: refactor column alignment in single-line and multiline modes + if is_multiline: + if not enable_widechars and not has_invisible: + padded_strings = [ + "\n".join([padfn(maxwidth, s) for s in ms.splitlines()]) + for ms in strings + ] + else: + # enable wide-character width corrections + s_lens = [[len(s) for s in re.split("[\r\n]", ms)] for ms in strings] + visible_widths = [ + 
[maxwidth - (w - l) for w, l in zip(mw, ml)] + for mw, ml in zip(s_widths, s_lens) + ] + # wcswidth and _visible_width don't count invisible characters; + # padfn doesn't need to apply another correction + padded_strings = [ + "\n".join([padfn(w, s) for s, w in zip((ms.splitlines() or ms), mw)]) + for ms, mw in zip(strings, visible_widths) + ] + else: # single-line cell values + if not enable_widechars and not has_invisible: + padded_strings = [padfn(maxwidth, s) for s in strings] + else: + # enable wide-character width corrections + s_lens = list(map(len, strings)) + visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)] + # wcswidth and _visible_width don't count invisible characters; + # padfn doesn't need to apply another correction + padded_strings = [padfn(w, s) for s, w in zip(strings, visible_widths)] + return padded_strings + + +def _more_generic(type1, type2): + types = { + type(None): 0, + bool: 1, + int: 2, + float: 3, + bytes: 4, + str: 5, + } + invtypes = { + 5: str, + 4: bytes, + 3: float, + 2: int, + 1: bool, + 0: type(None), + } + moregeneric = max(types.get(type1, 5), types.get(type2, 5)) + return invtypes[moregeneric] + + +def _column_type(strings, has_invisible=True, numparse=True): + """The least generic type all column values are convertible to. + + >>> _column_type([True, False]) is bool + True + >>> _column_type(["1", "2"]) is int + True + >>> _column_type(["1", "2.3"]) is float + True + >>> _column_type(["1", "2.3", "four"]) is str + True + >>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is str + True + >>> _column_type([None, "brux"]) is str + True + >>> _column_type([1, 2, None]) is int + True + >>> import datetime as dt + >>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is str + True + + """ + types = [_type(s, has_invisible, numparse) for s in strings] + return reduce(_more_generic, types, bool) + + +def _format(val, valtype, floatfmt, intfmt, missingval="", has_invisible=True): + """Format a value according to its type. + + Unicode is supported: + + >>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \ + tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \ + good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \ + tabulate(tbl, headers=hrow) == good_result + True + + """ # noqa + if val is None: + return missingval + + if valtype is str: + return f"{val}" + elif valtype is int: + return format(val, intfmt) + elif valtype is bytes: + try: + return str(val, "ascii") + except (TypeError, UnicodeDecodeError): + return str(val) + elif valtype is float: + is_a_colored_number = has_invisible and isinstance(val, (str, bytes)) + if is_a_colored_number: + raw_val = _strip_ansi(val) + formatted_val = format(float(raw_val), floatfmt) + return val.replace(raw_val, formatted_val) + else: + return format(float(val), floatfmt) + else: + return f"{val}" + + +def _align_header( + header, alignment, width, visible_width, is_multiline=False, width_fn=None +): + "Pad string header to width chars given known visible_width of the header." 
+ if is_multiline: + header_lines = re.split(_multiline_codes, header) + padded_lines = [ + _align_header(h, alignment, width, width_fn(h)) for h in header_lines + ] + return "\n".join(padded_lines) + # else: not multiline + ninvisible = len(header) - visible_width + width += ninvisible + if alignment == "left": + return _padright(width, header) + elif alignment == "center": + return _padboth(width, header) + elif not alignment: + return f"{header}" + else: + return _padleft(width, header) + + +def _remove_separating_lines(rows): + if type(rows) == list: + separating_lines = [] + sans_rows = [] + for index, row in enumerate(rows): + if _is_separating_line(row): + separating_lines.append(index) + else: + sans_rows.append(row) + return sans_rows, separating_lines + else: + return rows, None + + +def _reinsert_separating_lines(rows, separating_lines): + if separating_lines: + for index in separating_lines: + rows.insert(index, SEPARATING_LINE) + + +def _prepend_row_index(rows, index): + """Add a left-most index column.""" + if index is None or index is False: + return rows + if isinstance(index, Sized) and len(index) != len(rows): + raise ValueError( + "index must be as long as the number of data rows: " + + "len(index)={} len(rows)={}".format(len(index), len(rows)) + ) + sans_rows, separating_lines = _remove_separating_lines(rows) + new_rows = [] + index_iter = iter(index) + for row in sans_rows: + index_v = next(index_iter) + new_rows.append([index_v] + list(row)) + rows = new_rows + _reinsert_separating_lines(rows, separating_lines) + return rows + + +def _bool(val): + "A wrapper around standard bool() which doesn't throw on NumPy arrays" + try: + return bool(val) + except ValueError: # val is likely to be a numpy array with many elements + return False + + +def _normalize_tabular_data(tabular_data, headers, showindex="default"): + """Transform a supported data type to a list of lists, and a list of headers. + + Supported tabular data types: + + * list-of-lists or another iterable of iterables + + * list of named tuples (usually used with headers="keys") + + * list of dicts (usually used with headers="keys") + + * list of OrderedDicts (usually used with headers="keys") + + * list of dataclasses (Python 3.7+ only, usually used with headers="keys") + + * 2D NumPy arrays + + * NumPy record arrays (usually used with headers="keys") + + * dict of iterables (usually used with headers="keys") + + * pandas.DataFrame (usually used with headers="keys") + + The first row can be used as headers if headers="firstrow", + column indices can be used as headers if headers="keys". + + If showindex="default", show row indices of the pandas.DataFrame. + If showindex="always", show row indices for all types of data. + If showindex="never", don't show row indices for all types of data. + If showindex is an iterable, show its values as row indices. + + """ + + try: + bool(headers) + is_headers2bool_broken = False # noqa + except ValueError: # numpy.ndarray, pandas.core.index.Index, ... + is_headers2bool_broken = True # noqa + headers = list(headers) + + index = None + if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"): + # dict-like and pandas.DataFrame? 
+ if hasattr(tabular_data.values, "__call__"): + # likely a conventional dict + keys = tabular_data.keys() + rows = list( + izip_longest(*tabular_data.values()) + ) # columns have to be transposed + elif hasattr(tabular_data, "index"): + # values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0) + keys = list(tabular_data) + if ( + showindex in ["default", "always", True] + and tabular_data.index.name is not None + ): + if isinstance(tabular_data.index.name, list): + keys[:0] = tabular_data.index.name + else: + keys[:0] = [tabular_data.index.name] + vals = tabular_data.values # values matrix doesn't need to be transposed + # for DataFrames add an index per default + index = list(tabular_data.index) + rows = [list(row) for row in vals] + else: + raise ValueError("tabular data doesn't appear to be a dict or a DataFrame") + + if headers == "keys": + headers = list(map(str, keys)) # headers should be strings + + else: # it's a usual iterable of iterables, or a NumPy array, or an iterable of dataclasses + rows = list(tabular_data) + + if headers == "keys" and not rows: + # an empty table (issue #81) + headers = [] + elif ( + headers == "keys" + and hasattr(tabular_data, "dtype") + and getattr(tabular_data.dtype, "names") + ): + # numpy record array + headers = tabular_data.dtype.names + elif ( + headers == "keys" + and len(rows) > 0 + and isinstance(rows[0], tuple) + and hasattr(rows[0], "_fields") + ): + # namedtuple + headers = list(map(str, rows[0]._fields)) + elif len(rows) > 0 and hasattr(rows[0], "keys") and hasattr(rows[0], "values"): + # dict-like object + uniq_keys = set() # implements hashed lookup + keys = [] # storage for set + if headers == "firstrow": + firstdict = rows[0] if len(rows) > 0 else {} + keys.extend(firstdict.keys()) + uniq_keys.update(keys) + rows = rows[1:] + for row in rows: + for k in row.keys(): + # Save unique items in input order + if k not in uniq_keys: + keys.append(k) + uniq_keys.add(k) + if headers == "keys": + headers = keys + elif isinstance(headers, dict): + # a dict of headers for a list of dicts + headers = [headers.get(k, k) for k in keys] + headers = list(map(str, headers)) + elif headers == "firstrow": + if len(rows) > 0: + headers = [firstdict.get(k, k) for k in keys] + headers = list(map(str, headers)) + else: + headers = [] + elif headers: + raise ValueError( + "headers for a list of dicts is not a dict or a keyword" + ) + rows = [[row.get(k) for k in keys] for row in rows] + + elif ( + headers == "keys" + and hasattr(tabular_data, "description") + and hasattr(tabular_data, "fetchone") + and hasattr(tabular_data, "rowcount") + ): + # Python Database API cursor object (PEP 0249) + # print tabulate(cursor, headers='keys') + headers = [column[0] for column in tabular_data.description] + + elif ( + dataclasses is not None + and len(rows) > 0 + and dataclasses.is_dataclass(rows[0]) + ): + # Python 3.7+'s dataclass + field_names = [field.name for field in dataclasses.fields(rows[0])] + if headers == "keys": + headers = field_names + rows = [[getattr(row, f) for f in field_names] for row in rows] + + elif headers == "keys" and len(rows) > 0: + # keys are column indices + headers = list(map(str, range(len(rows[0])))) + + # take headers from the first row if necessary + if headers == "firstrow" and len(rows) > 0: + if index is not None: + headers = [index[0]] + list(rows[0]) + index = index[1:] + else: + headers = rows[0] + headers = list(map(str, headers)) # headers should be strings + rows = rows[1:] + elif headers == 
"firstrow": + headers = [] + + headers = list(map(str, headers)) + # rows = list(map(list, rows)) + rows = list(map(lambda r: r if _is_separating_line(r) else list(r), rows)) + + # add or remove an index column + showindex_is_a_str = type(showindex) in [str, bytes] + if showindex == "default" and index is not None: + rows = _prepend_row_index(rows, index) + elif isinstance(showindex, Sized) and not showindex_is_a_str: + rows = _prepend_row_index(rows, list(showindex)) + elif isinstance(showindex, Iterable) and not showindex_is_a_str: + rows = _prepend_row_index(rows, showindex) + elif showindex == "always" or (_bool(showindex) and not showindex_is_a_str): + if index is None: + index = list(range(len(rows))) + rows = _prepend_row_index(rows, index) + elif showindex == "never" or (not _bool(showindex) and not showindex_is_a_str): + pass + + # pad with empty headers for initial columns if necessary + if headers and len(rows) > 0: + nhs = len(headers) + ncols = len(rows[0]) + if nhs < ncols: + headers = [""] * (ncols - nhs) + headers + + return rows, headers + + +def _wrap_text_to_colwidths(list_of_lists, colwidths, numparses=True): + numparses = _expand_iterable(numparses, len(list_of_lists[0]), True) + + result = [] + + for row in list_of_lists: + new_row = [] + for cell, width, numparse in zip(row, colwidths, numparses): + if _isnumber(cell) and numparse: + new_row.append(cell) + continue + + if width is not None: + wrapper = _CustomTextWrap(width=width) + # Cast based on our internal type handling + # Any future custom formatting of types (such as datetimes) + # may need to be more explicit than just `str` of the object + casted_cell = ( + str(cell) if _isnumber(cell) else _type(cell, numparse)(cell) + ) + wrapped = wrapper.wrap(casted_cell) + new_row.append("\n".join(wrapped)) + else: + new_row.append(cell) + result.append(new_row) + + return result + + +def _to_str(s, encoding="utf8", errors="ignore"): + """ + A type safe wrapper for converting a bytestring to str. This is essentially just + a wrapper around .decode() intended for use with things like map(), but with some + specific behavior: + + 1. if the given parameter is not a bytestring, it is returned unmodified + 2. decode() is called for the given parameter and assumes utf8 encoding, but the + default error behavior is changed from 'strict' to 'ignore' + + >>> repr(_to_str(b'foo')) + "'foo'" + + >>> repr(_to_str('foo')) + "'foo'" + + >>> repr(_to_str(42)) + "'42'" + + """ + if isinstance(s, bytes): + return s.decode(encoding=encoding, errors=errors) + return str(s) + + +def tabulate( + tabular_data, + headers=(), + tablefmt="simple", + floatfmt=_DEFAULT_FLOATFMT, + intfmt=_DEFAULT_INTFMT, + numalign=_DEFAULT_ALIGN, + stralign=_DEFAULT_ALIGN, + missingval=_DEFAULT_MISSINGVAL, + showindex="default", + disable_numparse=False, + colalign=None, + maxcolwidths=None, + rowalign=None, + maxheadercolwidths=None, +): + """Format a fixed width table for pretty printing. + + >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]])) + --- --------- + 1 2.34 + -56 8.999 + 2 10001 + --- --------- + + The first required argument (`tabular_data`) can be a + list-of-lists (or another iterable of iterables), a list of named + tuples, a dictionary of iterables, an iterable of dictionaries, + an iterable of dataclasses (Python 3.7+), a two-dimensional NumPy array, + NumPy record array, or a Pandas' dataframe. 
+ + + Table headers + ------------- + + To print nice column headers, supply the second argument (`headers`): + + - `headers` can be an explicit list of column headers + - if `headers="firstrow"`, then the first row of data is used + - if `headers="keys"`, then dictionary keys or column indices are used + + Otherwise a headerless table is produced. + + If the number of headers is less than the number of columns, they + are supposed to be names of the last columns. This is consistent + with the plain-text format of R and Pandas' dataframes. + + >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]], + ... headers="firstrow")) + sex age + ----- ----- ----- + Alice F 24 + Bob M 19 + + By default, pandas.DataFrame data have an additional column called + row index. To add a similar column to all other types of data, + use `showindex="always"` or `showindex=True`. To suppress row indices + for all types of data, pass `showindex="never" or `showindex=False`. + To add a custom row index column, pass `showindex=some_iterable`. + + >>> print(tabulate([["F",24],["M",19]], showindex="always")) + - - -- + 0 F 24 + 1 M 19 + - - -- + + + Column alignment + ---------------- + + `tabulate` tries to detect column types automatically, and aligns + the values properly. By default it aligns decimal points of the + numbers (or flushes integer numbers to the right), and flushes + everything else to the left. Possible column alignments + (`numalign`, `stralign`) are: "right", "center", "left", "decimal" + (only for `numalign`), and None (to disable alignment). + + + Table formats + ------------- + + `intfmt` is a format specification used for columns which + contain numeric data without a decimal point. This can also be + a list or tuple of format strings, one per column. + + `floatfmt` is a format specification used for columns which + contain numeric data with a decimal point. This can also be + a list or tuple of format strings, one per column. + + `None` values are replaced with a `missingval` string (like + `floatfmt`, this can also be a list of values for different + columns): + + >>> print(tabulate([["spam", 1, None], + ... ["eggs", 42, 3.14], + ... ["other", None, 2.7]], missingval="?")) + ----- -- ---- + spam 1 ? + eggs 42 3.14 + other ? 2.7 + ----- -- ---- + + Various plain-text table formats (`tablefmt`) are supported: + 'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki', + 'latex', 'latex_raw', 'latex_booktabs', 'latex_longtable' and tsv. + Variable `tabulate_formats`contains the list of currently supported formats. + + "plain" format doesn't use any pseudographics to draw tables, + it separates columns with a double space: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "plain")) + strings numbers + spam 41.9999 + eggs 451 + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain")) + spam 41.9999 + eggs 451 + + "simple" format is like Pandoc simple_tables: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "simple")) + strings numbers + --------- --------- + spam 41.9999 + eggs 451 + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple")) + ---- -------- + spam 41.9999 + eggs 451 + ---- -------- + + "grid" is similar to tables produced by Emacs table.el package or + Pandoc grid_tables: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... 
["strings", "numbers"], "grid")) + +-----------+-----------+ + | strings | numbers | + +===========+===========+ + | spam | 41.9999 | + +-----------+-----------+ + | eggs | 451 | + +-----------+-----------+ + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid")) + +------+----------+ + | spam | 41.9999 | + +------+----------+ + | eggs | 451 | + +------+----------+ + + "simple_grid" draws a grid using single-line box-drawing + characters: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "simple_grid")) + ┌───────────┬───────────┐ + │ strings │ numbers │ + ├───────────┼───────────┤ + │ spam │ 41.9999 │ + ├───────────┼───────────┤ + │ eggs │ 451 │ + └───────────┴───────────┘ + + "rounded_grid" draws a grid using single-line box-drawing + characters with rounded corners: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "rounded_grid")) + ╭───────────┬───────────╮ + │ strings │ numbers │ + ├───────────┼───────────┤ + │ spam │ 41.9999 │ + ├───────────┼───────────┤ + │ eggs │ 451 │ + ╰───────────┴───────────╯ + + "heavy_grid" draws a grid using bold (thick) single-line box-drawing + characters: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "heavy_grid")) + ┏━━━━━━━━━━━┳━━━━━━━━━━━┓ + ┃ strings ┃ numbers ┃ + ┣━━━━━━━━━━━╋━━━━━━━━━━━┫ + ┃ spam ┃ 41.9999 ┃ + ┣━━━━━━━━━━━╋━━━━━━━━━━━┫ + ┃ eggs ┃ 451 ┃ + ┗━━━━━━━━━━━┻━━━━━━━━━━━┛ + + "mixed_grid" draws a grid using a mix of light (thin) and heavy (thick) lines + box-drawing characters: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "mixed_grid")) + ┍━━━━━━━━━━━┯━━━━━━━━━━━┑ + │ strings │ numbers │ + ┝━━━━━━━━━━━┿━━━━━━━━━━━┥ + │ spam │ 41.9999 │ + ├───────────┼───────────┤ + │ eggs │ 451 │ + ┕━━━━━━━━━━━┷━━━━━━━━━━━┙ + + "double_grid" draws a grid using double-line box-drawing + characters: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "double_grid")) + ╔═══════════╦═══════════╗ + ║ strings ║ numbers ║ + ╠═══════════╬═══════════╣ + ║ spam ║ 41.9999 ║ + ╠═══════════╬═══════════╣ + ║ eggs ║ 451 ║ + ╚═══════════╩═══════════╝ + + "fancy_grid" draws a grid using a mix of single and + double-line box-drawing characters: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "fancy_grid")) + ╒═══════════╤═══════════╕ + │ strings │ numbers │ + ╞═══════════╪═══════════╡ + │ spam │ 41.9999 │ + ├───────────┼───────────┤ + │ eggs │ 451 │ + ╘═══════════╧═══════════╛ + + "outline" is the same as the "grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "outline")) + +-----------+-----------+ + | strings | numbers | + +===========+===========+ + | spam | 41.9999 | + | eggs | 451 | + +-----------+-----------+ + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="outline")) + +------+----------+ + | spam | 41.9999 | + | eggs | 451 | + +------+----------+ + + "simple_outline" is the same as the "simple_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... 
["strings", "numbers"], "simple_outline")) + ┌───────────┬───────────┐ + │ strings │ numbers │ + ├───────────┼───────────┤ + │ spam │ 41.9999 │ + │ eggs │ 451 │ + └───────────┴───────────┘ + + "rounded_outline" is the same as the "rounded_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "rounded_outline")) + ╭───────────┬───────────╮ + │ strings │ numbers │ + ├───────────┼───────────┤ + │ spam │ 41.9999 │ + │ eggs │ 451 │ + ╰───────────┴───────────╯ + + "heavy_outline" is the same as the "heavy_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "heavy_outline")) + ┏━━━━━━━━━━━┳━━━━━━━━━━━┓ + ┃ strings ┃ numbers ┃ + ┣━━━━━━━━━━━╋━━━━━━━━━━━┫ + ┃ spam ┃ 41.9999 ┃ + ┃ eggs ┃ 451 ┃ + ┗━━━━━━━━━━━┻━━━━━━━━━━━┛ + + "mixed_outline" is the same as the "mixed_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "mixed_outline")) + ┍━━━━━━━━━━━┯━━━━━━━━━━━┑ + │ strings │ numbers │ + ┝━━━━━━━━━━━┿━━━━━━━━━━━┥ + │ spam │ 41.9999 │ + │ eggs │ 451 │ + ┕━━━━━━━━━━━┷━━━━━━━━━━━┙ + + "double_outline" is the same as the "double_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "double_outline")) + ╔═══════════╦═══════════╗ + ║ strings ║ numbers ║ + ╠═══════════╬═══════════╣ + ║ spam ║ 41.9999 ║ + ║ eggs ║ 451 ║ + ╚═══════════╩═══════════╝ + + "fancy_outline" is the same as the "fancy_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "fancy_outline")) + ╒═══════════╤═══════════╕ + │ strings │ numbers │ + ╞═══════════╪═══════════╡ + │ spam │ 41.9999 │ + │ eggs │ 451 │ + ╘═══════════╧═══════════╛ + + "pipe" is like tables in PHP Markdown Extra extension or Pandoc + pipe_tables: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "pipe")) + | strings | numbers | + |:----------|----------:| + | spam | 41.9999 | + | eggs | 451 | + + "presto" is like tables produce by the Presto CLI: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "presto")) + strings | numbers + -----------+----------- + spam | 41.9999 + eggs | 451 + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe")) + |:-----|---------:| + | spam | 41.9999 | + | eggs | 451 | + + "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They + are slightly different from "pipe" format by not using colons to + define column alignment, and using a "+" sign to indicate line + intersections: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "orgtbl")) + | strings | numbers | + |-----------+-----------| + | spam | 41.9999 | + | eggs | 451 | + + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl")) + | spam | 41.9999 | + | eggs | 451 | + + "rst" is like a simple table format from reStructuredText; please + note that reStructuredText accepts also "grid" tables: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... 
["strings", "numbers"], "rst")) + ========= ========= + strings numbers + ========= ========= + spam 41.9999 + eggs 451 + ========= ========= + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst")) + ==== ======== + spam 41.9999 + eggs 451 + ==== ======== + + "mediawiki" produces a table markup used in Wikipedia and on other + MediaWiki-based sites: + + >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]], + ... headers="firstrow", tablefmt="mediawiki")) + {| class="wikitable" style="text-align: left;" + |+ + |- + ! strings !! align="right"| numbers + |- + | spam || align="right"| 41.9999 + |- + | eggs || align="right"| 451 + |} + + "html" produces HTML markup as an html.escape'd str + with a ._repr_html_ method so that Jupyter Lab and Notebook display the HTML + and a .str property so that the raw HTML remains accessible + the unsafehtml table format can be used if an unescaped HTML format is required: + + >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]], + ... headers="firstrow", tablefmt="html")) + + + + + + + + +
+    <table>
+    <thead>
+    <tr><th>strings  </th><th style="text-align: right;">  numbers</th></tr>
+    </thead>
+    <tbody>
+    <tr><td>spam     </td><td style="text-align: right;">  41.9999</td></tr>
+    <tr><td>eggs     </td><td style="text-align: right;">      451</td></tr>
+    </tbody>
+    </table>
+ + "latex" produces a tabular environment of LaTeX document markup: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex")) + \\begin{tabular}{lr} + \\hline + spam & 41.9999 \\\\ + eggs & 451 \\\\ + \\hline + \\end{tabular} + + "latex_raw" is similar to "latex", but doesn't escape special characters, + such as backslash and underscore, so LaTeX commands may embedded into + cells' values: + + >>> print(tabulate([["spam$_9$", 41.9999], ["\\\\emph{eggs}", "451.0"]], tablefmt="latex_raw")) + \\begin{tabular}{lr} + \\hline + spam$_9$ & 41.9999 \\\\ + \\emph{eggs} & 451 \\\\ + \\hline + \\end{tabular} + + "latex_booktabs" produces a tabular environment of LaTeX document markup + using the booktabs.sty package: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs")) + \\begin{tabular}{lr} + \\toprule + spam & 41.9999 \\\\ + eggs & 451 \\\\ + \\bottomrule + \\end{tabular} + + "latex_longtable" produces a tabular environment that can stretch along + multiple pages, using the longtable package for LaTeX. + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_longtable")) + \\begin{longtable}{lr} + \\hline + spam & 41.9999 \\\\ + eggs & 451 \\\\ + \\hline + \\end{longtable} + + + Number parsing + -------------- + By default, anything which can be parsed as a number is a number. + This ensures numbers represented as strings are aligned properly. + This can lead to weird results for particular strings such as + specific git SHAs e.g. "42992e1" will be parsed into the number + 429920 and aligned as such. + + To completely disable number parsing (and alignment), use + `disable_numparse=True`. For more fine grained control, a list column + indices is used to disable number parsing only on those columns + e.g. `disable_numparse=[0, 2]` would disable number parsing only on the + first and third columns. + + Column Widths and Auto Line Wrapping + ------------------------------------ + Tabulate will, by default, set the width of each column to the length of the + longest element in that column. However, in situations where fields are expected + to reasonably be too long to look good as a single line, tabulate can help automate + word wrapping long fields for you. 
Use the parameter `maxcolwidth` to provide a + list of maximal column widths + + >>> print(tabulate( \ + [('1', 'John Smith', \ + 'This is a rather long description that might look better if it is wrapped a bit')], \ + headers=("Issue Id", "Author", "Description"), \ + maxcolwidths=[None, None, 30], \ + tablefmt="grid" \ + )) + +------------+------------+-------------------------------+ + | Issue Id | Author | Description | + +============+============+===============================+ + | 1 | John Smith | This is a rather long | + | | | description that might look | + | | | better if it is wrapped a bit | + +------------+------------+-------------------------------+ + + Header column width can be specified in a similar way using `maxheadercolwidth` + + """ + + if tabular_data is None: + tabular_data = [] + + list_of_lists, headers = _normalize_tabular_data( + tabular_data, headers, showindex=showindex + ) + list_of_lists, separating_lines = _remove_separating_lines(list_of_lists) + + if maxcolwidths is not None: + num_cols = len(list_of_lists[0]) + if isinstance(maxcolwidths, int): # Expand scalar for all columns + maxcolwidths = _expand_iterable(maxcolwidths, num_cols, maxcolwidths) + else: # Ignore col width for any 'trailing' columns + maxcolwidths = _expand_iterable(maxcolwidths, num_cols, None) + + numparses = _expand_numparse(disable_numparse, num_cols) + list_of_lists = _wrap_text_to_colwidths( + list_of_lists, maxcolwidths, numparses=numparses + ) + + if maxheadercolwidths is not None: + num_cols = len(list_of_lists[0]) + if isinstance(maxheadercolwidths, int): # Expand scalar for all columns + maxheadercolwidths = _expand_iterable( + maxheadercolwidths, num_cols, maxheadercolwidths + ) + else: # Ignore col width for any 'trailing' columns + maxheadercolwidths = _expand_iterable(maxheadercolwidths, num_cols, None) + + numparses = _expand_numparse(disable_numparse, num_cols) + headers = _wrap_text_to_colwidths( + [headers], maxheadercolwidths, numparses=numparses + )[0] + + # empty values in the first column of RST tables should be escaped (issue #82) + # "" should be escaped as "\\ " or ".." + if tablefmt == "rst": + list_of_lists, headers = _rst_escape_first_column(list_of_lists, headers) + + # PrettyTable formatting does not use any extra padding. + # Numbers are not parsed and are treated the same as strings for alignment. + # Check if pretty is the format being used and override the defaults so it + # does not impact other formats. + min_padding = MIN_PADDING + if tablefmt == "pretty": + min_padding = 0 + disable_numparse = True + numalign = "center" if numalign == _DEFAULT_ALIGN else numalign + stralign = "center" if stralign == _DEFAULT_ALIGN else stralign + else: + numalign = "decimal" if numalign == _DEFAULT_ALIGN else numalign + stralign = "left" if stralign == _DEFAULT_ALIGN else stralign + + # optimization: look for ANSI control codes once, + # enable smart width functions only if a control code is found + # + # convert the headers and rows into a single, tab-delimited string ensuring + # that any bytestrings are decoded safely (i.e. 
errors ignored) + plain_text = "\t".join( + chain( + # headers + map(_to_str, headers), + # rows: chain the rows together into a single iterable after mapping + # the bytestring conversino to each cell value + chain.from_iterable(map(_to_str, row) for row in list_of_lists), + ) + ) + + has_invisible = _ansi_codes.search(plain_text) is not None + + enable_widechars = wcwidth is not None and WIDE_CHARS_MODE + if ( + not isinstance(tablefmt, TableFormat) + and tablefmt in multiline_formats + and _is_multiline(plain_text) + ): + tablefmt = multiline_formats.get(tablefmt, tablefmt) + is_multiline = True + else: + is_multiline = False + width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline) + + # format rows and columns, convert numeric values to strings + cols = list(izip_longest(*list_of_lists)) + numparses = _expand_numparse(disable_numparse, len(cols)) + coltypes = [_column_type(col, numparse=np) for col, np in zip(cols, numparses)] + if isinstance(floatfmt, str): # old version + float_formats = len(cols) * [ + floatfmt + ] # just duplicate the string to use in each column + else: # if floatfmt is list, tuple etc we have one per column + float_formats = list(floatfmt) + if len(float_formats) < len(cols): + float_formats.extend((len(cols) - len(float_formats)) * [_DEFAULT_FLOATFMT]) + if isinstance(intfmt, str): # old version + int_formats = len(cols) * [ + intfmt + ] # just duplicate the string to use in each column + else: # if intfmt is list, tuple etc we have one per column + int_formats = list(intfmt) + if len(int_formats) < len(cols): + int_formats.extend((len(cols) - len(int_formats)) * [_DEFAULT_INTFMT]) + if isinstance(missingval, str): + missing_vals = len(cols) * [missingval] + else: + missing_vals = list(missingval) + if len(missing_vals) < len(cols): + missing_vals.extend((len(cols) - len(missing_vals)) * [_DEFAULT_MISSINGVAL]) + cols = [ + [_format(v, ct, fl_fmt, int_fmt, miss_v, has_invisible) for v in c] + for c, ct, fl_fmt, int_fmt, miss_v in zip( + cols, coltypes, float_formats, int_formats, missing_vals + ) + ] + + # align columns + aligns = [numalign if ct in [int, float] else stralign for ct in coltypes] + if colalign is not None: + assert isinstance(colalign, Iterable) + for idx, align in enumerate(colalign): + aligns[idx] = align + minwidths = ( + [width_fn(h) + min_padding for h in headers] if headers else [0] * len(cols) + ) + cols = [ + _align_column(c, a, minw, has_invisible, enable_widechars, is_multiline) + for c, a, minw in zip(cols, aligns, minwidths) + ] + + if headers: + # align headers and add headers + t_cols = cols or [[""]] * len(headers) + t_aligns = aligns or [stralign] * len(headers) + minwidths = [ + max(minw, max(width_fn(cl) for cl in c)) + for minw, c in zip(minwidths, t_cols) + ] + headers = [ + _align_header(h, a, minw, width_fn(h), is_multiline, width_fn) + for h, a, minw in zip(headers, t_aligns, minwidths) + ] + rows = list(zip(*cols)) + else: + minwidths = [max(width_fn(cl) for cl in c) for c in cols] + rows = list(zip(*cols)) + + if not isinstance(tablefmt, TableFormat): + tablefmt = _table_formats.get(tablefmt, _table_formats["simple"]) + + ra_default = rowalign if isinstance(rowalign, str) else None + rowaligns = _expand_iterable(rowalign, len(rows), ra_default) + _reinsert_separating_lines(rows, separating_lines) + + return _format_table( + tablefmt, headers, rows, minwidths, aligns, is_multiline, rowaligns=rowaligns + ) + + +def _expand_numparse(disable_numparse, column_count): + """ + Return a list of bools of length 
`column_count` which indicates whether + number parsing should be used on each column. + If `disable_numparse` is a list of indices, each of those indices are False, + and everything else is True. + If `disable_numparse` is a bool, then the returned list is all the same. + """ + if isinstance(disable_numparse, Iterable): + numparses = [True] * column_count + for index in disable_numparse: + numparses[index] = False + return numparses + else: + return [not disable_numparse] * column_count + + +def _expand_iterable(original, num_desired, default): + """ + Expands the `original` argument to return a return a list of + length `num_desired`. If `original` is shorter than `num_desired`, it will + be padded with the value in `default`. + If `original` is not a list to begin with (i.e. scalar value) a list of + length `num_desired` completely populated with `default will be returned + """ + if isinstance(original, Iterable) and not isinstance(original, str): + return original + [default] * (num_desired - len(original)) + else: + return [default] * num_desired + + +def _pad_row(cells, padding): + if cells: + pad = " " * padding + padded_cells = [pad + cell + pad for cell in cells] + return padded_cells + else: + return cells + + +def _build_simple_row(padded_cells, rowfmt): + "Format row according to DataRow format without padding." + begin, sep, end = rowfmt + return (begin + sep.join(padded_cells) + end).rstrip() + + +def _build_row(padded_cells, colwidths, colaligns, rowfmt): + "Return a string which represents a row of data cells." + if not rowfmt: + return None + if hasattr(rowfmt, "__call__"): + return rowfmt(padded_cells, colwidths, colaligns) + else: + return _build_simple_row(padded_cells, rowfmt) + + +def _append_basic_row(lines, padded_cells, colwidths, colaligns, rowfmt, rowalign=None): + # NOTE: rowalign is ignored and exists for api compatibility with _append_multiline_row + lines.append(_build_row(padded_cells, colwidths, colaligns, rowfmt)) + return lines + + +def _align_cell_veritically(text_lines, num_lines, column_width, row_alignment): + delta_lines = num_lines - len(text_lines) + blank = [" " * column_width] + if row_alignment == "bottom": + return blank * delta_lines + text_lines + elif row_alignment == "center": + top_delta = delta_lines // 2 + bottom_delta = delta_lines - top_delta + return top_delta * blank + text_lines + bottom_delta * blank + else: + return text_lines + blank * delta_lines + + +def _append_multiline_row( + lines, padded_multiline_cells, padded_widths, colaligns, rowfmt, pad, rowalign=None +): + colwidths = [w - 2 * pad for w in padded_widths] + cells_lines = [c.splitlines() for c in padded_multiline_cells] + nlines = max(map(len, cells_lines)) # number of lines in the row + # vertically pad cells where some lines are missing + # cells_lines = [ + # (cl + [" " * w] * (nlines - len(cl))) for cl, w in zip(cells_lines, colwidths) + # ] + + cells_lines = [ + _align_cell_veritically(cl, nlines, w, rowalign) + for cl, w in zip(cells_lines, colwidths) + ] + lines_cells = [[cl[i] for cl in cells_lines] for i in range(nlines)] + for ln in lines_cells: + padded_ln = _pad_row(ln, pad) + _append_basic_row(lines, padded_ln, colwidths, colaligns, rowfmt) + return lines + + +def _build_line(colwidths, colaligns, linefmt): + "Return a string which represents a horizontal line." 
+ if not linefmt: + return None + if hasattr(linefmt, "__call__"): + return linefmt(colwidths, colaligns) + else: + begin, fill, sep, end = linefmt + cells = [fill * w for w in colwidths] + return _build_simple_row(cells, (begin, sep, end)) + + +def _append_line(lines, colwidths, colaligns, linefmt): + lines.append(_build_line(colwidths, colaligns, linefmt)) + return lines + + +class JupyterHTMLStr(str): + """Wrap the string with a _repr_html_ method so that Jupyter + displays the HTML table""" + + def _repr_html_(self): + return self + + @property + def str(self): + """add a .str property so that the raw string is still accessible""" + return self + + +def _format_table(fmt, headers, rows, colwidths, colaligns, is_multiline, rowaligns): + """Produce a plain-text representation of the table.""" + lines = [] + hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else [] + pad = fmt.padding + headerrow = fmt.headerrow + + padded_widths = [(w + 2 * pad) for w in colwidths] + if is_multiline: + pad_row = lambda row, _: row # noqa do it later, in _append_multiline_row + append_row = partial(_append_multiline_row, pad=pad) + else: + pad_row = _pad_row + append_row = _append_basic_row + + padded_headers = pad_row(headers, pad) + padded_rows = [pad_row(row, pad) for row in rows] + + if fmt.lineabove and "lineabove" not in hidden: + _append_line(lines, padded_widths, colaligns, fmt.lineabove) + + if padded_headers: + append_row(lines, padded_headers, padded_widths, colaligns, headerrow) + if fmt.linebelowheader and "linebelowheader" not in hidden: + _append_line(lines, padded_widths, colaligns, fmt.linebelowheader) + + if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden: + # initial rows with a line below + for row, ralign in zip(padded_rows[:-1], rowaligns): + append_row( + lines, row, padded_widths, colaligns, fmt.datarow, rowalign=ralign + ) + _append_line(lines, padded_widths, colaligns, fmt.linebetweenrows) + # the last row without a line below + append_row( + lines, + padded_rows[-1], + padded_widths, + colaligns, + fmt.datarow, + rowalign=rowaligns[-1], + ) + else: + separating_line = ( + fmt.linebetweenrows + or fmt.linebelowheader + or fmt.linebelow + or fmt.lineabove + or Line("", "", "", "") + ) + for row in padded_rows: + # test to see if either the 1st column or the 2nd column (account for showindex) has + # the SEPARATING_LINE flag + if _is_separating_line(row): + _append_line(lines, padded_widths, colaligns, separating_line) + else: + append_row(lines, row, padded_widths, colaligns, fmt.datarow) + + if fmt.linebelow and "linebelow" not in hidden: + _append_line(lines, padded_widths, colaligns, fmt.linebelow) + + if headers or rows: + output = "\n".join(lines) + if fmt.lineabove == _html_begin_table_without_header: + return JupyterHTMLStr(output) + else: + return output + else: # a completely empty table + return "" + + +class _CustomTextWrap(textwrap.TextWrapper): + """A custom implementation of CPython's textwrap.TextWrapper. This supports + both wide characters (Korea, Japanese, Chinese) - including mixed string. + For the most part, the `_handle_long_word` and `_wrap_chunks` functions were + copy pasted out of the CPython baseline, and updated with our custom length + and line appending logic. 
+ """ + + def __init__(self, *args, **kwargs): + self._active_codes = [] + self.max_lines = None # For python2 compatibility + textwrap.TextWrapper.__init__(self, *args, **kwargs) + + @staticmethod + def _len(item): + """Custom len that gets console column width for wide + and non-wide characters as well as ignores color codes""" + stripped = _strip_ansi(item) + if wcwidth: + return wcwidth.wcswidth(stripped) + else: + return len(stripped) + + def _update_lines(self, lines, new_line): + """Adds a new line to the list of lines the text is being wrapped into + This function will also track any ANSI color codes in this string as well + as add any colors from previous lines order to preserve the same formatting + as a single unwrapped string. + """ + code_matches = [x for x in _ansi_codes.finditer(new_line)] + color_codes = [ + code.string[code.span()[0] : code.span()[1]] for code in code_matches + ] + + # Add color codes from earlier in the unwrapped line, and then track any new ones we add. + new_line = "".join(self._active_codes) + new_line + + for code in color_codes: + if code != _ansi_color_reset_code: + self._active_codes.append(code) + else: # A single reset code resets everything + self._active_codes = [] + + # Always ensure each line is color terminted if any colors are + # still active, otherwise colors will bleed into other cells on the console + if len(self._active_codes) > 0: + new_line = new_line + _ansi_color_reset_code + + lines.append(new_line) + + def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): + """_handle_long_word(chunks : [string], + cur_line : [string], + cur_len : int, width : int) + Handle a chunk of text (most likely a word, not whitespace) that + is too long to fit in any line. + """ + # Figure out when indent is larger than the specified width, and make + # sure at least one character is stripped off on every pass + if width < 1: + space_left = 1 + else: + space_left = width - cur_len + + # If we're allowed to break long words, then do so: put as much + # of the next chunk onto the current line as will fit. + if self.break_long_words: + # Tabulate Custom: Build the string up piece-by-piece in order to + # take each charcter's width into account + chunk = reversed_chunks[-1] + i = 1 + while self._len(chunk[:i]) <= space_left: + i = i + 1 + cur_line.append(chunk[: i - 1]) + reversed_chunks[-1] = chunk[i - 1 :] + + # Otherwise, we have to preserve the long word intact. Only add + # it to the current line if there's nothing already there -- + # that minimizes how much we violate the width constraint. + elif not cur_line: + cur_line.append(reversed_chunks.pop()) + + # If we're not allowed to break long words, and there's already + # text on the current line, do nothing. Next time through the + # main loop of _wrap_chunks(), we'll wind up here again, but + # cur_len will be zero, so the next line will be entirely + # devoted to the long word that we can't handle right now. + + def _wrap_chunks(self, chunks): + """_wrap_chunks(chunks : [string]) -> [string] + Wrap a sequence of text chunks and return a list of lines of + length 'self.width' or less. (If 'break_long_words' is false, + some lines may be longer than this.) Chunks correspond roughly + to words and the whitespace between them: each chunk is + indivisible (modulo 'break_long_words'), but a line break can + come between any two chunks. Chunks should not have internal + whitespace; ie. a chunk is either all whitespace or a "word". 
+ Whitespace chunks will be removed from the beginning and end of + lines, but apart from that whitespace is preserved. + """ + lines = [] + if self.width <= 0: + raise ValueError("invalid width %r (must be > 0)" % self.width) + if self.max_lines is not None: + if self.max_lines > 1: + indent = self.subsequent_indent + else: + indent = self.initial_indent + if self._len(indent) + self._len(self.placeholder.lstrip()) > self.width: + raise ValueError("placeholder too large for max width") + + # Arrange in reverse order so items can be efficiently popped + # from a stack of chucks. + chunks.reverse() + + while chunks: + + # Start the list of chunks that will make up the current line. + # cur_len is just the length of all the chunks in cur_line. + cur_line = [] + cur_len = 0 + + # Figure out which static string will prefix this line. + if lines: + indent = self.subsequent_indent + else: + indent = self.initial_indent + + # Maximum width for this line. + width = self.width - self._len(indent) + + # First chunk on line is whitespace -- drop it, unless this + # is the very beginning of the text (ie. no lines started yet). + if self.drop_whitespace and chunks[-1].strip() == "" and lines: + del chunks[-1] + + while chunks: + chunk_len = self._len(chunks[-1]) + + # Can at least squeeze this chunk onto the current line. + if cur_len + chunk_len <= width: + cur_line.append(chunks.pop()) + cur_len += chunk_len + + # Nope, this line is full. + else: + break + + # The current line is full, and the next chunk is too big to + # fit on *any* line (not just this one). + if chunks and self._len(chunks[-1]) > width: + self._handle_long_word(chunks, cur_line, cur_len, width) + cur_len = sum(map(self._len, cur_line)) + + # If the last chunk on this line is all whitespace, drop it. + if self.drop_whitespace and cur_line and cur_line[-1].strip() == "": + cur_len -= self._len(cur_line[-1]) + del cur_line[-1] + + if cur_line: + if ( + self.max_lines is None + or len(lines) + 1 < self.max_lines + or ( + not chunks + or self.drop_whitespace + and len(chunks) == 1 + and not chunks[0].strip() + ) + and cur_len <= width + ): + # Convert current line back to a string and store it in + # list of all lines (return value). + self._update_lines(lines, indent + "".join(cur_line)) + else: + while cur_line: + if ( + cur_line[-1].strip() + and cur_len + self._len(self.placeholder) <= width + ): + cur_line.append(self.placeholder) + self._update_lines(lines, indent + "".join(cur_line)) + break + cur_len -= self._len(cur_line[-1]) + del cur_line[-1] + else: + if lines: + prev_line = lines[-1].rstrip() + if ( + self._len(prev_line) + self._len(self.placeholder) + <= self.width + ): + lines[-1] = prev_line + self.placeholder + break + self._update_lines(lines, indent + self.placeholder.lstrip()) + break + + return lines + + +def _main(): + """\ + Usage: tabulate [options] [FILE ...] + + Pretty-print tabular data. + See also https://github.com/astanin/python-tabulate + + FILE a filename of the file with tabular data; + if "-" or missing, read data from stdin. 
+ + Options: + + -h, --help show this message + -1, --header use the first row of data as a table header + -o FILE, --output FILE print table to FILE (default: stdout) + -s REGEXP, --sep REGEXP use a custom column separator (default: whitespace) + -F FPFMT, --float FPFMT floating point number format (default: g) + -I INTFMT, --int INTFMT integer point number format (default: "") + -f FMT, --format FMT set output table format; supported formats: + plain, simple, grid, fancy_grid, pipe, orgtbl, + rst, mediawiki, html, latex, latex_raw, + latex_booktabs, latex_longtable, tsv + (default: simple) + """ + import getopt + import sys + import textwrap + + usage = textwrap.dedent(_main.__doc__) + try: + opts, args = getopt.getopt( + sys.argv[1:], + "h1o:s:F:A:f:", + ["help", "header", "output", "sep=", "float=", "int=", "align=", "format="], + ) + except getopt.GetoptError as e: + print(e) + print(usage) + sys.exit(2) + headers = [] + floatfmt = _DEFAULT_FLOATFMT + intfmt = _DEFAULT_INTFMT + colalign = None + tablefmt = "simple" + sep = r"\s+" + outfile = "-" + for opt, value in opts: + if opt in ["-1", "--header"]: + headers = "firstrow" + elif opt in ["-o", "--output"]: + outfile = value + elif opt in ["-F", "--float"]: + floatfmt = value + elif opt in ["-I", "--int"]: + intfmt = value + elif opt in ["-C", "--colalign"]: + colalign = value.split() + elif opt in ["-f", "--format"]: + if value not in tabulate_formats: + print("%s is not a supported table format" % value) + print(usage) + sys.exit(3) + tablefmt = value + elif opt in ["-s", "--sep"]: + sep = value + elif opt in ["-h", "--help"]: + print(usage) + sys.exit(0) + files = [sys.stdin] if not args else args + with (sys.stdout if outfile == "-" else open(outfile, "w")) as out: + for f in files: + if f == "-": + f = sys.stdin + if _is_file(f): + _pprint_file( + f, + headers=headers, + tablefmt=tablefmt, + sep=sep, + floatfmt=floatfmt, + intfmt=intfmt, + file=out, + colalign=colalign, + ) + else: + with open(f) as fobj: + _pprint_file( + fobj, + headers=headers, + tablefmt=tablefmt, + sep=sep, + floatfmt=floatfmt, + intfmt=intfmt, + file=out, + colalign=colalign, + ) + + +def _pprint_file(fobject, headers, tablefmt, sep, floatfmt, intfmt, file, colalign): + rows = fobject.readlines() + table = [re.split(sep, r.rstrip()) for r in rows if r.strip()] + print( + tabulate( + table, + headers, + tablefmt, + floatfmt=floatfmt, + intfmt=intfmt, + colalign=colalign, + ), + file=file, + ) + + +if __name__ == "__main__": + _main() diff --git a/venv/lib/python3.10/site-packages/tabulate/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/tabulate/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b5453f3daf856789b0556260a3d6ea17cc03b11 Binary files /dev/null and b/venv/lib/python3.10/site-packages/tabulate/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/tabulate/__pycache__/version.cpython-310.pyc b/venv/lib/python3.10/site-packages/tabulate/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc7b8aeb180eed9a750518d58526407689871eee Binary files /dev/null and b/venv/lib/python3.10/site-packages/tabulate/__pycache__/version.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/tabulate/version.py b/venv/lib/python3.10/site-packages/tabulate/version.py new file mode 100644 index 0000000000000000000000000000000000000000..1dd234cadb9062a0be1c43f22b3c0b4c10bbba5d --- 
/dev/null +++ b/venv/lib/python3.10/site-packages/tabulate/version.py @@ -0,0 +1,5 @@ +# coding: utf-8 +# file generated by setuptools_scm +# don't change, don't track in version control +__version__ = version = '0.9.0' +__version_tuple__ = version_tuple = (0, 9, 0) diff --git a/venv/lib/python3.10/site-packages/threadpoolctl-3.4.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/threadpoolctl-3.4.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/threadpoolctl-3.4.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/threadpoolctl-3.4.0.dist-info/LICENSE b/venv/lib/python3.10/site-packages/threadpoolctl-3.4.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f2927f5f8147f137783bb5072794999e04655cfd --- /dev/null +++ b/venv/lib/python3.10/site-packages/threadpoolctl-3.4.0.dist-info/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2019, threadpoolctl contributors + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/threadpoolctl-3.4.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/threadpoolctl-3.4.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..6d7926f7e43d3a7b3b33e5ea3e8d201077c2bfa7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/threadpoolctl-3.4.0.dist-info/METADATA @@ -0,0 +1,383 @@ +Metadata-Version: 2.1 +Name: threadpoolctl +Version: 3.4.0 +Summary: threadpoolctl +Home-page: https://github.com/joblib/threadpoolctl +License: BSD-3-Clause +Author: Thomas Moreau +Author-email: thomas.moreau.2010@gmail.com +Requires-Python: >=3.8 +Description-Content-Type: text/markdown +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Topic :: Software Development :: Libraries :: Python Modules + +# Thread-pool Controls [![Build Status](https://dev.azure.com/joblib/threadpoolctl/_apis/build/status/joblib.threadpoolctl?branchName=master)](https://dev.azure.com/joblib/threadpoolctl/_build/latest?definitionId=1&branchName=master) [![codecov](https://codecov.io/gh/joblib/threadpoolctl/branch/master/graph/badge.svg)](https://codecov.io/gh/joblib/threadpoolctl) + +Python helpers to limit the number of threads used in the +threadpool-backed of common native libraries used for scientific +computing and data science (e.g. BLAS and OpenMP). + +Fine control of the underlying thread-pool size can be useful in +workloads that involve nested parallelism so as to mitigate +oversubscription issues. + +## Installation + +- For users, install the last published version from PyPI: + + ```bash + pip install threadpoolctl + ``` + +- For contributors, install from the source repository in developer + mode: + + ```bash + pip install -r dev-requirements.txt + flit install --symlink + ``` + + then you run the tests with pytest: + + ```bash + pytest + ``` + +## Usage + +### Command Line Interface + +Get a JSON description of thread-pools initialized when importing python +packages such as numpy or scipy for instance: + +``` +python -m threadpoolctl -i numpy scipy.linalg +[ + { + "filepath": "/home/ogrisel/miniconda3/envs/tmp/lib/libmkl_rt.so", + "prefix": "libmkl_rt", + "user_api": "blas", + "internal_api": "mkl", + "version": "2019.0.4", + "num_threads": 2, + "threading_layer": "intel" + }, + { + "filepath": "/home/ogrisel/miniconda3/envs/tmp/lib/libiomp5.so", + "prefix": "libiomp", + "user_api": "openmp", + "internal_api": "openmp", + "version": null, + "num_threads": 4 + } +] +``` + +The JSON information is written on STDOUT. If some of the packages are missing, +a warning message is displayed on STDERR. 
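+
+The same JSON can also be consumed programmatically. The snippet below is a
+minimal sketch (not part of threadpoolctl itself) that shells out to the CLI
+shown above and parses its STDOUT; the field names match the example output:
+
+```python
+import json
+import subprocess
+import sys
+
+# Run the documented CLI and capture the JSON it writes to STDOUT.
+result = subprocess.run(
+    [sys.executable, "-m", "threadpoolctl", "-i", "numpy"],
+    capture_output=True, text=True, check=True,
+)
+for pool in json.loads(result.stdout):
+    # Each entry describes one detected thread-pool-backed library.
+    print(pool["prefix"], pool["user_api"], pool["num_threads"])
+```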
+ +### Python Runtime Programmatic Introspection + +Introspect the current state of the threadpool-enabled runtime libraries +that are loaded when importing Python packages: + +```python +>>> from threadpoolctl import threadpool_info +>>> from pprint import pprint +>>> pprint(threadpool_info()) +[] + +>>> import numpy +>>> pprint(threadpool_info()) +[{'filepath': '/home/ogrisel/miniconda3/envs/tmp/lib/libmkl_rt.so', + 'internal_api': 'mkl', + 'num_threads': 2, + 'prefix': 'libmkl_rt', + 'threading_layer': 'intel', + 'user_api': 'blas', + 'version': '2019.0.4'}, + {'filepath': '/home/ogrisel/miniconda3/envs/tmp/lib/libiomp5.so', + 'internal_api': 'openmp', + 'num_threads': 4, + 'prefix': 'libiomp', + 'user_api': 'openmp', + 'version': None}] + +>>> import xgboost +>>> pprint(threadpool_info()) +[{'filepath': '/home/ogrisel/miniconda3/envs/tmp/lib/libmkl_rt.so', + 'internal_api': 'mkl', + 'num_threads': 2, + 'prefix': 'libmkl_rt', + 'threading_layer': 'intel', + 'user_api': 'blas', + 'version': '2019.0.4'}, + {'filepath': '/home/ogrisel/miniconda3/envs/tmp/lib/libiomp5.so', + 'internal_api': 'openmp', + 'num_threads': 4, + 'prefix': 'libiomp', + 'user_api': 'openmp', + 'version': None}, + {'filepath': '/home/ogrisel/miniconda3/envs/tmp/lib/libgomp.so.1.0.0', + 'internal_api': 'openmp', + 'num_threads': 4, + 'prefix': 'libgomp', + 'user_api': 'openmp', + 'version': None}] +``` + +In the above example, `numpy` was installed from the default anaconda channel and comes +with MKL and its Intel OpenMP (`libiomp5`) implementation while `xgboost` was installed +from pypi.org and links against GNU OpenMP (`libgomp`) so both OpenMP runtimes are +loaded in the same Python program. + +The state of these libraries is also accessible through the object oriented API: + +```python +>>> from threadpoolctl import ThreadpoolController, threadpool_info +>>> from pprint import pprint +>>> import numpy +>>> controller = ThreadpoolController() +>>> pprint(controller.info()) +[{'architecture': 'Haswell', + 'filepath': '/home/jeremie/miniconda/envs/dev/lib/libopenblasp-r0.3.17.so', + 'internal_api': 'openblas', + 'num_threads': 4, + 'prefix': 'libopenblas', + 'threading_layer': 'pthreads', + 'user_api': 'blas', + 'version': '0.3.17'}] + +>>> controller.info() == threadpool_info() +True +``` + +### Setting the Maximum Size of Thread-Pools + +Control the number of threads used by the underlying runtime libraries +in specific sections of your Python program: + +```python +>>> from threadpoolctl import threadpool_limits +>>> import numpy as np + +>>> with threadpool_limits(limits=1, user_api='blas'): +... # In this block, calls to blas implementation (like openblas or MKL) +... # will be limited to use only one thread. They can thus be used jointly +... # with thread-parallelism. +... a = np.random.randn(1000, 1000) +... a_squared = a @ a +``` + +The threadpools can also be controlled via the object oriented API, which is especially +useful to avoid searching through all the loaded shared libraries each time. It will +however not act on libraries loaded after the instantiation of the +`ThreadpoolController`: + +```python +>>> from threadpoolctl import ThreadpoolController +>>> import numpy as np +>>> controller = ThreadpoolController() + +>>> with controller.limit(limits=1, user_api='blas'): +... a = np.random.randn(1000, 1000) +... 
a_squared = a @ a +``` + +### Restricting the limits to the scope of a function + +`threadpool_limits` and `ThreadpoolController` can also be used as decorators to set +the maximum number of threads used by the supported libraries at a function level. The +decorators are accessible through their `wrap` method: + +```python +>>> from threadpoolctl import ThreadpoolController, threadpool_limits +>>> import numpy as np +>>> controller = ThreadpoolController() + +>>> @controller.wrap(limits=1, user_api='blas') +... # or @threadpool_limits.wrap(limits=1, user_api='blas') +... def my_func(): +... # Inside this function, calls to blas implementation (like openblas or MKL) +... # will be limited to use only one thread. +... a = np.random.randn(1000, 1000) +... a_squared = a @ a +... +``` + +### Switching the FlexiBLAS backend + +`FlexiBLAS` is a BLAS wrapper for which the BLAS backend can be switched at runtime. +`threadpoolctl` exposes python bindings for this feature. Here's an example but note +that this part of the API is experimental and subject to change without deprecation: + +```python +>>> from threadpoolctl import ThreadpoolController +>>> import numpy as np +>>> controller = ThreadpoolController() + +>>> controller.info() +[{'user_api': 'blas', + 'internal_api': 'flexiblas', + 'num_threads': 1, + 'prefix': 'libflexiblas', + 'filepath': '/usr/local/lib/libflexiblas.so.3.3', + 'version': '3.3.1', + 'available_backends': ['NETLIB', 'OPENBLASPTHREAD', 'ATLAS'], + 'loaded_backends': ['NETLIB'], + 'current_backend': 'NETLIB'}] + +# Retrieve the flexiblas controller +>>> flexiblas_ct = controller.select(internal_api="flexiblas").lib_controllers[0] + +# Switch the backend with one predefined at build time (listed in "available_backends") +>>> flexiblas_ct.switch_backend("OPENBLASPTHREAD") +>>> controller.info() +[{'user_api': 'blas', + 'internal_api': 'flexiblas', + 'num_threads': 4, + 'prefix': 'libflexiblas', + 'filepath': '/usr/local/lib/libflexiblas.so.3.3', + 'version': '3.3.1', + 'available_backends': ['NETLIB', 'OPENBLASPTHREAD', 'ATLAS'], + 'loaded_backends': ['NETLIB', 'OPENBLASPTHREAD'], + 'current_backend': 'OPENBLASPTHREAD'}, + {'user_api': 'blas', + 'internal_api': 'openblas', + 'num_threads': 4, + 'prefix': 'libopenblas', + 'filepath': '/usr/lib/x86_64-linux-gnu/openblas-pthread/libopenblasp-r0.3.8.so', + 'version': '0.3.8', + 'threading_layer': 'pthreads', + 'architecture': 'Haswell'}] + +# It's also possible to directly give the path to a shared library +>>> flexiblas_controller.switch_backend("/home/jeremie/miniforge/envs/flexiblas_threadpoolctl/lib/libmkl_rt.so") +>>> controller.info() +[{'user_api': 'blas', + 'internal_api': 'flexiblas', + 'num_threads': 2, + 'prefix': 'libflexiblas', + 'filepath': '/usr/local/lib/libflexiblas.so.3.3', + 'version': '3.3.1', + 'available_backends': ['NETLIB', 'OPENBLASPTHREAD', 'ATLAS'], + 'loaded_backends': ['NETLIB', + 'OPENBLASPTHREAD', + '/home/jeremie/miniforge/envs/flexiblas_threadpoolctl/lib/libmkl_rt.so'], + 'current_backend': '/home/jeremie/miniforge/envs/flexiblas_threadpoolctl/lib/libmkl_rt.so'}, + {'user_api': 'openmp', + 'internal_api': 'openmp', + 'num_threads': 4, + 'prefix': 'libomp', + 'filepath': '/home/jeremie/miniforge/envs/flexiblas_threadpoolctl/lib/libomp.so', + 'version': None}, + {'user_api': 'blas', + 'internal_api': 'openblas', + 'num_threads': 4, + 'prefix': 'libopenblas', + 'filepath': '/usr/lib/x86_64-linux-gnu/openblas-pthread/libopenblasp-r0.3.8.so', + 'version': '0.3.8', + 'threading_layer': 'pthreads', + 
'architecture': 'Haswell'},
+ {'user_api': 'blas',
+  'internal_api': 'mkl',
+  'num_threads': 2,
+  'prefix': 'libmkl_rt',
+  'filepath': '/home/jeremie/miniforge/envs/flexiblas_threadpoolctl/lib/libmkl_rt.so.2',
+  'version': '2024.0-Product',
+  'threading_layer': 'gnu'}]
+```
+
+You can observe that the previously linked OpenBLAS shared object stays loaded by
+the Python program indefinitely, but FlexiBLAS itself no longer delegates BLAS calls
+to OpenBLAS, as indicated by the `current_backend` attribute.
+
+### Writing a custom library controller
+
+Currently, `threadpoolctl` has support for `OpenMP` and the main `BLAS` libraries.
+However, it can also be used to control the threadpool of other native libraries,
+provided that they expose an API to get and set the limit on the number of threads.
+For that, one must implement a controller for this library and register it with
+`threadpoolctl`.
+
+A custom controller must be a subclass of the `LibController` class and implement
+the attributes and methods described in the docstring of `LibController`. Then this
+new controller class must be registered using the `threadpoolctl.register` function.
+A complete example can be found [here](
+  https://github.com/joblib/threadpoolctl/blob/master/tests/_pyMylib/__init__.py).
+
+### Sequential BLAS within OpenMP parallel region
+
+When one wants to have sequential BLAS calls within an OpenMP parallel region, it's
+safer to set `limits="sequential_blas_under_openmp"` since setting `limits=1` and
+`user_api="blas"` might not lead to the expected behavior in some configurations
+(e.g. OpenBLAS with the OpenMP threading layer
+https://github.com/xianyi/OpenBLAS/issues/2985).
+
+### Known Limitations
+
+- `threadpool_limits` can fail to limit the number of inner threads when nesting
+  parallel loops managed by distinct OpenMP runtime implementations (for instance
+  libgomp from GCC and libomp from clang/llvm or libiomp from ICC).
+
+  See the `test_openmp_nesting` function in [tests/test_threadpoolctl.py](
+  https://github.com/joblib/threadpoolctl/blob/master/tests/test_threadpoolctl.py)
+  for an example. More information can be found at:
+  https://github.com/jeremiedbb/Nested_OpenMP
+
+  Note however that this problem does not happen when `threadpool_limits` is
+  used to limit the number of threads used internally by BLAS calls that are
+  themselves nested under OpenMP parallel loops. `threadpool_limits` works as
+  expected, even if the inner BLAS implementation relies on a distinct OpenMP
+  implementation.
+
+- Using Intel OpenMP (ICC) and LLVM OpenMP (clang) in the same Python program
+  under Linux is known to cause problems. See the following guide for more details
+  and workarounds:
+  https://github.com/joblib/threadpoolctl/blob/master/multiple_openmp.md
+
+- Setting the maximum number of threads of the OpenMP and BLAS libraries has a global
+  effect and impacts the whole Python process. There is no thread-level isolation as
+  these libraries do not offer thread-local APIs to configure the number of threads to
+  use in nested parallel calls.
+
+
+## Maintainers
+
+To make a release:
+
+Bump the version number (`__version__`) in `threadpoolctl.py`.
+
+Build the distribution archives:
+
+```bash
+pip install flit
+flit build
+```
+
+Check the contents of `dist/`.
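+
+For example, a quick way to inspect what was built (the file names shown are
+illustrative and depend on the bumped version number):
+
+```bash
+ls dist/
+# threadpoolctl-X.Y.Z-py3-none-any.whl  threadpoolctl-X.Y.Z.tar.gz
+```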
+ +If everything is fine, make a commit for the release, tag it, push the +tag to github and then: + +```bash +flit publish +``` + +### Credits + +The initial dynamic library introspection code was written by @anton-malakhov +for the smp package available at https://github.com/IntelPython/smp . + +threadpoolctl extends this for other operating systems. Contrary to smp, +threadpoolctl does not attempt to limit the size of Python multiprocessing +pools (threads or processes) or set operating system-level CPU affinity +constraints: threadpoolctl only interacts with native libraries via their +public runtime APIs. + diff --git a/venv/lib/python3.10/site-packages/threadpoolctl-3.4.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/threadpoolctl-3.4.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..84ffc3b1ef6fff86f795014525b5acc89061e39f --- /dev/null +++ b/venv/lib/python3.10/site-packages/threadpoolctl-3.4.0.dist-info/RECORD @@ -0,0 +1,7 @@ +__pycache__/threadpoolctl.cpython-310.pyc,, +threadpoolctl-3.4.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +threadpoolctl-3.4.0.dist-info/LICENSE,sha256=gaxhkHUkiwblNmC2UtEOSF9GdfXQrg-X6iI3DaH34js,1507 +threadpoolctl-3.4.0.dist-info/METADATA,sha256=WmvFYjEeyrhqDdncJ4DhdmIZVv3MqUNNoHf67cuImGI,13249 +threadpoolctl-3.4.0.dist-info/RECORD,, +threadpoolctl-3.4.0.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81 +threadpoolctl.py,sha256=LU6jh8Vejwl17kgLDxenbZXK2asvUc2AjTV-HERwx40,49753 diff --git a/venv/lib/python3.10/site-packages/threadpoolctl-3.4.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/threadpoolctl-3.4.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..3b5e64b5e6c4a210201d1676a891fd57b15cda99 --- /dev/null +++ b/venv/lib/python3.10/site-packages/threadpoolctl-3.4.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.9.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/venv/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/LICENSE b/venv/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..c84a47288f8442851788d20022245894452bdeab --- /dev/null +++ b/venv/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 EleutherAI + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/venv/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/METADATA b/venv/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..43936014fd5ea9256869918b3576c3583974270f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/METADATA
@@ -0,0 +1,118 @@
+Metadata-Version: 2.1
+Name: tqdm-multiprocess
+Version: 0.0.11
+Summary: Easy multiprocessing with tqdm and logging redirected to main process.
+Home-page: https://github.com/EleutherAI/tqdm-multiprocess
+Author: researcher2
+Author-email: 2researcher2@gmail.com
+License: UNKNOWN
+Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 3
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Requires-Python: >=3.6
+Description-Content-Type: text/markdown
+Requires-Dist: tqdm
+Requires-Dist: colorama
+Provides-Extra: dev
+Requires-Dist: twine ; extra == 'dev'
+
+# tqdm-multiprocess
+Using queues, tqdm-multiprocess supports multiple worker processes, each with multiple tqdm progress bars, displaying them cleanly through the main process. The worker processes also have access to a single global tqdm for aggregate progress monitoring.
+
+Logging is also redirected from the subprocesses to the root logger in the main process.
+
+It currently doesn't support `tqdm(iterator)`; you will need to initialize your worker tqdms with a total and update them manually.
+
+Due to the performance limits of the default Python multiprocessing queue, you need to update your global and worker process tqdms infrequently to avoid flooding the main process. I will attempt to implement a lock-free ring buffer at some point to see if things can be improved.
+
+## Installation
+
+```bash
+pip install tqdm-multiprocess
+```
+
+## Usage
+
+*TqdmMultiProcessPool* creates a standard Python multiprocessing pool with the desired number of processes. Under the hood it uses `apply_async` with an event loop to monitor a tqdm and logging queue, allowing the worker processes to redirect both their tqdm objects and logging messages to your main process. There is also a queue for the workers to update the single global tqdm.
+
+As shown below, you create a list of tasks, each containing its function and a tuple with your parameters. The functions you pass in will need the extra arguments `tqdm_func, global_tqdm` at the end of their signatures. You must use tqdm_func when initializing your tqdms for the redirection to work. As mentioned above, passing iterators into the tqdm function is currently not supported, so set total=total_steps when setting up your tqdm, and then update the progress manually with the update() method. All other arguments to tqdm should work fine.
+
+Once you have your task list, call the map() method on your pool, passing in the process count, global_tqdm (or None), the task list, and the error and done callback functions.
The error callback will be triggered if your task functions return anything evaluating as False (`if not task_result` in the source code). The done callback will be called when the task successfully completes.
+
+The map method returns a list containing the returned results for all your tasks, in the original order.
+
+### examples/basic_example.py
+
+```python
+from time import sleep
+import multiprocessing
+import tqdm
+
+import logging
+from tqdm_multiprocess.logger import setup_logger_tqdm
+logger = logging.getLogger(__name__)
+
+from tqdm_multiprocess import TqdmMultiProcessPool
+
+iterations1 = 100
+iterations2 = 5
+iterations3 = 2
+def some_other_function(tqdm_func, global_tqdm):
+
+    total_iterations = iterations1 * iterations2 * iterations3
+    with tqdm_func(total=total_iterations, dynamic_ncols=True) as progress3:
+        progress3.set_description("outer")
+        for i in range(iterations3):
+            logger.info("outer")
+            total_iterations = iterations1 * iterations2
+            with tqdm_func(total=total_iterations, dynamic_ncols=True) as progress2:
+                progress2.set_description("middle")
+                for j in range(iterations2):
+                    logger.info("middle")
+                    #for k in tqdm_func(range(iterations1), dynamic_ncols=True, desc="inner"):
+                    with tqdm_func(total=iterations1, dynamic_ncols=True) as progress1:
+                        for j in range(iterations1):
+                            # logger.info("inner") # Spam slows down tqdm too much
+                            progress1.set_description("inner")
+                            sleep(0.01)
+                            progress1.update()
+                            progress2.update()
+                            progress3.update()
+                            global_tqdm.update()
+
+    logger.warning(f"Warning test message. {multiprocessing.current_process().name}")
+    logger.error(f"Error test message. {multiprocessing.current_process().name}")
+
+
+# Multiprocessed
+def example_multiprocessing_function(some_input, tqdm_func, global_tqdm):
+    logger.debug(f"Debug test message - I won't show up in console. {multiprocessing.current_process().name}")
+    logger.info(f"Info test message. {multiprocessing.current_process().name}")
+    some_other_function(tqdm_func, global_tqdm)
+    return True
+
+def error_callback(result):
+    print("Error!")
+
+def done_callback(result):
+    print("Done. 
Result: ", result) + +def example(): + pool = TqdmMultiProcessPool() + process_count = 4 + task_count = 10 + initial_tasks = [(example_multiprocessing_function, (i,)) for i in range(task_count)] + total_iterations = iterations1 * iterations2 * iterations3 * task_count + with tqdm.tqdm(total=total_iterations, dynamic_ncols=True) as global_progress: + global_progress.set_description("global") + results = pool.map(process_count, global_progress, initial_tasks, error_callback, done_callback) + print(results) + +if __name__ == '__main__': + logfile_path = "tqdm_multiprocessing_example.log" + setup_logger_tqdm(logfile_path) # Logger will write messages using tqdm.write + example() +``` + + diff --git a/venv/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/RECORD b/venv/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..cfa85245b1b2db3d2f2f995a783c0fdb6f84864c --- /dev/null +++ b/venv/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/RECORD @@ -0,0 +1,18 @@ +examples/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +examples/__pycache__/__init__.cpython-310.pyc,, +examples/__pycache__/basic_example.cpython-310.pyc,, +examples/basic_example.py,sha256=BXEyjPzHVxUVtD2g5A-xeZOtzRY10jiyuITTRqI67n8,2463 +tqdm_multiprocess-0.0.11.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +tqdm_multiprocess-0.0.11.dist-info/LICENSE,sha256=TLAo1IEUDiwrQ9q89Bt-tJmmCon8eSsGrAZAzpwLguo,1088 +tqdm_multiprocess-0.0.11.dist-info/METADATA,sha256=BlWIKFVIP1eqTciC9JJO15VSoGMf70M9nWAWu8Oiq64,5679 +tqdm_multiprocess-0.0.11.dist-info/RECORD,, +tqdm_multiprocess-0.0.11.dist-info/WHEEL,sha256=D1Wh14kWDxPnrM-5t_6UCB-UuQNrEODtRa3vF4OsvQY,97 +tqdm_multiprocess-0.0.11.dist-info/top_level.txt,sha256=84yOpK8vPnmQ1iMs3w7vmZuUFMI2t62KewLi_i7CniE,18 +tqdm_multiprocess/__init__.py,sha256=Iv5LxLnGnJfTkeg12f6EveHdejESd6Y15AW1iLi3Z5U,39 +tqdm_multiprocess/__pycache__/__init__.cpython-310.pyc,, +tqdm_multiprocess/__pycache__/logger.cpython-310.pyc,, +tqdm_multiprocess/__pycache__/std.cpython-310.pyc,, +tqdm_multiprocess/__pycache__/tqdm_multiprocess.cpython-310.pyc,, +tqdm_multiprocess/logger.py,sha256=d13GtAC4b1W-i0sKEYfh6sxEEQ-1vk8KItoWCqUKs6I,2919 +tqdm_multiprocess/std.py,sha256=csXggdcU488MxpPl-O1oByWQxQYRrImokk4nMyWKIO8,8006 +tqdm_multiprocess/tqdm_multiprocess.py,sha256=jZFLI3qyWnl5q4aABWWJjG4-_8OshpmPQfFPHOPqbOA,5712 diff --git a/venv/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/WHEEL b/venv/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..a9d82522f6d5852d975d031fa3d5791ced4d738a --- /dev/null +++ b/venv/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.35.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..da91afe6090b269413fa65ecce26becf0dc8a003 --- /dev/null +++ b/venv/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/top_level.txt @@ -0,0 +1 @@ +tqdm_multiprocess