diff --git a/ckpts/hf_ckpt/global_step40/tokenizer.model b/ckpts/hf_ckpt/global_step40/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/ckpts/hf_ckpt/global_step40/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/ckpts/hf_ckpt/tokenizer.model b/ckpts/hf_ckpt/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..6c00c742ce03c627d6cd5b795984876fa49fa899
--- /dev/null
+++ b/ckpts/hf_ckpt/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
diff --git a/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/LICENSE.rst b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/LICENSE.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c37cae49ec77ad6ebb25568c1605f1fee5313cfb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/LICENSE.rst
@@ -0,0 +1,28 @@
+Copyright 2007 Pallets
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/METADATA b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..56e942902a96e7f012479a582c5cf89511219f9a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/METADATA
@@ -0,0 +1,105 @@
+Metadata-Version: 2.1
+Name: Jinja2
+Version: 3.1.3
+Summary: A very fast and expressive template engine.
+Home-page: https://palletsprojects.com/p/jinja/
+Maintainer: Pallets
+Maintainer-email: contact@palletsprojects.com
+License: BSD-3-Clause
+Project-URL: Donate, https://palletsprojects.com/donate
+Project-URL: Documentation, https://jinja.palletsprojects.com/
+Project-URL: Changes, https://jinja.palletsprojects.com/changes/
+Project-URL: Source Code, https://github.com/pallets/jinja/
+Project-URL: Issue Tracker, https://github.com/pallets/jinja/issues/
+Project-URL: Chat, https://discord.gg/pallets
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Text Processing :: Markup :: HTML
+Requires-Python: >=3.7
+Description-Content-Type: text/x-rst
+License-File: LICENSE.rst
+Requires-Dist: MarkupSafe >=2.0
+Provides-Extra: i18n
+Requires-Dist: Babel >=2.7 ; extra == 'i18n'
+
+Jinja
+=====
+
+Jinja is a fast, expressive, extensible templating engine. Special
+placeholders in the template allow writing code similar to Python
+syntax. Then the template is passed data to render the final document.
+
+It includes:
+
+- Template inheritance and inclusion.
+- Define and import macros within templates.
+- HTML templates can use autoescaping to prevent XSS from untrusted
+ user input.
+- A sandboxed environment can safely render untrusted templates.
+- AsyncIO support for generating templates and calling async
+ functions.
+- I18N support with Babel.
+- Templates are compiled to optimized Python code just-in-time and
+ cached, or can be compiled ahead-of-time.
+- Exceptions point to the correct line in templates to make debugging
+ easier.
+- Extensible filters, tests, functions, and even syntax.
+
+Jinja's philosophy is that while application logic belongs in Python if
+possible, it shouldn't make the template designer's job difficult by
+restricting functionality too much.
+
+
+Installing
+----------
+
+Install and update using `pip`_:
+
+.. code-block:: text
+
+ $ pip install -U Jinja2
+
+.. _pip: https://pip.pypa.io/en/stable/getting-started/
+
+
+In A Nutshell
+-------------
+
+.. code-block:: jinja
+
+ {% extends "base.html" %}
+ {% block title %}Members{% endblock %}
+ {% block content %}
+ <ul>
+ {% for user in users %}
+ <li><a href="{{ user.url }}">{{ user.username }}</a></li>
+ {% endfor %}
+ </ul>
+ {% endblock %}
+
+
+Donate
+------
+
+The Pallets organization develops and supports Jinja and other popular
+packages. In order to grow the community of contributors and users, and
+allow the maintainers to devote more time to the projects, `please
+donate today`_.
+
+.. _please donate today: https://palletsprojects.com/donate
+
+
+Links
+-----
+
+- Documentation: https://jinja.palletsprojects.com/
+- Changes: https://jinja.palletsprojects.com/changes/
+- PyPI Releases: https://pypi.org/project/Jinja2/
+- Source Code: https://github.com/pallets/jinja/
+- Issue Tracker: https://github.com/pallets/jinja/issues/
+- Chat: https://discord.gg/pallets
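For context on the Jinja2 distribution added above, here is a minimal rendering sketch using the public `Template` API; the template string and variables are illustrative and not taken from the diff:

```python
from jinja2 import Template

# Compile a small inline template and render it with data.
template = Template("Hello, {{ name }}! You have {{ count }} new messages.")
print(template.render(name="World", count=3))  # -> Hello, World! You have 3 new messages.
```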
diff --git a/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/RECORD b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..8a511ac007aa6ef6cabad0bdb17d166d1398c11d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/RECORD
@@ -0,0 +1,58 @@
+Jinja2-3.1.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+Jinja2-3.1.3.dist-info/LICENSE.rst,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475
+Jinja2-3.1.3.dist-info/METADATA,sha256=0cLNbRCI91jytc7Bzv3XAQfZzFDF2gxkJuH46eF5vew,3301
+Jinja2-3.1.3.dist-info/RECORD,,
+Jinja2-3.1.3.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+Jinja2-3.1.3.dist-info/entry_points.txt,sha256=zRd62fbqIyfUpsRtU7EVIFyiu1tPwfgO7EvPErnxgTE,59
+Jinja2-3.1.3.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7
+jinja2/__init__.py,sha256=NTBwMwsECrdHmxeXF7seusHLzrh6Ldn1A9qhS5cDuf0,1927
+jinja2/__pycache__/__init__.cpython-310.pyc,,
+jinja2/__pycache__/_identifier.cpython-310.pyc,,
+jinja2/__pycache__/async_utils.cpython-310.pyc,,
+jinja2/__pycache__/bccache.cpython-310.pyc,,
+jinja2/__pycache__/compiler.cpython-310.pyc,,
+jinja2/__pycache__/constants.cpython-310.pyc,,
+jinja2/__pycache__/debug.cpython-310.pyc,,
+jinja2/__pycache__/defaults.cpython-310.pyc,,
+jinja2/__pycache__/environment.cpython-310.pyc,,
+jinja2/__pycache__/exceptions.cpython-310.pyc,,
+jinja2/__pycache__/ext.cpython-310.pyc,,
+jinja2/__pycache__/filters.cpython-310.pyc,,
+jinja2/__pycache__/idtracking.cpython-310.pyc,,
+jinja2/__pycache__/lexer.cpython-310.pyc,,
+jinja2/__pycache__/loaders.cpython-310.pyc,,
+jinja2/__pycache__/meta.cpython-310.pyc,,
+jinja2/__pycache__/nativetypes.cpython-310.pyc,,
+jinja2/__pycache__/nodes.cpython-310.pyc,,
+jinja2/__pycache__/optimizer.cpython-310.pyc,,
+jinja2/__pycache__/parser.cpython-310.pyc,,
+jinja2/__pycache__/runtime.cpython-310.pyc,,
+jinja2/__pycache__/sandbox.cpython-310.pyc,,
+jinja2/__pycache__/tests.cpython-310.pyc,,
+jinja2/__pycache__/utils.cpython-310.pyc,,
+jinja2/__pycache__/visitor.cpython-310.pyc,,
+jinja2/_identifier.py,sha256=_zYctNKzRqlk_murTNlzrju1FFJL7Va_Ijqqd7ii2lU,1958
+jinja2/async_utils.py,sha256=dFcmh6lMNfbh7eLKrBio8JqAKLHdZbpCuurFN4OERtY,2447
+jinja2/bccache.py,sha256=mhz5xtLxCcHRAa56azOhphIAe19u1we0ojifNMClDio,14061
+jinja2/compiler.py,sha256=PJzYdRLStlEOqmnQs1YxlizPrJoj3jTZuUleREn6AIQ,72199
+jinja2/constants.py,sha256=GMoFydBF_kdpaRKPoM5cl5MviquVRLVyZtfp5-16jg0,1433
+jinja2/debug.py,sha256=iWJ432RadxJNnaMOPrjIDInz50UEgni3_HKuFXi2vuQ,6299
+jinja2/defaults.py,sha256=boBcSw78h-lp20YbaXSJsqkAI2uN_mD_TtCydpeq5wU,1267
+jinja2/environment.py,sha256=0qldX3VQKZcm6lgn7zHz94oRFow7YPYERiqkquomNjU,61253
+jinja2/exceptions.py,sha256=ioHeHrWwCWNaXX1inHmHVblvc4haO7AXsjCp3GfWvx0,5071
+jinja2/ext.py,sha256=5fnMpllaXkfm2P_93RIvi-OnK7Tk8mCW8Du-GcD12Hc,31844
+jinja2/filters.py,sha256=vYjKb2zaPShvYtn_LpSmqfS8SScbrA_KOanNibsMDIE,53862
+jinja2/idtracking.py,sha256=GfNmadir4oDALVxzn3DL9YInhJDr69ebXeA2ygfuCGA,10704
+jinja2/lexer.py,sha256=DW2nX9zk-6MWp65YR2bqqj0xqCvLtD-u9NWT8AnFRxQ,29726
+jinja2/loaders.py,sha256=ayAwxfrA1SAffQta0nwSDm3TDT4KYiIGN_D9Z45B310,23085
+jinja2/meta.py,sha256=GNPEvifmSaU3CMxlbheBOZjeZ277HThOPUTf1RkppKQ,4396
+jinja2/nativetypes.py,sha256=7GIGALVJgdyL80oZJdQUaUfwSt5q2lSSZbXt0dNf_M4,4210
+jinja2/nodes.py,sha256=i34GPRAZexXMT6bwuf5SEyvdmS-bRCy9KMjwN5O6pjk,34550
+jinja2/optimizer.py,sha256=tHkMwXxfZkbfA1KmLcqmBMSaz7RLIvvItrJcPoXTyD8,1650
+jinja2/parser.py,sha256=Y199wPL-G67gJoi5G_5sHuu9uEP1PJkjjLEW_xTH8-k,39736
+jinja2/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+jinja2/runtime.py,sha256=_6LkKIWFJjQdqlrgA3K39zBFQ-7Orm3wGDm96RwxQoE,33406
+jinja2/sandbox.py,sha256=Y0xZeXQnH6EX5VjaV2YixESxoepnRbW_3UeQosaBU3M,14584
+jinja2/tests.py,sha256=Am5Z6Lmfr2XaH_npIfJJ8MdXtWsbLjMULZJulTAj30E,5905
+jinja2/utils.py,sha256=IMwRIcN1SsTw2-jdQtlH2KzNABsXZBW_-tnFXafQBvY,23933
+jinja2/visitor.py,sha256=MH14C6yq24G_KVtWzjwaI7Wg14PCJIYlWW1kpkxYak0,3568
diff --git a/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/WHEEL b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..98c0d20b7a64f4f998d7913e1d38a05dba20916c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.42.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/entry_points.txt b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7b9666c8ea311ea0f0cfe7bed861aaa5469f92bb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/entry_points.txt
@@ -0,0 +1,2 @@
+[babel.extractors]
+jinja2 = jinja2.ext:babel_extract[i18n]
diff --git a/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7f7afbf3bf54b346092be6a72070fcbd305ead1e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/top_level.txt
@@ -0,0 +1 @@
+jinja2
diff --git a/venv/lib/python3.10/site-packages/filelock/__init__.py b/venv/lib/python3.10/site-packages/filelock/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..006299d2188c5d642cf78cf19305e88c95ee01cb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/filelock/__init__.py
@@ -0,0 +1,52 @@
+"""
+A platform independent file lock that supports the with-statement.
+
+.. autodata:: filelock.__version__
+ :no-value:
+
+"""
+
+from __future__ import annotations
+
+import sys
+import warnings
+from typing import TYPE_CHECKING
+
+from ._api import AcquireReturnProxy, BaseFileLock
+from ._error import Timeout
+from ._soft import SoftFileLock
+from ._unix import UnixFileLock, has_fcntl
+from ._windows import WindowsFileLock
+from .version import version
+
+#: version of the project as a string
+__version__: str = version
+
+
+if sys.platform == "win32": # pragma: win32 cover
+ _FileLock: type[BaseFileLock] = WindowsFileLock
+else: # pragma: win32 no cover # noqa: PLR5501
+ if has_fcntl:
+ _FileLock: type[BaseFileLock] = UnixFileLock
+ else:
+ _FileLock = SoftFileLock
+ if warnings is not None:
+ warnings.warn("only soft file lock is available", stacklevel=2)
+
+if TYPE_CHECKING:
+ FileLock = SoftFileLock
+else:
+ #: Alias for the lock, which should be used for the current platform.
+ FileLock = _FileLock
+
+
+__all__ = [
+ "AcquireReturnProxy",
+ "BaseFileLock",
+ "FileLock",
+ "SoftFileLock",
+ "Timeout",
+ "UnixFileLock",
+ "WindowsFileLock",
+ "__version__",
+]
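The `filelock` package vendored above exposes `FileLock` as the platform-appropriate lock class. As a minimal usage sketch (not part of the diff; the lock-file path and timeout are illustrative), acquiring the lock via the with-statement looks like this:

```python
from filelock import FileLock, Timeout

lock = FileLock("example.txt.lock", timeout=5)  # hypothetical lock-file path

try:
    with lock:  # blocks up to 5 seconds, then raises Timeout
        with open("example.txt", "a") as fh:
            fh.write("exclusive write\n")
except Timeout:
    print("another process is holding example.txt.lock")
```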
diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fc75c66dd12c2dfeaa7c8253e92d6efc44ecc28a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/_api.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/_api.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f5fd221130572761e95b1d9d6a528046e71ff15d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/_api.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/_error.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/_error.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fb7e1c1eada0f239f8a9304324581d656accd9ae
Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/_error.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/_soft.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/_soft.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d818a4c4aede340a56d0b4563977cc11d6bd8cb6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/_soft.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/_unix.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/_unix.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6e4d77b3883063203f59ebc5ba32e5e0869b10c4
Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/_unix.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/_util.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/_util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a161399214b1738212de1eec50da1e9c48577933
Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/_util.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/_windows.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/_windows.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..887f6191706deb4bdf55e750c831026462ba115d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/_windows.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/version.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/version.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..97bb81dd908e18b2e670339bbd005f857b9f66c1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/version.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/filelock/_api.py b/venv/lib/python3.10/site-packages/filelock/_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..210b8c4138d70ac989401d9ca3fd6ab03e36c6eb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/filelock/_api.py
@@ -0,0 +1,341 @@
+from __future__ import annotations
+
+import contextlib
+import logging
+import os
+import time
+import warnings
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from threading import local
+from typing import TYPE_CHECKING, Any
+from weakref import WeakValueDictionary
+
+from ._error import Timeout
+
+if TYPE_CHECKING:
+ import sys
+ from types import TracebackType
+
+ if sys.version_info >= (3, 11): # pragma: no cover (py311+)
+ from typing import Self
+ else: # pragma: no cover (<py311)
+ from typing_extensions import Self
+
+
+_LOGGER = logging.getLogger("filelock")
+
+
+# This is a helper class which is returned by :meth:`BaseFileLock.acquire` and wraps the lock to make sure __enter__
+# is not called twice when entering the with statement. If we would simply return *self*, the lock would be acquired
+# again in the *__enter__* method of the BaseFileLock, but not released again automatically. issue #37 (memory leak)
+class AcquireReturnProxy:
+ """A context-aware object that will release the lock file when exiting."""
+
+ def __init__(self, lock: BaseFileLock) -> None:
+ self.lock = lock
+
+ def __enter__(self) -> BaseFileLock:
+ return self.lock
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> None:
+ self.lock.release()
+
+
+@dataclass
+class FileLockContext:
+ """A dataclass which holds the context for a ``BaseFileLock`` object."""
+
+ # The context is held in a separate class to allow optional use of thread local storage via the
+ # ThreadLocalFileContext class.
+
+ #: The path to the lock file.
+ lock_file: str
+
+ #: The default timeout value.
+ timeout: float
+
+ #: The mode for the lock files
+ mode: int
+
+ #: The file descriptor for the *_lock_file* as it is returned by the os.open() function, not None when lock held
+ lock_file_fd: int | None = None
+
+ #: The lock counter is used for implementing the nested locking mechanism.
+ lock_counter: int = 0 # Incremented when the lock is acquired; the lock is only fully released once this drops back to 0
+
+
+class ThreadLocalFileContext(FileLockContext, local):
+ """A thread local version of the ``FileLockContext`` class."""
+
+
+class BaseFileLock(ABC, contextlib.ContextDecorator):
+ """Abstract base class for a file lock object."""
+
+ _instances: WeakValueDictionary[str, BaseFileLock]
+
+ def __new__( # noqa: PLR0913
+ cls,
+ lock_file: str | os.PathLike[str],
+ timeout: float = -1,
+ mode: int = 0o644,
+ thread_local: bool = True, # noqa: ARG003, FBT001, FBT002
+ *,
+ is_singleton: bool = False,
+ **kwargs: dict[str, Any], # capture remaining kwargs for subclasses # noqa: ARG003
+ ) -> Self:
+ """Create a new lock object or if specified return the singleton instance for the lock file."""
+ if not is_singleton:
+ return super().__new__(cls)
+
+ instance = cls._instances.get(str(lock_file))
+ if not instance:
+ instance = super().__new__(cls)
+ cls._instances[str(lock_file)] = instance
+ elif timeout != instance.timeout or mode != instance.mode:
+ msg = "Singleton lock instances cannot be initialized with differing arguments"
+ raise ValueError(msg)
+
+ return instance # type: ignore[return-value] # https://github.com/python/mypy/issues/15322
+
+ def __init_subclass__(cls, **kwargs: dict[str, Any]) -> None:
+ """Setup unique state for lock subclasses."""
+ super().__init_subclass__(**kwargs)
+ cls._instances = WeakValueDictionary()
+
+ def __init__( # noqa: PLR0913
+ self,
+ lock_file: str | os.PathLike[str],
+ timeout: float = -1,
+ mode: int = 0o644,
+ thread_local: bool = True, # noqa: FBT001, FBT002
+ *,
+ is_singleton: bool = False,
+ ) -> None:
+ """
+ Create a new lock object.
+
+ :param lock_file: path to the file
+ :param timeout: default timeout when acquiring the lock, in seconds. It will be used as fallback value in \
+ the acquire method, if no timeout value (``None``) is given. If you want to disable the timeout, set it \
+ to a negative value. A timeout of 0 means that there is exactly one attempt to acquire the file lock.
+ :param mode: file permissions for the lockfile
+ :param thread_local: Whether this object's internal context should be thread local or not. If this is set to \
+ ``False`` then the lock will be reentrant across threads.
+ :param is_singleton: If this is set to ``True`` then only one instance of this class will be created \
+ per lock file. This is useful if you want to use the lock object for reentrant locking without needing \
+ to pass the same object around.
+
+ """
+ self._is_thread_local = thread_local
+ self._is_singleton = is_singleton
+
+ # Create the context. Note that external code should not work with the context directly and should instead use
+ # properties of this class.
+ kwargs: dict[str, Any] = {
+ "lock_file": os.fspath(lock_file),
+ "timeout": timeout,
+ "mode": mode,
+ }
+ self._context: FileLockContext = (ThreadLocalFileContext if thread_local else FileLockContext)(**kwargs)
+
+ def is_thread_local(self) -> bool:
+ """:return: a flag indicating if this lock is thread local or not"""
+ return self._is_thread_local
+
+ @property
+ def is_singleton(self) -> bool:
+ """:return: a flag indicating if this lock is singleton or not"""
+ return self._is_singleton
+
+ @property
+ def lock_file(self) -> str:
+ """:return: path to the lock file"""
+ return self._context.lock_file
+
+ @property
+ def timeout(self) -> float:
+ """
+ :return: the default timeout value, in seconds
+
+ .. versionadded:: 2.0.0
+ """
+ return self._context.timeout
+
+ @timeout.setter
+ def timeout(self, value: float | str) -> None:
+ """
+ Change the default timeout value.
+
+ :param value: the new value, in seconds
+
+ """
+ self._context.timeout = float(value)
+
+ @property
+ def mode(self) -> int:
+ """:return: the file permissions for the lockfile"""
+ return self._context.mode
+
+ @abstractmethod
+ def _acquire(self) -> None:
+ """If the file lock could be acquired, self._context.lock_file_fd holds the file descriptor of the lock file."""
+ raise NotImplementedError
+
+ @abstractmethod
+ def _release(self) -> None:
+ """Releases the lock and sets self._context.lock_file_fd to None."""
+ raise NotImplementedError
+
+ @property
+ def is_locked(self) -> bool:
+ """
+
+ :return: A boolean indicating if the lock file is holding the lock currently.
+
+ .. versionchanged:: 2.0.0
+
+ This was previously a method and is now a property.
+ """
+ return self._context.lock_file_fd is not None
+
+ @property
+ def lock_counter(self) -> int:
+ """:return: The number of times this lock has been acquired (but not yet released)."""
+ return self._context.lock_counter
+
+ def acquire(
+ self,
+ timeout: float | None = None,
+ poll_interval: float = 0.05,
+ *,
+ poll_intervall: float | None = None,
+ blocking: bool = True,
+ ) -> AcquireReturnProxy:
+ """
+ Try to acquire the file lock.
+
+ :param timeout: maximum wait time for acquiring the lock, ``None`` means use the default :attr:`~timeout`; if
+ ``timeout < 0``, there is no timeout and this method will block until the lock could be acquired
+ :param poll_interval: interval of trying to acquire the lock file
+ :param poll_intervall: deprecated, kept for backwards compatibility, use ``poll_interval`` instead
+ :param blocking: defaults to True. If False, function will return immediately if it cannot obtain a lock on the
+ first attempt. Otherwise, this method will block until the timeout expires or the lock is acquired.
+ :raises Timeout: if fails to acquire lock within the timeout period
+ :return: a context object that will unlock the file when the context is exited
+
+ .. code-block:: python
+
+ # You can use this method in the context manager (recommended)
+ with lock.acquire():
+ pass
+
+ # Or use an equivalent try-finally construct:
+ lock.acquire()
+ try:
+ pass
+ finally:
+ lock.release()
+
+ .. versionchanged:: 2.0.0
+
+ This method returns now a *proxy* object instead of *self*,
+ so that it can be used in a with statement without side effects.
+
+ """
+ # Use the default timeout, if no timeout is provided.
+ if timeout is None:
+ timeout = self._context.timeout
+
+ if poll_intervall is not None:
+ msg = "use poll_interval instead of poll_intervall"
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
+ poll_interval = poll_intervall
+
+ # Increment the number right at the beginning. We can still undo it, if something fails.
+ self._context.lock_counter += 1
+
+ lock_id = id(self)
+ lock_filename = self.lock_file
+ start_time = time.perf_counter()
+ try:
+ while True:
+ if not self.is_locked:
+ _LOGGER.debug("Attempting to acquire lock %s on %s", lock_id, lock_filename)
+ self._acquire()
+ if self.is_locked:
+ _LOGGER.debug("Lock %s acquired on %s", lock_id, lock_filename)
+ break
+ if blocking is False:
+ _LOGGER.debug("Failed to immediately acquire lock %s on %s", lock_id, lock_filename)
+ raise Timeout(lock_filename) # noqa: TRY301
+ if 0 <= timeout < time.perf_counter() - start_time:
+ _LOGGER.debug("Timeout on acquiring lock %s on %s", lock_id, lock_filename)
+ raise Timeout(lock_filename) # noqa: TRY301
+ msg = "Lock %s not acquired on %s, waiting %s seconds ..."
+ _LOGGER.debug(msg, lock_id, lock_filename, poll_interval)
+ time.sleep(poll_interval)
+ except BaseException: # Something did go wrong, so decrement the counter.
+ self._context.lock_counter = max(0, self._context.lock_counter - 1)
+ raise
+ return AcquireReturnProxy(lock=self)
+
+ def release(self, force: bool = False) -> None: # noqa: FBT001, FBT002
+ """
+ Releases the file lock. Please note, that the lock is only completely released, if the lock counter is 0.
+ Also note, that the lock file itself is not automatically deleted.
+
+ :param force: If true, the lock counter is ignored and the lock is released in every case.
+
+ """
+ if self.is_locked:
+ self._context.lock_counter -= 1
+
+ if self._context.lock_counter == 0 or force:
+ lock_id, lock_filename = id(self), self.lock_file
+
+ _LOGGER.debug("Attempting to release lock %s on %s", lock_id, lock_filename)
+ self._release()
+ self._context.lock_counter = 0
+ _LOGGER.debug("Lock %s released on %s", lock_id, lock_filename)
+
+ def __enter__(self) -> Self:
+ """
+ Acquire the lock.
+
+ :return: the lock object
+
+ """
+ self.acquire()
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> None:
+ """
+ Release the lock.
+
+ :param exc_type: the exception type if raised
+ :param exc_value: the exception value if raised
+ :param traceback: the exception traceback if raised
+
+ """
+ self.release()
+
+ def __del__(self) -> None:
+ """Called when the lock object is deleted."""
+ self.release(force=True)
+
+
+__all__ = [
+ "AcquireReturnProxy",
+ "BaseFileLock",
+]
diff --git a/venv/lib/python3.10/site-packages/filelock/_error.py b/venv/lib/python3.10/site-packages/filelock/_error.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7ff08c0f508ad7077eb6ed1990898840c952b3a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/filelock/_error.py
@@ -0,0 +1,30 @@
+from __future__ import annotations
+
+from typing import Any
+
+
+class Timeout(TimeoutError): # noqa: N818
+ """Raised when the lock could not be acquired in *timeout* seconds."""
+
+ def __init__(self, lock_file: str) -> None:
+ super().__init__()
+ self._lock_file = lock_file
+
+ def __reduce__(self) -> str | tuple[Any, ...]:
+ return self.__class__, (self._lock_file,) # Properly pickle the exception
+
+ def __str__(self) -> str:
+ return f"The file lock '{self._lock_file}' could not be acquired."
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}({self.lock_file!r})"
+
+ @property
+ def lock_file(self) -> str:
+ """:return: The path of the file lock."""
+ return self._lock_file
+
+
+__all__ = [
+ "Timeout",
+]
diff --git a/venv/lib/python3.10/site-packages/filelock/_soft.py b/venv/lib/python3.10/site-packages/filelock/_soft.py
new file mode 100644
index 0000000000000000000000000000000000000000..28c67f74cc82b8f55e47afd6a71972cc1fb95eb6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/filelock/_soft.py
@@ -0,0 +1,47 @@
+from __future__ import annotations
+
+import os
+import sys
+from contextlib import suppress
+from errno import EACCES, EEXIST
+from pathlib import Path
+
+from ._api import BaseFileLock
+from ._util import ensure_directory_exists, raise_on_not_writable_file
+
+
+class SoftFileLock(BaseFileLock):
+ """Simply watches the existence of the lock file."""
+
+ def _acquire(self) -> None:
+ raise_on_not_writable_file(self.lock_file)
+ ensure_directory_exists(self.lock_file)
+ # first check for exists and read-only mode as the open will mask this case as EEXIST
+ flags = (
+ os.O_WRONLY # open for writing only
+ | os.O_CREAT
+ | os.O_EXCL # together with above raise EEXIST if the file specified by filename exists
+ | os.O_TRUNC # truncate the file to zero byte
+ )
+ try:
+ file_handler = os.open(self.lock_file, flags, self._context.mode)
+ except OSError as exception: # re-raise unless expected exception
+ if not (
+ exception.errno == EEXIST # lock already exist
+ or (exception.errno == EACCES and sys.platform == "win32") # has no access to this lock
+ ): # pragma: win32 no cover
+ raise
+ else:
+ self._context.lock_file_fd = file_handler
+
+ def _release(self) -> None:
+ assert self._context.lock_file_fd is not None # noqa: S101
+ os.close(self._context.lock_file_fd) # the lock file is definitely not None
+ self._context.lock_file_fd = None
+ with suppress(OSError): # the file is already deleted and that's what we want
+ Path(self.lock_file).unlink()
+
+
+__all__ = [
+ "SoftFileLock",
+]
diff --git a/venv/lib/python3.10/site-packages/filelock/_unix.py b/venv/lib/python3.10/site-packages/filelock/_unix.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ae1fbe916f95762418cd62251f91f74ba35fc8c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/filelock/_unix.py
@@ -0,0 +1,68 @@
+from __future__ import annotations
+
+import os
+import sys
+from contextlib import suppress
+from errno import ENOSYS
+from pathlib import Path
+from typing import cast
+
+from ._api import BaseFileLock
+from ._util import ensure_directory_exists
+
+#: a flag to indicate if the fcntl API is available
+has_fcntl = False
+if sys.platform == "win32": # pragma: win32 cover
+
+ class UnixFileLock(BaseFileLock):
+ """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""
+
+ def _acquire(self) -> None:
+ raise NotImplementedError
+
+ def _release(self) -> None:
+ raise NotImplementedError
+
+else: # pragma: win32 no cover
+ try:
+ import fcntl
+ except ImportError:
+ pass
+ else:
+ has_fcntl = True
+
+ class UnixFileLock(BaseFileLock):
+ """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""
+
+ def _acquire(self) -> None:
+ ensure_directory_exists(self.lock_file)
+ open_flags = os.O_RDWR | os.O_TRUNC
+ if not Path(self.lock_file).exists():
+ open_flags |= os.O_CREAT
+ fd = os.open(self.lock_file, open_flags, self._context.mode)
+ with suppress(PermissionError): # This lock is not owned by this UID
+ os.fchmod(fd, self._context.mode)
+ try:
+ fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except OSError as exception:
+ os.close(fd)
+ if exception.errno == ENOSYS: # NotImplemented error
+ msg = "FileSystem does not appear to support flock; use SoftFileLock instead"
+ raise NotImplementedError(msg) from exception
+ else:
+ self._context.lock_file_fd = fd
+
+ def _release(self) -> None:
+ # Do not remove the lockfile:
+ # https://github.com/tox-dev/py-filelock/issues/31
+ # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
+ fd = cast(int, self._context.lock_file_fd)
+ self._context.lock_file_fd = None
+ fcntl.flock(fd, fcntl.LOCK_UN)
+ os.close(fd)
+
+
+__all__ = [
+ "UnixFileLock",
+ "has_fcntl",
+]
diff --git a/venv/lib/python3.10/site-packages/filelock/_util.py b/venv/lib/python3.10/site-packages/filelock/_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..c671e8533873948f0e1b5575ff952c722019f067
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/filelock/_util.py
@@ -0,0 +1,52 @@
+from __future__ import annotations
+
+import os
+import stat
+import sys
+from errno import EACCES, EISDIR
+from pathlib import Path
+
+
+def raise_on_not_writable_file(filename: str) -> None:
+ """
+ Raise an exception if attempting to open the file for writing would fail.
+
+ This is done so files that will never be writable can be separated from files that are writable but currently
+ locked.
+
+ :param filename: file to check
+ :raises OSError: as if the file was opened for writing.
+
+ """
+ try: # use stat to do exists + can write to check without race condition
+ file_stat = os.stat(filename) # noqa: PTH116
+ except OSError:
+ return # swallow does not exist or other errors
+
+ if file_stat.st_mtime != 0: # a zero modification time from os.stat indicates an invalid result - ignore it
+ if not (file_stat.st_mode & stat.S_IWUSR):
+ raise PermissionError(EACCES, "Permission denied", filename)
+
+ if stat.S_ISDIR(file_stat.st_mode):
+ if sys.platform == "win32": # pragma: win32 cover
+ # On Windows, this is PermissionError
+ raise PermissionError(EACCES, "Permission denied", filename)
+ else: # pragma: win32 no cover # noqa: RET506
+ # On linux / macOS, this is IsADirectoryError
+ raise IsADirectoryError(EISDIR, "Is a directory", filename)
+
+
+def ensure_directory_exists(filename: Path | str) -> None:
+ """
+ Ensure the directory containing the file exists (create it if necessary).
+
+ :param filename: file.
+
+ """
+ Path(filename).parent.mkdir(parents=True, exist_ok=True)
+
+
+__all__ = [
+ "ensure_directory_exists",
+ "raise_on_not_writable_file",
+]
diff --git a/venv/lib/python3.10/site-packages/filelock/_windows.py b/venv/lib/python3.10/site-packages/filelock/_windows.py
new file mode 100644
index 0000000000000000000000000000000000000000..8db55dcbaa3e7bab091781b17ce22fde1fc239f2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/filelock/_windows.py
@@ -0,0 +1,65 @@
+from __future__ import annotations
+
+import os
+import sys
+from contextlib import suppress
+from errno import EACCES
+from pathlib import Path
+from typing import cast
+
+from ._api import BaseFileLock
+from ._util import ensure_directory_exists, raise_on_not_writable_file
+
+if sys.platform == "win32": # pragma: win32 cover
+ import msvcrt
+
+ class WindowsFileLock(BaseFileLock):
+ """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""
+
+ def _acquire(self) -> None:
+ raise_on_not_writable_file(self.lock_file)
+ ensure_directory_exists(self.lock_file)
+ flags = (
+ os.O_RDWR # open for read and write
+ | os.O_CREAT # create file if not exists
+ | os.O_TRUNC # truncate file if not empty
+ )
+ try:
+ fd = os.open(self.lock_file, flags, self._context.mode)
+ except OSError as exception:
+ if exception.errno != EACCES: # has no access to this lock
+ raise
+ else:
+ try:
+ msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
+ except OSError as exception:
+ os.close(fd) # close file first
+ if exception.errno != EACCES: # file is already locked
+ raise
+ else:
+ self._context.lock_file_fd = fd
+
+ def _release(self) -> None:
+ fd = cast(int, self._context.lock_file_fd)
+ self._context.lock_file_fd = None
+ msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
+ os.close(fd)
+
+ with suppress(OSError): # Probably another instance of the application had acquired the file lock.
+ Path(self.lock_file).unlink()
+
+else: # pragma: win32 no cover
+
+ class WindowsFileLock(BaseFileLock):
+ """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""
+
+ def _acquire(self) -> None:
+ raise NotImplementedError
+
+ def _release(self) -> None:
+ raise NotImplementedError
+
+
+__all__ = [
+ "WindowsFileLock",
+]
diff --git a/venv/lib/python3.10/site-packages/filelock/py.typed b/venv/lib/python3.10/site-packages/filelock/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/filelock/version.py b/venv/lib/python3.10/site-packages/filelock/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf2a247c31b187acc0502a58ec7062029e31f0fe
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/filelock/version.py
@@ -0,0 +1,16 @@
+# file generated by setuptools_scm
+# don't change, don't track in version control
+TYPE_CHECKING = False
+if TYPE_CHECKING:
+ from typing import Tuple, Union
+ VERSION_TUPLE = Tuple[Union[int, str], ...]
+else:
+ VERSION_TUPLE = object
+
+version: str
+__version__: str
+__version_tuple__: VERSION_TUPLE
+version_tuple: VERSION_TUPLE
+
+__version__ = version = '3.13.4'
+__version_tuple__ = version_tuple = (3, 13, 4)
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..45a18228196a239eab3c305dd6aa14b0ca177712
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__main__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__main__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9a536e66d65b7311bf7800741d5d1fa4b8250a48
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__main__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/base.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8f8c81f74eb7014f6d08734ad96b2c874d81d689
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/base.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/fake_sgml.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/fake_sgml.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef97c7494dc1d6012523a36bb5c40c399cc8aae2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/fake_sgml.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/iwslt_xml.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/iwslt_xml.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..408ab7cda695581e42c8504283a4d2feae7fa620
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/iwslt_xml.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/plain_text.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/plain_text.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..69569150df4a2bc986ee08cb1d58c7e6d1dd57d6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/plain_text.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/tsv.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/tsv.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d8ec5b4b3254ea8841067d235558747c2998b187
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/tsv.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/wmt_xml.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/wmt_xml.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2683a6323de9cb1da82ee86650e5e8a13fa0213e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/wmt_xml.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/dataset/base.py b/venv/lib/python3.10/site-packages/sacrebleu/dataset/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf3c092fae7ae206d2606680f0313dae65d7bccb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sacrebleu/dataset/base.py
@@ -0,0 +1,195 @@
+"""
+The base class for all types of datasets.
+"""
+import os
+import re
+from abc import ABCMeta, abstractmethod
+from typing import Dict, List, Optional
+
+from ..utils import SACREBLEU_DIR, download_file, smart_open
+
+
+class Dataset(metaclass=ABCMeta):
+ def __init__(
+ self,
+ name: str,
+ data: Optional[List[str]] = None,
+ description: Optional[str] = None,
+ citation: Optional[str] = None,
+ md5: Optional[List[str]] = None,
+ langpairs=Dict[str, List[str]],
+ **kwargs,
+ ):
+ """
+ Params come from the values in DATASETS.
+
+ :param name: Name of the dataset.
+ :param data: URL of the raw data of the dataset.
+ :param description: Description of the dataset.
+ :param citation: Citation for the dataset.
+ :param md5: MD5 checksum of the dataset.
+ :param langpairs: List of available language pairs.
+ """
+ self.name = name
+ self.data = data
+ self.description = description
+ self.citation = citation
+ self.md5 = md5
+ self.langpairs = langpairs
+ self.kwargs = kwargs
+
+ # Don't do any downloading or further processing now.
+ # Only do that lazily, when asked.
+
+ # where to store the dataset
+ self._outdir = os.path.join(SACREBLEU_DIR, self.name)
+ self._rawdir = os.path.join(self._outdir, "raw")
+
+ def maybe_download(self):
+ """
+ If the dataset isn't downloaded, use utils/download_file()
+ This can be implemented here in the base class. It should write
+ to ~/.sacrebleu/DATASET/raw exactly as it does now.
+ """
+ os.makedirs(self._rawdir, exist_ok=True)
+
+ expected_checksums = self.md5 if self.md5 else [None] * len(self.data)
+
+ for url, expected_md5 in zip(self.data, expected_checksums):
+ tarball = os.path.join(self._rawdir, self._get_tarball_filename(url))
+
+ download_file(
+ url, tarball, extract_to=self._rawdir, expected_md5=expected_md5
+ )
+
+ @staticmethod
+ def _clean(s):
+ """
+ Removes trailing and leading spaces and collapses multiple consecutive internal spaces to a single one.
+
+ :param s: The string.
+ :return: A cleaned-up string.
+ """
+ return re.sub(r"\s+", " ", s.strip())
+
+ def _get_tarball_filename(self, url):
+ """
+ Produces a local filename for tarball.
+ :param url: The url to download.
+ :return: A name produced from the dataset identifier and the URL basename.
+ """
+ return self.name.replace("/", "_") + "." + os.path.basename(url)
+
+ def _get_txt_file_path(self, langpair, fieldname):
+ """
+ Given the language pair and fieldname, return the path to the text file.
+ The format is: ~/.sacrebleu/DATASET/DATASET.LANGPAIR.FIELDNAME
+
+ :param langpair: The language pair.
+ :param fieldname: The fieldname.
+ :return: The path to the text file.
+ """
+ # handle the special case of subsets. e.g. "wmt21/dev" > "wmt21_dev"
+ name = self.name.replace("/", "_")
+ # Colons are used to distinguish multiple references, but are not supported in Windows filenames
+ fieldname = fieldname.replace(":", "-")
+ return os.path.join(self._outdir, f"{name}.{langpair}.{fieldname}")
+
+ def _get_langpair_metadata(self, langpair):
+ """
+ Given a language pair, return the metadata for that language pair.
+ Deal with errors if the language pair is not available.
+
+ :param langpair: The language pair. e.g. "en-de"
+ :return: Dict format which is same as self.langpairs.
+ """
+ if langpair is None:
+ langpairs = self.langpairs
+ elif langpair not in self.langpairs:
+ raise Exception(f"No such language pair {self.name}/{langpair}")
+ else:
+ langpairs = {langpair: self.langpairs[langpair]}
+
+ return langpairs
+
+ @abstractmethod
+ def process_to_text(self, langpair=None) -> None:
+ """Processes raw files to plain text files.
+
+ :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed.
+ """
+ pass
+
+ def fieldnames(self, langpair) -> List[str]:
+ """
+ Return a list of all the field names. For most sources, this is just
+ the source and the reference. For others, it might include the document
+ ID for each line, or the original language (origLang).
+
+ get_files() should return the same number of items as this.
+
+ :param langpair: The language pair (e.g., "de-en")
+ :return: a list of field names
+ """
+ return ["src", "ref"]
+
+ def __iter__(self, langpair):
+ """
+ Iterates over all fields (source, references, and other metadata) defined
+ by the dataset.
+ """
+ all_files = self.get_files(langpair)
+ all_fins = [smart_open(f) for f in all_files]
+
+ for item in zip(*all_fins):
+ yield item
+
+ def source(self, langpair):
+ """
+ Return an iterable over the source lines.
+ """
+ source_file = self.get_source_file(langpair)
+ with smart_open(source_file) as fin:
+ for line in fin:
+ yield line.strip()
+
+ def references(self, langpair):
+ """
+ Return an iterable over the references.
+ """
+ ref_files = self.get_reference_files(langpair)
+ ref_fins = [smart_open(f) for f in ref_files]
+
+ for item in zip(*ref_fins):
+ yield item
+
+ def get_source_file(self, langpair):
+ all_files = self.get_files(langpair)
+ all_fields = self.fieldnames(langpair)
+ index = all_fields.index("src")
+ return all_files[index]
+
+ def get_reference_files(self, langpair):
+ all_files = self.get_files(langpair)
+ all_fields = self.fieldnames(langpair)
+ ref_files = [
+ f for f, field in zip(all_files, all_fields) if field.startswith("ref")
+ ]
+ return ref_files
+
+ def get_files(self, langpair):
+ """
+ Returns the path of the source file and all reference files for
+ the provided test set / language pair.
+ Downloads the references first if they are not already local.
+
+ :param langpair: The language pair (e.g., "de-en")
+ :return: a list of the source file and all reference files
+ """
+ fields = self.fieldnames(langpair)
+ files = [self._get_txt_file_path(langpair, field) for field in fields]
+
+ for file in files:
+ if not os.path.exists(file):
+ self.process_to_text(langpair)
+ return files
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/dataset/fake_sgml.py b/venv/lib/python3.10/site-packages/sacrebleu/dataset/fake_sgml.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1f638123e8742bb22e2f671c4af5bd0e556f685
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sacrebleu/dataset/fake_sgml.py
@@ -0,0 +1,116 @@
+import os
+import re
+
+from ..utils import smart_open
+from .base import Dataset
+
+
+class FakeSGMLDataset(Dataset):
+ """
+ The fake SGML format used by WMT prior to 2021. Can't be properly parsed.
+ Source and reference(s) in separate files.
+ """
+
+ def _convert_format(self, input_file_path, output_filep_path):
+ """
+ Extract data from raw file and convert to raw txt format.
+ """
+ with smart_open(input_file_path) as fin, smart_open(
+ output_filep_path, "wt"
+ ) as fout:
+ for line in fin:
+ if line.startswith("<seg "):
+ line = self._clean(re.sub(r"<seg.*?>(.*)</seg>.*?", "\\1", line))
+ print(line, file=fout)
+
+ def _convert_meta(self, input_file_path, field, output_filep_path):
+ """
+ Extract metadata from document tags and project it across segments.
+ """
+ with smart_open(input_file_path) as fin, smart_open(
+ output_filep_path, "wt"
+ ) as fout:
+ value = ""
+ for line in fin:
+ if line.startswith("<doc "):
+ match = re.search(rf'{field}="(.*?)"', line)
+ if match is not None:
+ value = match.group(1)
+ elif line.startswith("<seg "):
+ print(value, file=fout)
+
+ def process_to_text(self, langpair=None):
+ """Processes raw files to plain text files.
+
+ :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed.
+ """
+ # ensure that the dataset is downloaded
+ self.maybe_download()
+
+ for langpair in sorted(self._get_langpair_metadata(langpair).keys()):
+ fieldnames = self.fieldnames(langpair)
+ origin_files = [
+ os.path.join(self._rawdir, path) for path in self.langpairs[langpair]
+ ]
+
+ # Add the source file three times for docid, genre, origlang
+ if not self.name.startswith("wmt08"):
+ origin_files += [
+ os.path.join(self._rawdir, self.langpairs[langpair][0])
+ ] * 3
+
+ for field, origin_file in zip(fieldnames, origin_files):
+ origin_file = os.path.join(self._rawdir, origin_file)
+ output_file = self._get_txt_file_path(langpair, field)
+
+ if field.startswith("src") or field.startswith("ref"):
+ self._convert_format(origin_file, output_file)
+ else:
+ # document metadata keys
+ self._convert_meta(origin_file, field, output_file)
+
+ def fieldnames(self, langpair):
+ """
+ Return a list of all the field names. For most sources, this is just
+ the source and the reference. For others, it might include the document
+ ID for each line, or the original language (origLang).
+
+ get_files() should return the same number of items as this.
+
+ :param langpair: The language pair (e.g., "de-en")
+ :return: a list of field names
+ """
+ meta = self._get_langpair_metadata(langpair)
+ length = len(meta[langpair])
+
+ assert (
+ length >= 2
+ ), f"Each language pair in {self.name} must have at least 2 fields."
+
+ fields = ["src"]
+
+ if length == 2:
+ fields.append("ref")
+ else:
+ for i, _ in enumerate(meta[langpair][1:]):
+ fields.append(f"ref:{i}")
+
+ if not self.name.startswith("wmt08"):
+ fields += ["docid", "genre", "origlang"]
+
+ return fields
+
+
+class WMTAdditionDataset(FakeSGMLDataset):
+ """
+ Handle special case of WMT Google addition dataset.
+ """
+
+ def _convert_format(self, input_file_path, output_filep_path):
+ if input_file_path.endswith(".sgm"):
+ return super()._convert_format(input_file_path, output_filep_path)
+ else:
+ with smart_open(input_file_path) as fin:
+ with smart_open(output_filep_path, "wt") as fout:
+ for line in fin:
+ print(line.rstrip(), file=fout)
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/dataset/wmt_xml.py b/venv/lib/python3.10/site-packages/sacrebleu/dataset/wmt_xml.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f78bcc3ccb3447c178c1cf2e7d3aebe4e50ed1a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sacrebleu/dataset/wmt_xml.py
@@ -0,0 +1,207 @@
+import os
+
+import lxml.etree as ET
+
+from ..utils import smart_open
+from .base import Dataset
+
+from collections import defaultdict
+
+
+def _get_field_by_translator(translator):
+ if not translator:
+ return "ref"
+ else:
+ return f"ref:{translator}"
+
+class WMTXMLDataset(Dataset):
+ """
+ The 2021+ WMT dataset format. Everything is contained in a single file.
+ Can be parsed with the lxml parser.
+ """
+ @staticmethod
+ def _unwrap_wmt21_or_later(raw_file):
+ """
+ Unwraps the XML file from wmt21 or later.
+ This script is adapted from https://github.com/wmt-conference/wmt-format-tools
+
+ :param raw_file: The raw xml file to unwrap.
+ :return: Dictionary which contains the following fields:
+ - `src`: The source sentences.
+ - `docid`: ID indicating which document the sentences belong to.
+ - `origlang`: The original language of the document.
+ - `ref:{translator}`: The references produced by each translator.
+ - `ref`: An alias for the references from the first translator.
+ """
+ tree = ET.parse(raw_file)
+ # Find and check the documents (src, ref, hyp)
+ src_langs, ref_langs, translators = set(), set(), set()
+ for src_doc in tree.getroot().findall(".//src"):
+ src_langs.add(src_doc.get("lang"))
+
+ for ref_doc in tree.getroot().findall(".//ref"):
+ ref_langs.add(ref_doc.get("lang"))
+ translator = ref_doc.get("translator")
+ translators.add(translator)
+
+ assert (
+ len(src_langs) == 1
+ ), f"Multiple source languages found in the file: {raw_file}"
+ assert (
+ len(ref_langs) == 1
+ ), f"Found {len(ref_langs)} reference languages in the file: {raw_file}"
+
+ src = []
+ docids = []
+ orig_langs = []
+ domains = []
+
+ refs = { _get_field_by_translator(translator): [] for translator in translators }
+
+ systems = defaultdict(list)
+
+ src_sent_count, doc_count = 0, 0
+ for doc in tree.getroot().findall(".//doc"):
+ docid = doc.attrib["id"]
+ origlang = doc.attrib["origlang"]
+ # present wmt22++
+ domain = doc.attrib.get("domain", None)
+
+ # Skip the testsuite
+ if "testsuite" in doc.attrib:
+ continue
+
+ doc_count += 1
+ src_sents = {
+ int(seg.get("id")): seg.text for seg in doc.findall(".//src//seg")
+ }
+
+ def get_sents(doc):
+ return {
+ int(seg.get("id")): seg.text if seg.text else ""
+ for seg in doc.findall(".//seg")
+ }
+
+ ref_docs = doc.findall(".//ref")
+
+ trans_to_ref = {
+ ref_doc.get("translator"): get_sents(ref_doc) for ref_doc in ref_docs
+ }
+
+ hyp_docs = doc.findall(".//hyp")
+ hyps = {
+ hyp_doc.get("system"): get_sents(hyp_doc) for hyp_doc in hyp_docs
+ }
+
+ for seg_id in sorted(src_sents.keys()):
+ # no ref translation is available for this segment
+ if not any([value.get(seg_id, "") for value in trans_to_ref.values()]):
+ continue
+ for translator in translators:
+ refs[_get_field_by_translator(translator)].append(
+ trans_to_ref.get(translator, {translator: {}}).get(seg_id, "")
+ )
+ src.append(src_sents[seg_id])
+ for system_name in hyps.keys():
+ systems[system_name].append(hyps[system_name][seg_id])
+ docids.append(docid)
+ orig_langs.append(origlang)
+ if domain is not None:
+ domains.append(domain)
+ src_sent_count += 1
+
+ data = {"src": src, **refs, "docid": docids, "origlang": orig_langs, **systems}
+ if len(domains):
+ data["domain"] = domains
+
+ return data
+
+ def _get_langpair_path(self, langpair):
+ """
+ Returns the path for this language pair.
+ This is useful because in WMT22, the language-pair data structure can be a dict,
+ in order to allow for overriding which test set to use.
+ """
+ langpair_data = self._get_langpair_metadata(langpair)[langpair]
+ rel_path = langpair_data["path"] if isinstance(langpair_data, dict) else langpair_data[0]
+ return os.path.join(self._rawdir, rel_path)
+
+ def process_to_text(self, langpair=None):
+ """Processes raw files to plain text files.
+
+ :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed.
+ """
+ # ensure that the dataset is downloaded
+ self.maybe_download()
+
+ for langpair in sorted(self._get_langpair_metadata(langpair).keys()):
+ # The data type can be a list of paths, or a dict, containing the "path"
+ # and an override on which labeled reference to use (key "refs")
+ rawfile = self._get_langpair_path(langpair)
+
+ with smart_open(rawfile) as fin:
+ fields = self._unwrap_wmt21_or_later(fin)
+
+ for fieldname in fields:
+ textfile = self._get_txt_file_path(langpair, fieldname)
+
+ # skip if the file already exists
+ if os.path.exists(textfile) and os.path.getsize(textfile) > 0:
+ continue
+
+ with smart_open(textfile, "w") as fout:
+ for line in fields[fieldname]:
+ print(self._clean(line), file=fout)
+
+ def _get_langpair_allowed_refs(self, langpair):
+ """
+ Returns the preferred references for this language pair.
+ This can be set in the language pair block (as in WMT22), and backs off to the
+ test-set-level default, or nothing.
+
+ There is one exception. In the metadata, sometimes there is no translator field
+ listed (e.g., wmt22:liv-en). In this case, the reference is set to "", and the
+ field "ref" is returned.
+ """
+ defaults = self.kwargs.get("refs", [])
+ langpair_data = self._get_langpair_metadata(langpair)[langpair]
+ if isinstance(langpair_data, dict):
+ allowed_refs = langpair_data.get("refs", defaults)
+ else:
+ allowed_refs = defaults
+ allowed_refs = [_get_field_by_translator(ref) for ref in allowed_refs]
+
+ return allowed_refs
+
+ def get_reference_files(self, langpair):
+ """
+ Returns the requested reference files.
+ This is defined as a default at the test-set level, and can be overridden per language.
+ """
+ # Iterate through the (label, file path) pairs, looking for permitted labels
+ allowed_refs = self._get_langpair_allowed_refs(langpair)
+ all_files = self.get_files(langpair)
+ all_fields = self.fieldnames(langpair)
+ ref_files = [
+ f for f, field in zip(all_files, all_fields) if field in allowed_refs
+ ]
+ return ref_files
+
+ def fieldnames(self, langpair):
+ """
+ Return a list of all the field names. For most sources, this is just
+ the source and the reference. For others, it might include the document
+ ID for each line, or the original language (origLang).
+
+ get_files() should return the same number of items as this.
+
+ :param langpair: The language pair (e.g., "de-en")
+ :return: a list of field names
+ """
+ self.maybe_download()
+ rawfile = self._get_langpair_path(langpair)
+
+ with smart_open(rawfile) as fin:
+ fields = self._unwrap_wmt21_or_later(fin)
+
+ return list(fields.keys())
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/__init__.py b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a18c227748fd59cb1848539ea88b045b75e5dc64
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__init__.py
@@ -0,0 +1,11 @@
+"""The implementation of various metrics."""
+
+from .bleu import BLEU, BLEUScore # noqa: F401
+from .chrf import CHRF, CHRFScore # noqa: F401
+from .ter import TER, TERScore # noqa: F401
+
+METRICS = {
+ 'BLEU': BLEU,
+ 'CHRF': CHRF,
+ 'TER': TER,
+}
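The `METRICS` registry above maps metric names to their classes. A minimal scoring sketch assuming the standard sacrebleu corpus-level API; the hypothesis and reference strings below are illustrative:

```python
from sacrebleu.metrics import BLEU, CHRF

hypotheses = ["the cat sat on the mat"]
references = [["the cat is sitting on the mat"]]  # one reference stream, parallel to hypotheses

bleu = BLEU()
chrf = CHRF()
print(bleu.corpus_score(hypotheses, references))  # e.g. "BLEU = ..."
print(chrf.corpus_score(hypotheses, references))
```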
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b5b97682f821a31b0282aaecf1b650f88655ea33
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/base.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b1371a0247c69703a8c36f84d89e3a8533757413
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/base.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/bleu.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/bleu.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..59313666c3e56bb4e633e5687d6d2c04e2fe8fa9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/bleu.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/chrf.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/chrf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9155b92e53a475406de68d1dd808bca618751a86
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/chrf.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/helpers.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/helpers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c22bf0d6a0bef2bb26df8db8f944c602e7e96490
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/helpers.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/lib_ter.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/lib_ter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c499cdd68090594315af10613eef88a7e371071
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/lib_ter.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/ter.cpython-310.pyc b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/ter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aeb5ac0d8d8c115eca2bd31a2ce00fbf3e458f40
Binary files /dev/null and b/venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/ter.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/base.py b/venv/lib/python3.10/site-packages/sacrebleu/metrics/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..93fb10815a1a8b08c69bad19d2cbed58e251afc7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sacrebleu/metrics/base.py
@@ -0,0 +1,438 @@
+"""The base `Score`, `Metric` and `Signature` classes to derive from.
+
+`Metric` is an abstract class that enforces the implementation of a set
+of abstract methods. This way, a correctly implemented metric will work
+seamlessly with the rest of the codebase.
+"""
+
+import json
+import logging
+import statistics
+from typing import List, Sequence, Any, Optional, Dict
+from abc import ABCMeta, abstractmethod
+
+from .. import __version__
+
+sacrelogger = logging.getLogger('sacrebleu')
+
+
+class Score:
+ """A base score class to derive from.
+
+ :param name: The name of the underlying metric.
+ :param score: A floating point number for the final metric.
+ """
+ def __init__(self, name: str, score: float):
+ """`Score` initializer."""
+ self.name = name
+ self.score = score
+
+ # Statistical test related fields
+ self._mean = -1.0
+ self._ci = -1.0
+
+ # More info can be added right after the score
+ self._verbose = ''
+
+ def format(self, width: int = 2, score_only: bool = False,
+ signature: str = '', is_json: bool = False) -> str:
+ """Returns a pretty representation of the score.
+ :param width: Floating point decimal precision width.
+ :param score_only: If `True`, and the format is not `json`,
+ returns a single score string.
+ :param signature: A string representation of the given `Signature`
+ instance.
+        :param is_json: If `True`, outputs the score as a JSON string.
+ :return: A plain or JSON-formatted string representation.
+ """
+ d = {
+ 'name': self.name,
+ 'score': float(f'{self.score:.{width}f}'),
+ 'signature': signature,
+ }
+
+ sc = f'{self.score:.{width}f}'
+
+ if self._mean > 0:
+ confidence_mean = f'{self._mean:.{width}f}'
+ confidence_var = f'{self._ci:.{width}f}'
+ confidence_str = f'μ = {confidence_mean} ± {confidence_var}'
+
+ sc += f' ({confidence_str})'
+ if is_json:
+ d['confidence_mean'] = float(confidence_mean)
+ d['confidence_var'] = float(confidence_var)
+ d['confidence'] = confidence_str
+
+ # Construct full score line
+ full_score = f"{self.name}|{signature}" if signature else self.name
+ full_score = f"{full_score} = {sc}"
+ if self._verbose:
+ full_score += f' {self._verbose}'
+ d['verbose_score'] = self._verbose
+
+ if score_only:
+ return sc
+
+ if is_json:
+ for param in signature.split('|'):
+ key, value = param.split(':')
+ d[key] = value
+ return json.dumps(d, indent=1, ensure_ascii=False)
+
+ return full_score
+
+ def estimate_ci(self, scores: List['Score']):
+        """Takes a list of scores and stores the mean and the 95% confidence
+        interval around the mean.
+
+        :param scores: A list of `Score` objects obtained, for example, from
+        bootstrap resampling.
+ """
+ # Sort the scores
+ raw_scores = sorted([x.score for x in scores])
+ n = len(raw_scores)
+
+ # Get CI bounds (95%, i.e. 1/40 from left)
+ lower_idx = n // 40
+ upper_idx = n - lower_idx - 1
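+        # e.g. with n = 1000 bootstrap scores, lower_idx = 25 and
+        # upper_idx = 974, i.e. roughly the 2.5th and 97.5th percentiles.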
+ lower, upper = raw_scores[lower_idx], raw_scores[upper_idx]
+ self._ci = 0.5 * (upper - lower)
+ self._mean = statistics.mean(raw_scores)
+
+ def __repr__(self):
+ """Returns a human readable score string."""
+ return self.format()
+
+
+class Signature:
+ """A convenience class to represent sacreBLEU reproducibility signatures.
+
+ :param args: key-value dictionary passed from the actual metric instance.
+ """
+ def __init__(self, args: dict):
+ """`Signature` initializer."""
+ # Global items that are shared across all metrics
+ self._abbr = {
+ 'version': 'v',
+ 'nrefs': '#',
+ 'test': 't',
+ 'lang': 'l',
+ 'subset': 'S',
+ 'origlang': 'o',
+ 'bs': 'bs', # Bootstrap resampling trials
+ 'ar': 'ar', # Approximate randomization trials
+ 'seed': 'rs', # RNG's seed
+ }
+
+ if 'num_refs' not in args:
+ raise ValueError(
+ 'Number of references unknown, please evaluate the metric first.')
+
+ num_refs = args['num_refs']
+ if num_refs == -1:
+ # Detect variable number of refs
+ num_refs = 'var'
+
+ # Global items that are shared across all metrics
+ # None's will be ignored
+ self.info = {
+ 'version': __version__,
+ 'nrefs': num_refs,
+ 'bs': args.get('n_bootstrap', None),
+ 'ar': None,
+ 'seed': args.get('seed', None),
+ 'test': args.get('test_set', None),
+ 'lang': args.get('langpair', None),
+ 'origlang': args.get('origlang', None),
+ 'subset': args.get('subset', None),
+ }
+
+ def format(self, short: bool = False) -> str:
+ """Returns a string representation of the signature.
+
+ :param short: If True, shortened signature is produced.
+ :return: A string representation of the signature.
+ """
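+        # e.g. a BLEU signature (via the BLEUSignature subclass) typically
+        # renders as (values are illustrative):
+        #   nrefs:1|case:mixed|eff:no|tok:13a|smooth:exp|version:2.x.x
+        # and, with short=True, as:
+        #   #:1|c:mixed|e:no|tok:13a|s:exp|v:2.x.x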
+ pairs = []
+ keys = list(self.info.keys())
+ # keep version always at end
+ keys.remove('version')
+ for name in keys + ['version']:
+ value = self.info[name]
+ if value is not None:
+ if isinstance(value, bool):
+ # Replace True/False with yes/no
+ value = 'yes' if value else 'no'
+ final_name = self._abbr[name] if short else name
+ pairs.append(f'{final_name}:{value}')
+
+ return '|'.join(pairs)
+
+ def update(self, key: str, value: Any):
+ """Add a new item or update an existing one.
+
+ :param key: The key to use in the dictionary.
+ :param value: The associated value for the `key`.
+ """
+ self.info[key] = value
+
+ def __str__(self):
+ """Returns a human-readable signature string."""
+ return self.format()
+
+ def __repr__(self):
+ """Returns a human-readable signature string."""
+ return self.format()
+
+
+class Metric(metaclass=ABCMeta):
+ """A base class for all metrics that ensures the implementation of some
+ methods. Much of the common functionality is moved to this base class
+ from other metrics."""
+
+ # Each metric should define its Signature class' name here
+ _SIGNATURE_TYPE = Signature
+
+ def __init__(self):
+ """`Metric` initializer."""
+ # The pre-computed reference cache
+ self._ref_cache = None
+
+ # only useful for BLEU tokenized warnings. Set to True so that
+ # warnings are not issued for other metrics.
+ self._force = True
+
+ # Will be used by the signature when bootstrap resampling
+ self.n_bootstrap = None
+ self.seed = None
+
+ def _check_sentence_score_args(self, hyp: str, refs: Sequence[str]):
+ """Performs sanity checks on `sentence_score` method's arguments.
+
+ :param hyp: A single hypothesis string.
+ :param refs: A sequence of reference strings.
+ """
+ prefix = self.__class__.__name__
+ err_msg = None
+
+ if not isinstance(hyp, str):
+ err_msg = 'The argument `hyp` should be a string.'
+ elif isinstance(refs, str) or not isinstance(refs, Sequence):
+ err_msg = 'The argument `refs` should be a sequence of strings.'
+ elif not isinstance(refs[0], str) and refs[0] is not None:
+ err_msg = 'Each element of `refs` should be a string.'
+
+ if err_msg:
+ raise TypeError(f'{prefix}: {err_msg}')
+
+ def _check_corpus_score_args(self, hyps: Sequence[str],
+ refs: Optional[Sequence[Sequence[str]]]):
+ """Performs sanity checks on `corpus_score` method's arguments.
+
+        :param hyps: A sequence of hypothesis strings.
+ :param refs: A sequence of reference documents with document being
+ defined as a sequence of reference strings. If `None`, cached references
+ will be used.
+ """
+
+ prefix = self.__class__.__name__
+ err_msg = None
+
+ if not isinstance(hyps, Sequence):
+ err_msg = "`hyps` should be a sequence of strings."
+ elif not isinstance(hyps[0], str):
+ err_msg = 'Each element of `hyps` should be a string.'
+ elif any(line is None for line in hyps):
+ err_msg = "Undefined line in hypotheses stream!"
+
+ if refs is not None:
+ if not isinstance(refs, Sequence):
+ err_msg = "`refs` should be a sequence of sequence of strings."
+ elif not isinstance(refs[0], Sequence):
+ err_msg = "Each element of `refs` should be a sequence of strings."
+ elif not isinstance(refs[0][0], str) and refs[0][0] is not None:
+ err_msg = "`refs` should be a sequence of sequence of strings."
+
+ if err_msg:
+ raise TypeError(f'{prefix}: {err_msg}')
+
+ @abstractmethod
+ def _aggregate_and_compute(self, stats: List[List[Any]]) -> Any:
+ """Computes the final score given the pre-computed match statistics.
+
+ :param stats: A list of segment-level statistics.
+ :return: A `Score` instance.
+ """
+ pass
+
+ @abstractmethod
+ def _compute_score_from_stats(self, stats: List[Any]) -> Any:
+ """Computes the final score from already aggregated statistics.
+
+ :param stats: A list or numpy array of segment-level statistics.
+ :return: A `Score` object.
+ """
+ pass
+
+ @abstractmethod
+ def _preprocess_segment(self, sent: str) -> str:
+ """A wrapper around the metric's tokenization and pre-processing logic.
+ This should be implemented for reference caching to work correctly.
+
+ :param sent: The input sentence.
+ :return: The pre-processed output sentence.
+ """
+ pass
+
+ @abstractmethod
+ def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, Any]:
+ """Given a list of reference segments, extract the required
+ information (such as n-grams for BLEU and chrF). This should be implemented
+ for the generic `_cache_references()` to work across all metrics.
+
+ :param refs: A sequence of strings.
+ """
+ pass
+
+ @abstractmethod
+ def _compute_segment_statistics(self, hypothesis: str, ref_kwargs: Dict) -> List[Any]:
+ """Given a (pre-processed) hypothesis sentence and already computed
+ reference info, returns the best match statistics across the
+ references. The return type is usually a List of ints or floats.
+
+ :param hypothesis: A pre-processed hypothesis sentence.
+ :param ref_kwargs: A dictionary with reference-related information
+ within. This is formulated as a dictionary as different metrics may
+ require different information regarding a reference segment.
+ """
+ pass
+
+ def _cache_references(self, references: Sequence[Sequence[str]]) -> List[Any]:
+ """Given the full set of document references, extract segment n-grams
+ (or other necessary information) for caching purposes.
+
+ :param references: A sequence of reference documents with document being
+ defined as a sequence of reference strings. A particular reference
+ segment can be '' or `None` to allow the use of variable number
+ of references per segment.
+ :return: A list where each element is a tuple of segment n-grams and
+ reference lengths, as returned by `_extract_reference_info()`.
+ """
+ ref_cache = []
+
+ # Decide on final number of refs here as well
+ num_refs = set()
+
+ for refs in zip(*references):
+ # Remove undefined references
+ lines = [x for x in refs if x is not None]
+
+ # Keep track of reference counts to allow variable reference
+ # info in the signature
+ num_refs.add(len(lines))
+
+ lines = [self._preprocess_segment(x) for x in lines]
+
+ # Get n-grams
+ ref_cache.append(self._extract_reference_info(lines))
+
+ if len(num_refs) == 1:
+ self.num_refs = list(num_refs)[0]
+ else:
+ # A variable number of refs exist
+ self.num_refs = -1
+
+ return ref_cache
+
+ def _extract_corpus_statistics(self, hypotheses: Sequence[str],
+ references: Optional[Sequence[Sequence[str]]]) -> Any:
+ """Reads the corpus and returns sentence-level match statistics for
+ faster re-computations esp. during statistical tests.
+
+ :param hypotheses: A sequence of hypothesis strings.
+ :param references: A sequence of reference documents with document being
+ defined as a sequence of reference strings. If `None`, cached references
+ will be used.
+ :return: A list where each sublist corresponds to segment statistics.
+ """
+ # Pre-compute references
+ # Don't store the cache as the user is explicitly passing refs
+ if references:
+ ref_cache = self._cache_references(references)
+ elif self._ref_cache:
+ ref_cache = self._ref_cache
+ else:
+ raise RuntimeError('No references provided and the cache is empty.')
+
+ stats = []
+ tok_count = 0
+
+ for hyp, ref_kwargs in zip(hypotheses, ref_cache):
+ # Check for already-tokenized input problem (only for BLEU)
+ if not self._force and hyp.endswith(' .'):
+ tok_count += 1
+
+ hyp = self._preprocess_segment(hyp)
+
+ # Collect stats
+ stats.append(self._compute_segment_statistics(hyp, ref_kwargs))
+
+ if tok_count >= 100:
+ sacrelogger.warning("That's 100 lines that end in a tokenized period ('.')")
+ sacrelogger.warning("It looks like you forgot to detokenize your test data, which may hurt your score.")
+ sacrelogger.warning("If you insist your data is detokenized, or don't care, you can suppress this message with the `force` parameter.")
+
+ return stats
+
+ def sentence_score(self, hypothesis: str, references: Sequence[str]) -> Any:
+ """Compute the metric for a single sentence against a single (or multiple) reference(s).
+
+ :param hypothesis: A single hypothesis string.
+ :param references: A sequence of reference strings.
+ :return: A `Score` object.
+ """
+ self._check_sentence_score_args(hypothesis, references)
+
+ stats = self._extract_corpus_statistics(
+ [hypothesis], [[refs] for refs in references])
+ return self._aggregate_and_compute(stats)
+
+ def corpus_score(self, hypotheses: Sequence[str],
+ references: Optional[Sequence[Sequence[str]]],
+ n_bootstrap: int = 1) -> Any:
+ """Compute the metric for a corpus against a single (or multiple) reference(s).
+
+ :param hypotheses: A sequence of hypothesis strings.
+ :param references: A sequence of reference documents with document being
+ defined as a sequence of reference strings. If `None`, cached references
+ will be used.
+ :param n_bootstrap: If > 1, provides 95% confidence interval around true mean
+ using bootstrap resampling with `n_bootstrap` samples.
+ :return: A `Score` object.
+ """
+ self._check_corpus_score_args(hypotheses, references)
+
+ # Collect corpus stats
+ stats = self._extract_corpus_statistics(hypotheses, references)
+
+ # Compute the actual system score
+ actual_score = self._aggregate_and_compute(stats)
+
+ if n_bootstrap > 1:
+ # Compute bootstrap estimate as well
+            # The import is delayed to avoid importing numpy when bootstrap
+            # resampling is not requested.
+ from ..significance import _bootstrap_resample
+
+ self.n_bootstrap = n_bootstrap
+ self.seed, bs_scores = _bootstrap_resample(stats, self, n_bootstrap)
+ actual_score.estimate_ci(bs_scores)
+
+ return actual_score
+
+ def get_signature(self) -> Signature:
+ """Creates and returns the signature for the metric. The creation
+ of signatures is delayed as the number of references is resolved
+ only at the point of reference caching."""
+ return self._SIGNATURE_TYPE(self.__dict__)
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/bleu.py b/venv/lib/python3.10/site-packages/sacrebleu/metrics/bleu.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ca77b9af5c4ecc77acde3b7816607d11cd4bc7f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sacrebleu/metrics/bleu.py
@@ -0,0 +1,420 @@
+"""The implementation of the BLEU metric (Papineni et al., 2002)."""
+
+import math
+import logging
+from importlib import import_module
+from typing import List, Sequence, Optional, Dict, Any
+
+from ..utils import my_log, sum_of_lists
+
+from .base import Score, Signature, Metric
+from .helpers import extract_all_word_ngrams
+
+sacrelogger = logging.getLogger('sacrebleu')
+
+# The default for the maximum n-gram order when computing precisions
+MAX_NGRAM_ORDER = 4
+
+_TOKENIZERS = {
+ 'none': 'tokenizer_none.NoneTokenizer',
+ 'zh': 'tokenizer_zh.TokenizerZh',
+ '13a': 'tokenizer_13a.Tokenizer13a',
+ 'intl': 'tokenizer_intl.TokenizerV14International',
+ 'char': 'tokenizer_char.TokenizerChar',
+ 'ja-mecab': 'tokenizer_ja_mecab.TokenizerJaMecab',
+ 'ko-mecab': 'tokenizer_ko_mecab.TokenizerKoMecab',
+ 'spm': 'tokenizer_spm.TokenizerSPM',
+ 'flores101': 'tokenizer_spm.Flores101Tokenizer',
+ 'flores200': 'tokenizer_spm.Flores200Tokenizer',
+}
+
+
+def _get_tokenizer(name: str):
+ """Dynamically import tokenizer as importing all is slow."""
+ module_name, class_name = _TOKENIZERS[name].rsplit('.', 1)
+ return getattr(
+ import_module(f'.tokenizers.{module_name}', 'sacrebleu'),
+ class_name)
+
+
+class BLEUSignature(Signature):
+ """A convenience class to represent the reproducibility signature for BLEU.
+
+ :param args: key-value dictionary passed from the actual metric instance.
+ """
+ def __init__(self, args: dict):
+ """`BLEUSignature` initializer."""
+ super().__init__(args)
+
+ self._abbr.update({
+ 'case': 'c',
+ 'eff': 'e',
+ 'tok': 'tok',
+ 'smooth': 's',
+ })
+
+ # Construct a combined string for smoothing method and value
+ smooth_str = args['smooth_method']
+ smooth_def = BLEU.SMOOTH_DEFAULTS[smooth_str]
+
+ # If the method requires a parameter, add it within brackets
+ if smooth_def is not None:
+ # the following can be None if the user wants to use the default
+ smooth_val = args['smooth_value']
+
+ if smooth_val is None:
+ smooth_val = smooth_def
+
+ smooth_str += f'[{smooth_val:.2f}]'
+
+ self.info.update({
+ 'case': 'lc' if args['lowercase'] else 'mixed',
+ 'eff': 'yes' if args['effective_order'] else 'no',
+ 'tok': args['tokenizer_signature'],
+ 'smooth': smooth_str,
+ })
+
+
+class BLEUScore(Score):
+ """A convenience class to represent BLEU scores.
+
+ :param score: The BLEU score.
+ :param counts: List of counts of correct ngrams, 1 <= n <= max_ngram_order
+ :param totals: List of counts of total ngrams, 1 <= n <= max_ngram_order
+ :param precisions: List of precisions, 1 <= n <= max_ngram_order
+ :param bp: The brevity penalty.
+ :param sys_len: The cumulative system length.
+ :param ref_len: The cumulative reference length.
+ """
+ def __init__(self, score: float, counts: List[int], totals: List[int],
+ precisions: List[float], bp: float,
+ sys_len: int, ref_len: int):
+ """`BLEUScore` initializer."""
+ super().__init__('BLEU', score)
+ self.bp = bp
+ self.counts = counts
+ self.totals = totals
+ self.sys_len = sys_len
+ self.ref_len = ref_len
+ self.precisions = precisions
+
+ self.prec_str = "/".join([f"{p:.1f}" for p in self.precisions])
+ self.ratio = self.sys_len / self.ref_len if self.ref_len else 0
+
+ # The verbose part of BLEU
+ self._verbose = f"{self.prec_str} (BP = {self.bp:.3f} "
+ self._verbose += f"ratio = {self.ratio:.3f} hyp_len = {self.sys_len:d} "
+ self._verbose += f"ref_len = {self.ref_len:d})"
+
+
+class BLEU(Metric):
+ """Computes the BLEU metric given hypotheses and references.
+
+ :param lowercase: If True, lowercased BLEU is computed.
+ :param force: Ignore data that looks already tokenized.
+ :param tokenize: The tokenizer to use. If None, defaults to language-specific tokenizers with '13a' as the fallback default.
+ :param smooth_method: The smoothing method to use ('floor', 'add-k', 'exp' or 'none').
+ :param smooth_value: The smoothing value for `floor` and `add-k` methods. `None` falls back to default value.
+ :param max_ngram_order: If given, it overrides the maximum n-gram order (default: 4) when computing precisions.
+ :param effective_order: If `True`, stop including n-gram orders for which precision is 0. This should be
+ `True`, if sentence-level BLEU will be computed.
+ :param trg_lang: An optional language code to raise potential tokenizer warnings.
+ :param references: A sequence of reference documents with document being
+ defined as a sequence of reference strings. If given, the reference n-grams
+ and lengths will be pre-computed and cached for faster BLEU computation
+ across many systems.
+ """
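+    # A minimal usage sketch (strings below are illustrative):
+    #   bleu = BLEU()
+    #   bleu.corpus_score(['the cat sat on the mat'],
+    #                     [['the cat is on the mat']])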
+
+ SMOOTH_DEFAULTS: Dict[str, Optional[float]] = {
+ # The defaults for `floor` and `add-k` are obtained from the following paper
+ # A Systematic Comparison of Smoothing Techniques for Sentence-Level BLEU
+ # Boxing Chen and Colin Cherry
+ # http://aclweb.org/anthology/W14-3346
+ 'none': None, # No value is required
+ 'floor': 0.1,
+ 'add-k': 1,
+ 'exp': None, # No value is required
+ }
+
+ TOKENIZERS = _TOKENIZERS.keys()
+
+    # mteval-v13a.pl tokenizer unless Chinese, Japanese or Korean is provided
+ TOKENIZER_DEFAULT = '13a'
+
+ # Some language specific mappings to use if `trg_lang` is given
+ # and the tokenizer is not explicitly specified
+ _TOKENIZER_MAP = {
+ 'zh': 'zh',
+ 'ja': 'ja-mecab',
+ 'ko': 'ko-mecab',
+ }
+
+ _SIGNATURE_TYPE = BLEUSignature
+
+ def __init__(self, lowercase: bool = False,
+ force: bool = False,
+ tokenize: Optional[str] = None,
+ smooth_method: str = 'exp',
+ smooth_value: Optional[float] = None,
+ max_ngram_order: int = MAX_NGRAM_ORDER,
+ effective_order: bool = False,
+ trg_lang: str = '',
+ references: Optional[Sequence[Sequence[str]]] = None):
+ """`BLEU` initializer."""
+ super().__init__()
+
+ self._force = force
+ self.trg_lang = trg_lang
+ self.lowercase = lowercase
+ self.smooth_value = smooth_value
+ self.smooth_method = smooth_method
+ self.max_ngram_order = max_ngram_order
+ self.effective_order = effective_order
+
+ # Sanity check
+ assert self.smooth_method in self.SMOOTH_DEFAULTS.keys(), \
+            f"Unknown smooth_method {self.smooth_method!r}"
+
+ # If the tokenizer wasn't specified, choose it according to the
+        # following logic. We use '13a' except for ZH, JA and KO. Note that
+ # this logic can only be applied when sacrebleu knows the target
+ # language, which is only the case for builtin datasets.
+ if tokenize is None:
+ best_tokenizer = self.TOKENIZER_DEFAULT
+
+ # Set `zh` or `ja-mecab` or `ko-mecab` if target language is provided
+ if self.trg_lang in self._TOKENIZER_MAP:
+ best_tokenizer = self._TOKENIZER_MAP[self.trg_lang]
+ else:
+ best_tokenizer = tokenize
+ if self.trg_lang == 'zh' and best_tokenizer != 'zh':
+ sacrelogger.warning(
+ "Consider using the 'zh' or 'spm' tokenizer for Chinese.")
+ if self.trg_lang == 'ja' and best_tokenizer != 'ja-mecab':
+ sacrelogger.warning(
+ "Consider using the 'ja-mecab' or 'spm' tokenizer for Japanese.")
+ if self.trg_lang == 'ko' and best_tokenizer != 'ko-mecab':
+ sacrelogger.warning(
+ "Consider using the 'ko-mecab' or 'spm' tokenizer for Korean.")
+
+ # Create the tokenizer
+ self.tokenizer = _get_tokenizer(best_tokenizer)()
+
+ # Build the signature
+ self.tokenizer_signature = self.tokenizer.signature()
+
+ if references is not None:
+ # Pre-compute reference ngrams and lengths
+ self._ref_cache = self._cache_references(references)
+
+ @staticmethod
+ def compute_bleu(correct: List[int],
+ total: List[int],
+ sys_len: int,
+ ref_len: int,
+ smooth_method: str = 'none',
+ smooth_value=None,
+ effective_order: bool = False,
+ max_ngram_order: int = MAX_NGRAM_ORDER) -> BLEUScore:
+ """Computes BLEU score from its sufficient statistics with smoothing.
+
+ Smoothing methods (citing "A Systematic Comparison of Smoothing Techniques for Sentence-Level BLEU",
+ Boxing Chen and Colin Cherry, WMT 2014: http://aclweb.org/anthology/W14-3346)
+
+ - none: No smoothing.
+ - floor: Method 1 (requires small positive value (0.1 in the paper) to be set)
+ - add-k: Method 2 (Generalizing Lin and Och, 2004)
+ - exp: Method 3 (NIST smoothing method i.e. in use with mteval-v13a.pl)
+
+ :param correct: List of counts of correct ngrams, 1 <= n <= max_ngram_order
+ :param total: List of counts of total ngrams, 1 <= n <= max_ngram_order
+ :param sys_len: The cumulative system length
+ :param ref_len: The cumulative reference length
+ :param smooth_method: The smoothing method to use ('floor', 'add-k', 'exp' or 'none')
+ :param smooth_value: The smoothing value for `floor` and `add-k` methods. `None` falls back to default value.
+ :param effective_order: If `True`, stop including n-gram orders for which precision is 0. This should be
+ `True`, if sentence-level BLEU will be computed.
+ :param max_ngram_order: If given, it overrides the maximum n-gram order (default: 4) when computing precisions.
+ :return: A `BLEUScore` instance.
+ """
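+        # For example (illustrative numbers): correct=[8, 5, 3, 2] out of
+        # total=[10, 9, 8, 7] with sys_len=10 and ref_len=11 gives a brevity
+        # penalty of exp(1 - 11/10) ~= 0.905, applied to the geometric mean of
+        # the four n-gram precisions.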
+ assert smooth_method in BLEU.SMOOTH_DEFAULTS.keys(), \
+            f"Unknown smooth_method {smooth_method!r}"
+
+ # Fetch the default value for floor and add-k
+ if smooth_value is None:
+ smooth_value = BLEU.SMOOTH_DEFAULTS[smooth_method]
+
+ # Compute brevity penalty
+ if sys_len < ref_len:
+ bp = math.exp(1 - ref_len / sys_len) if sys_len > 0 else 0.0
+ else:
+ bp = 1.0
+
+ # n-gram precisions
+ precisions = [0.0 for x in range(max_ngram_order)]
+
+ # Early stop if there are no matches (#141)
+ if not any(correct):
+ return BLEUScore(0.0, correct, total, precisions, bp, sys_len, ref_len)
+
+ smooth_mteval = 1.
+ eff_order = max_ngram_order
+ for n in range(1, len(precisions) + 1):
+ if smooth_method == 'add-k' and n > 1:
+ correct[n - 1] += smooth_value
+ total[n - 1] += smooth_value
+
+ if total[n - 1] == 0:
+ break
+
+ # If the system guesses no i-grams, 1 <= i <= max_ngram_order,
+ # the BLEU score is 0 (technically undefined). This is a problem for sentence
+ # level BLEU or a corpus of short sentences, where systems will get
+ # no credit if sentence lengths fall under the max_ngram_order threshold.
+ # This fix scales max_ngram_order to the observed maximum order.
+ # It is only available through the API and off by default
+ if effective_order:
+ eff_order = n
+
+ if correct[n - 1] == 0:
+ if smooth_method == 'exp':
+ smooth_mteval *= 2
+ precisions[n - 1] = 100. / (smooth_mteval * total[n - 1])
+ elif smooth_method == 'floor':
+ precisions[n - 1] = 100. * smooth_value / total[n - 1]
+ else:
+ precisions[n - 1] = 100. * correct[n - 1] / total[n - 1]
+
+ # Compute BLEU score
+ score = bp * math.exp(
+ sum([my_log(p) for p in precisions[:eff_order]]) / eff_order)
+
+ return BLEUScore(score, correct, total, precisions, bp, sys_len, ref_len)
+
+ def _preprocess_segment(self, sent: str) -> str:
+        """Given a sentence, lowercases it (optionally) and tokenizes it.
+
+        :param sent: The input sentence string.
+ :return: The pre-processed output string.
+ """
+ if self.lowercase:
+ sent = sent.lower()
+ return self.tokenizer(sent.rstrip())
+
+ def _compute_score_from_stats(self, stats: List[int]) -> BLEUScore:
+ """Computes the final score from already aggregated statistics.
+
+ :param stats: A list or numpy array of segment-level statistics.
+ :return: A `BLEUScore` object.
+ """
+ return self.compute_bleu(
+ correct=stats[2: 2 + self.max_ngram_order],
+ total=stats[2 + self.max_ngram_order:],
+ sys_len=int(stats[0]), ref_len=int(stats[1]),
+ smooth_method=self.smooth_method, smooth_value=self.smooth_value,
+ effective_order=self.effective_order,
+ max_ngram_order=self.max_ngram_order
+ )
+
+ def _aggregate_and_compute(self, stats: List[List[int]]) -> BLEUScore:
+ """Computes the final BLEU score given the pre-computed corpus statistics.
+
+ :param stats: A list of segment-level statistics
+ :return: A `BLEUScore` instance.
+ """
+ return self._compute_score_from_stats(sum_of_lists(stats))
+
+ def _get_closest_ref_len(self, hyp_len: int, ref_lens: List[int]) -> int:
+ """Given a hypothesis length and a list of reference lengths, returns
+ the closest reference length to be used by BLEU.
+
+ :param hyp_len: The hypothesis length.
+ :param ref_lens: A list of reference lengths.
+ :return: The closest reference length.
+ """
+ closest_diff, closest_len = -1, -1
+
+ for ref_len in ref_lens:
+ diff = abs(hyp_len - ref_len)
+ if closest_diff == -1 or diff < closest_diff:
+ closest_diff = diff
+ closest_len = ref_len
+ elif diff == closest_diff and ref_len < closest_len:
+ closest_len = ref_len
+
+ return closest_len
+
+ def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, Any]:
+ """Given a list of reference segments, extract the n-grams and reference lengths.
+ The latter will be useful when comparing hypothesis and reference lengths for BLEU.
+
+ :param refs: A sequence of strings.
+ :return: A dictionary that will be passed to `_compute_segment_statistics()`
+ through keyword arguments.
+ """
+ ngrams = None
+ ref_lens = []
+
+ for ref in refs:
+ # extract n-grams for this ref
+ this_ngrams, ref_len = extract_all_word_ngrams(ref, 1, self.max_ngram_order)
+ ref_lens.append(ref_len)
+
+ if ngrams is None:
+ # Set it directly for first set of refs
+ ngrams = this_ngrams
+ else:
+ # Merge counts across multiple references
+ # The below loop is faster than `ngrams |= this_ngrams`
+ for ngram, count in this_ngrams.items():
+ ngrams[ngram] = max(ngrams[ngram], count)
+
+ return {'ref_ngrams': ngrams, 'ref_lens': ref_lens}
+
+ def _compute_segment_statistics(self, hypothesis: str,
+ ref_kwargs: Dict) -> List[int]:
+ """Given a (pre-processed) hypothesis sentence and already computed
+ reference n-grams & lengths, returns the best match statistics across the
+ references.
+
+ :param hypothesis: Hypothesis sentence.
+        :param ref_kwargs: A dictionary with `ref_ngrams` and `ref_lens` keys
+ that denote the counter containing all n-gram counts and reference lengths,
+ respectively.
+ :return: A list of integers with match statistics.
+ """
+
+ ref_ngrams, ref_lens = ref_kwargs['ref_ngrams'], ref_kwargs['ref_lens']
+
+ # Extract n-grams for the hypothesis
+ hyp_ngrams, hyp_len = extract_all_word_ngrams(
+ hypothesis, 1, self.max_ngram_order)
+
+ ref_len = self._get_closest_ref_len(hyp_len, ref_lens)
+
+ # Count the stats
+ # Although counter has its internal & and | operators, this is faster
+ correct = [0 for i in range(self.max_ngram_order)]
+ total = correct[:]
+ for hyp_ngram, hyp_count in hyp_ngrams.items():
+ # n-gram order
+ n = len(hyp_ngram) - 1
+ # count hypothesis n-grams
+ total[n] += hyp_count
+ # count matched n-grams
+ if hyp_ngram in ref_ngrams:
+ correct[n] += min(hyp_count, ref_ngrams[hyp_ngram])
+
+ # Return a flattened list for efficient computation
+ return [hyp_len, ref_len] + correct + total
+
+ def sentence_score(self, hypothesis: str, references: Sequence[str]) -> BLEUScore:
+ """Compute the metric for a single sentence against a single (or multiple) reference(s).
+
+ :param hypothesis: A single hypothesis string.
+ :param references: A sequence of reference strings.
+ :return: a `BLEUScore` object.
+ """
+ if not self.effective_order:
+ sacrelogger.warning(
+ 'It is recommended to enable `effective_order` for sentence-level BLEU.')
+ return super().sentence_score(hypothesis, references)
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/chrf.py b/venv/lib/python3.10/site-packages/sacrebleu/metrics/chrf.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7d4f6858d0c6005f97ad8011a0b17bd97c2bcea
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sacrebleu/metrics/chrf.py
@@ -0,0 +1,284 @@
+"""The implementation of chrF (Popović 2015) and chrF++ (Popović 2017) metrics."""
+
+from typing import List, Sequence, Optional, Dict
+from collections import Counter
+
+from ..utils import sum_of_lists
+from .base import Score, Signature, Metric
+from .helpers import extract_all_char_ngrams, extract_word_ngrams
+
+
+class CHRFSignature(Signature):
+ """A convenience class to represent the reproducibility signature for chrF.
+
+ :param args: key-value dictionary passed from the actual metric instance.
+ """
+ def __init__(self, args: dict):
+ """`CHRFSignature` initializer."""
+ super().__init__(args)
+ self._abbr.update({
+ 'case': 'c',
+ 'eff': 'e',
+ 'nc': 'nc',
+ 'nw': 'nw',
+ 'space': 's',
+ })
+
+ self.info.update({
+ 'case': 'lc' if args['lowercase'] else 'mixed',
+ 'eff': 'yes' if not args['eps_smoothing'] else 'no',
+ 'nc': args['char_order'],
+ 'nw': args['word_order'],
+ 'space': 'yes' if args['whitespace'] else 'no',
+ })
+
+
+class CHRFScore(Score):
+ """A convenience class to represent chrF scores.
+
+ :param score: The chrF (chrF++) score.
+ :param char_order: The character n-gram order.
+    :param word_order: The word n-gram order. If equal to 2, the metric is referred to as chrF++.
+    :param beta: Determines the importance of recall w.r.t. precision.
+ """
+ def __init__(self, score: float, char_order: int, word_order: int, beta: int):
+ """`CHRFScore` initializer."""
+ self.beta = beta
+ self.char_order = char_order
+ self.word_order = word_order
+
+ # Add + signs to denote chrF+ variant
+ name = f'chrF{self.beta}' + '+' * self.word_order
+
+ super().__init__(name, score)
+
+
+class CHRF(Metric):
+ """Computes the chrF(++) metric given hypotheses and references.
+
+ :param char_order: Character n-gram order.
+    :param word_order: Word n-gram order. If equal to 2, the metric is referred to as chrF++.
+    :param beta: Determines the importance of recall w.r.t. precision.
+ :param lowercase: Enable case-insensitivity.
+ :param whitespace: If `True`, include whitespaces when extracting character n-grams.
+ :param eps_smoothing: If `True`, applies epsilon smoothing similar
+ to reference chrF++.py, NLTK and Moses implementations. Otherwise,
+ it takes into account effective match order similar to sacreBLEU < 2.0.0.
+ :param references: A sequence of reference documents with document being
+ defined as a sequence of reference strings. If given, the reference n-grams
+ will be pre-computed and cached for faster re-computation across many systems.
+ """
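+    # A minimal usage sketch (strings below are illustrative):
+    #   chrf = CHRF(word_order=2)  # word_order=2 gives chrF++
+    #   chrf.corpus_score(['the cat sat on the mat'],
+    #                     [['the cat is on the mat']])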
+
+ # Maximum character n-gram order to take into account
+ CHAR_ORDER = 6
+
+ # chrF+ additionally takes into account some of the word n-grams
+ WORD_ORDER = 0
+
+ # Defaults to 2 (per http://www.aclweb.org/anthology/W16-2341)
+ BETA = 2
+
+ # Cache string.punctuation for chrF+' punctuation stripper
+ _PUNCTS = set('!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~')
+
+ _SIGNATURE_TYPE = CHRFSignature
+
+ def __init__(self, char_order: int = CHAR_ORDER,
+ word_order: int = WORD_ORDER,
+ beta: int = BETA,
+ lowercase: bool = False,
+ whitespace: bool = False,
+ eps_smoothing: bool = False,
+ references: Optional[Sequence[Sequence[str]]] = None):
+ """`CHRF` initializer."""
+ super().__init__()
+
+ self.beta = beta
+ self.char_order = char_order
+ self.word_order = word_order
+ self.order = self.char_order + self.word_order
+ self.lowercase = lowercase
+ self.whitespace = whitespace
+ self.eps_smoothing = eps_smoothing
+
+ if references is not None:
+ # Pre-compute reference ngrams
+ self._ref_cache = self._cache_references(references)
+
+ @staticmethod
+ def _get_match_statistics(hyp_ngrams: Counter, ref_ngrams: Counter) -> List[int]:
+ """Computes the match statistics between hypothesis and reference n-grams.
+
+ :param hyp_ngrams: A `Counter` holding hypothesis n-grams.
+ :param ref_ngrams: A `Counter` holding reference n-grams.
+ :return: A list of three numbers denoting hypothesis n-gram count,
+ reference n-gram count and the intersection count.
+ """
+ # Counter's internal intersection is not that fast, count manually
+ match_count, hyp_count = 0, 0
+ for ng, count in hyp_ngrams.items():
+ hyp_count += count
+ if ng in ref_ngrams:
+ match_count += min(count, ref_ngrams[ng])
+
+ return [
+ # Don't count hits if no reference exists for that n-gram
+ hyp_count if ref_ngrams else 0,
+ sum(ref_ngrams.values()),
+ match_count,
+ ]
+
+ def _remove_punctuation(self, sent: str) -> List[str]:
+ """Separates out punctuations from beginning and end of words for chrF.
+ Adapted from https://github.com/m-popovic/chrF
+
+ :param sent: A string.
+ :return: A list of words.
+ """
+ tokenized = []
+ for w in sent.split():
+ if len(w) == 1:
+ tokenized.append(w)
+ else:
+ # NOTE: This splits '(hi)' to '(hi' and ')' (issue #124)
+ if w[-1] in self._PUNCTS:
+ tokenized += [w[:-1], w[-1]]
+ elif w[0] in self._PUNCTS:
+ tokenized += [w[0], w[1:]]
+ else:
+ tokenized.append(w)
+ return tokenized
+
+ def _preprocess_segment(self, sent: str) -> str:
+ """Given a sentence, apply optional lowercasing.
+
+ :param sent: The input sentence string.
+ :return: The pre-processed output string.
+ """
+ return sent.lower() if self.lowercase else sent
+
+ def _compute_f_score(self, statistics: List[int]) -> float:
+ """Compute the chrF score given the n-gram match statistics.
+
+ :param statistics: A flattened list of 3 * (`char_order` + `word_order`)
+ elements giving the [hyp, ref, match] counts for each order.
+ :return: The final f_beta score between [0, 100].
+ """
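+        # Per n-gram order, F_beta = (1 + beta^2) * P * R / (beta^2 * P + R);
+        # with the default beta = 2, recall is treated as twice as important
+        # as precision. The result is then averaged over all orders.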
+ eps = 1e-16
+ score = 0.0
+ effective_order = 0
+ factor = self.beta ** 2
+ avg_prec, avg_rec = 0.0, 0.0
+
+ for i in range(self.order):
+ n_hyp, n_ref, n_match = statistics[3 * i: 3 * i + 3]
+
+ # chrF++.py style EPS smoothing (also used by Moses and NLTK)
+ prec = n_match / n_hyp if n_hyp > 0 else eps
+ rec = n_match / n_ref if n_ref > 0 else eps
+
+ denom = factor * prec + rec
+ score += ((1 + factor) * prec * rec / denom) if denom > 0 else eps
+
+ # sacreBLEU <2.0.0 style effective order smoothing
+ if n_hyp > 0 and n_ref > 0:
+ avg_prec += prec
+ avg_rec += rec
+ effective_order += 1
+
+ if self.eps_smoothing:
+ return 100 * score / self.order
+
+ if effective_order == 0:
+ avg_prec = avg_rec = 0.0
+ else:
+ avg_prec /= effective_order
+ avg_rec /= effective_order
+
+ if avg_prec + avg_rec:
+ score = (1 + factor) * avg_prec * avg_rec
+ score /= ((factor * avg_prec) + avg_rec)
+ return 100 * score
+ else:
+ return 0.0
+
+ def _compute_score_from_stats(self, stats: List[int]) -> CHRFScore:
+ """Computes the final score from already aggregated statistics.
+
+ :param stats: A list or numpy array of segment-level statistics.
+ :return: A `CHRFScore` object.
+ """
+ return CHRFScore(
+ self._compute_f_score(stats), self.char_order,
+ self.word_order, self.beta)
+
+ def _aggregate_and_compute(self, stats: List[List[int]]) -> CHRFScore:
+ """Computes the final score given the pre-computed corpus statistics.
+
+ :param stats: A list of segment-level statistics
+ :return: A `CHRFScore` object.
+ """
+ return self._compute_score_from_stats(sum_of_lists(stats))
+
+ def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, List[List[Counter]]]:
+ """Given a list of reference segments, extract the character and word n-grams.
+
+ :param refs: A sequence of reference segments.
+        :return: A dictionary with key `ref_ngrams`, holding a list where each
+        element contains the n-gram counters for one reference segment.
+ """
+ ngrams = []
+
+ for ref in refs:
+ # extract character n-grams
+ stats = extract_all_char_ngrams(ref, self.char_order, self.whitespace)
+
+ # Check chrF+ mode
+ if self.word_order > 0:
+ ref_words = self._remove_punctuation(ref)
+
+ for n in range(self.word_order):
+ stats.append(extract_word_ngrams(ref_words, n + 1))
+
+ ngrams.append(stats)
+
+ return {'ref_ngrams': ngrams}
+
+ def _compute_segment_statistics(
+ self, hypothesis: str, ref_kwargs: Dict) -> List[int]:
+ """Given a (pre-processed) hypothesis sentence and already computed
+ reference n-grams, returns the best match statistics across the
+ references.
+
+ :param hypothesis: Hypothesis sentence.
+ :param ref_kwargs: A dictionary with key `ref_ngrams` which is a list
+ where each sublist contains n-gram counters for a particular reference sentence.
+ :return: A list of integers where each triplet denotes [hyp, ref, match]
+ statistics.
+ """
+ best_stats = []
+ best_f_score = -1.0
+
+ # extract character n-grams
+ all_hyp_ngrams = extract_all_char_ngrams(
+ hypothesis, self.char_order, self.whitespace)
+
+ # Check chrF+ mode to see if we'll add word n-grams as well
+ if self.word_order > 0:
+ # Primitive tokenization: separate out punctuations
+ hwords = self._remove_punctuation(hypothesis)
+ _range = range(1, self.word_order + 1)
+ all_hyp_ngrams.extend([extract_word_ngrams(hwords, n) for n in _range])
+
+ # Iterate over multiple references, pick the one with best F score
+ for _ref_ngrams in ref_kwargs['ref_ngrams']:
+ stats = []
+ # Traverse all orders
+ for h, r in zip(all_hyp_ngrams, _ref_ngrams):
+ stats.extend(self._get_match_statistics(h, r))
+ f_score = self._compute_f_score(stats)
+
+ if f_score > best_f_score:
+ best_f_score = f_score
+ best_stats = stats
+
+ return best_stats
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/helpers.py b/venv/lib/python3.10/site-packages/sacrebleu/metrics/helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..72ec14461658249fcd63a139623f3ead9a4aa057
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sacrebleu/metrics/helpers.py
@@ -0,0 +1,69 @@
+"""Various utility functions for word and character n-gram extraction."""
+
+from collections import Counter
+from typing import List, Tuple
+
+
+def extract_all_word_ngrams(line: str, min_order: int, max_order: int) -> Tuple[Counter, int]:
+ """Extracts all ngrams (min_order <= n <= max_order) from a sentence.
+
+ :param line: A string sentence.
+ :param min_order: Minimum n-gram order.
+ :param max_order: Maximum n-gram order.
+ :return: a Counter object with n-grams counts and the sequence length.
+ """
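+    # e.g. extract_all_word_ngrams('a b a', 1, 2) counts the unigrams
+    # ('a',) x2 and ('b',) x1 plus the bigrams ('a', 'b') and ('b', 'a'),
+    # and returns the token count 3 alongside the Counter.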
+
+ ngrams = []
+ tokens = line.split()
+
+ for n in range(min_order, max_order + 1):
+ for i in range(0, len(tokens) - n + 1):
+ ngrams.append(tuple(tokens[i: i + n]))
+
+ return Counter(ngrams), len(tokens)
+
+
+def extract_word_ngrams(tokens: List[str], n: int) -> Counter:
+ """Extracts n-grams with order `n` from a list of tokens.
+
+ :param tokens: A list of tokens.
+ :param n: The order of n-grams.
+ :return: a Counter object with n-grams counts.
+ """
+ return Counter([' '.join(tokens[i:i + n]) for i in range(len(tokens) - n + 1)])
+
+
+def extract_char_ngrams(line: str, n: int, include_whitespace: bool = False) -> Counter:
+    """Returns counts of character n-grams from a sentence.
+
+    :param line: A segment containing a sequence of words.
+    :param n: The order of the n-grams.
+    :param include_whitespace: If `True`, whitespace is not stripped from the line.
+    :return: a Counter object with n-gram counts.
+ """
+ if not include_whitespace:
+ line = ''.join(line.split())
+
+ return Counter([line[i:i + n] for i in range(len(line) - n + 1)])
+
+
+def extract_all_char_ngrams(
+ line: str, max_order: int, include_whitespace: bool = False) -> List[Counter]:
+ """Extracts all character n-grams at once for convenience.
+
+ :param line: A segment containing a sequence of words.
+ :param max_order: The maximum order of the n-grams.
+    :param include_whitespace: If `True`, whitespace is not stripped from the line.
+ :return: a list of Counter objects containing ngrams and counts.
+ """
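+    # e.g. extract_all_char_ngrams('ab c', 2) strips the space and returns
+    # [Counter({'a': 1, 'b': 1, 'c': 1}), Counter({'ab': 1, 'bc': 1})].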
+
+ counters = []
+
+ if not include_whitespace:
+ line = ''.join(line.split())
+
+ for n in range(1, max_order + 1):
+ ngrams = Counter([line[i:i + n] for i in range(len(line) - n + 1)])
+ counters.append(ngrams)
+
+ return counters
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/lib_ter.py b/venv/lib/python3.10/site-packages/sacrebleu/metrics/lib_ter.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d2de4944c955ebf0c8b37fce7f04eb16f79c026
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sacrebleu/metrics/lib_ter.py
@@ -0,0 +1,478 @@
+"""This module implements various utility functions for the TER metric."""
+
+# Copyright 2020 Memsource
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import math
+from typing import List, Tuple, Dict
+
+
+_COST_INS = 1
+_COST_DEL = 1
+_COST_SUB = 1
+
+# Tercom-inspired limits
+_MAX_SHIFT_SIZE = 10
+_MAX_SHIFT_DIST = 50
+_BEAM_WIDTH = 25
+
+# Our own limits
+_MAX_CACHE_SIZE = 10000
+_MAX_SHIFT_CANDIDATES = 1000
+_INT_INFINITY = int(1e16)
+
+_OP_INS = 'i'
+_OP_DEL = 'd'
+_OP_NOP = ' '
+_OP_SUB = 's'
+_OP_UNDEF = 'x'
+
+_FLIP_OPS = str.maketrans(_OP_INS + _OP_DEL, _OP_DEL + _OP_INS)
+
+
+def translation_edit_rate(words_hyp: List[str], words_ref: List[str]) -> Tuple[int, int]:
+ """Calculate the translation edit rate.
+
+ :param words_hyp: Tokenized translation hypothesis.
+ :param words_ref: Tokenized reference translation.
+ :return: tuple (number of edits, length)
+ """
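+    # Note: TER itself is (number of edits) / (reference length); this function
+    # only returns the two sufficient statistics so they can be aggregated over
+    # a whole corpus before dividing.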
+ n_words_ref = len(words_ref)
+ n_words_hyp = len(words_hyp)
+ if n_words_ref == 0:
+ # FIXME: This trace here is not used?
+ trace = _OP_DEL * n_words_hyp
+ # special treatment of empty refs
+ return n_words_hyp, 0
+
+ cached_ed = BeamEditDistance(words_ref)
+ shifts = 0
+
+ input_words = words_hyp
+ checked_candidates = 0
+ while True:
+ # do shifts until they stop reducing the edit distance
+ delta, new_input_words, checked_candidates = _shift(
+ input_words, words_ref, cached_ed, checked_candidates)
+
+ if checked_candidates >= _MAX_SHIFT_CANDIDATES:
+ break
+
+ if delta <= 0:
+ break
+ shifts += 1
+ input_words = new_input_words
+
+ edit_distance, trace = cached_ed(input_words)
+ total_edits = shifts + edit_distance
+
+ return total_edits, n_words_ref
+
+
+def _shift(words_h: List[str], words_r: List[str], cached_ed,
+ checked_candidates: int) -> Tuple[int, List[str], int]:
+ """Attempt to shift words in hypothesis to match reference.
+
+ Returns the shift that reduces the edit distance the most.
+
+ Note that the filtering of possible shifts and shift selection are heavily
+ based on somewhat arbitrary heuristics. The code here follows as closely
+ as possible the logic in Tercom, not always justifying the particular design
+ choices.
+
+ :param words_h: Hypothesis.
+ :param words_r: Reference.
+ :param cached_ed: Cached edit distance.
+ :param checked_candidates: Number of shift candidates that were already
+ evaluated.
+ :return: (score, shifted_words, checked_candidates). Best shift and updated
+ number of evaluated shift candidates.
+ """
+ pre_score, inv_trace = cached_ed(words_h)
+
+ # to get alignment, we pretend we are rewriting reference into hypothesis,
+ # so we need to flip the trace of edit operations
+ trace = _flip_trace(inv_trace)
+ align, ref_err, hyp_err = trace_to_alignment(trace)
+
+ best = None
+
+ for start_h, start_r, length in _find_shifted_pairs(words_h, words_r):
+ # don't do the shift unless both the hypothesis was wrong and the
+ # reference doesn't match hypothesis at the target position
+ if sum(hyp_err[start_h: start_h + length]) == 0:
+ continue
+
+ if sum(ref_err[start_r: start_r + length]) == 0:
+ continue
+
+ # don't try to shift within the subsequence
+ if start_h <= align[start_r] < start_h + length:
+ continue
+
+ prev_idx = -1
+ for offset in range(-1, length):
+ if start_r + offset == -1:
+ idx = 0 # insert before the beginning
+ elif start_r + offset in align:
+ # Unlike Tercom which inserts *after* the index, we insert
+ # *before* the index.
+ idx = align[start_r + offset] + 1
+ else:
+ break # offset is out of bounds => aims past reference
+
+ if idx == prev_idx:
+ continue # skip idx if already tried
+
+ prev_idx = idx
+
+ shifted_words = _perform_shift(words_h, start_h, length, idx)
+            assert len(shifted_words) == len(words_h)
+
+ # Elements of the tuple are designed to replicate Tercom ranking
+ # of shifts:
+ candidate = (
+ pre_score - cached_ed(shifted_words)[0], # highest score first
+ length, # then, longest match first
+ -start_h, # then, earliest match first
+ -idx, # then, earliest target position first
+ shifted_words,
+ )
+
+ checked_candidates += 1
+
+ if not best or candidate > best:
+ best = candidate
+
+ if checked_candidates >= _MAX_SHIFT_CANDIDATES:
+ break
+
+ if not best:
+ return 0, words_h, checked_candidates
+ else:
+ best_score, _, _, _, shifted_words = best
+ return best_score, shifted_words, checked_candidates
+
+
+def _perform_shift(words: List[str], start: int, length: int, target: int) -> List[str]:
+ """Perform a shift in `words` from `start` to `target`.
+
+ :param words: Words to shift.
+ :param start: Where from.
+ :param length: How many words.
+ :param target: Where to.
+ :return: Shifted words.
+ """
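+    # e.g. _perform_shift(['a', 'b', 'c', 'd'], start=2, length=1, target=0)
+    # moves 'c' to the front and returns ['c', 'a', 'b', 'd'].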
+ if target < start:
+ # shift before previous position
+ return words[:target] + words[start: start + length] \
+ + words[target: start] + words[start + length:]
+ elif target > start + length:
+ # shift after previous position
+ return words[:start] + words[start + length: target] \
+ + words[start: start + length] + words[target:]
+ else:
+ # shift within the shifted string
+ return words[:start] + words[start + length: length + target] \
+ + words[start: start + length] + words[length + target:]
+
+
+def _find_shifted_pairs(words_h: List[str], words_r: List[str]):
+ """Find matching word sub-sequences in two lists of words.
+
+ Ignores sub-sequences starting at the same position.
+
+ :param words_h: First word list.
+ :param words_r: Second word list.
+ :return: Yields tuples of (h_start, r_start, length) such that:
+ words_h[h_start:h_start+length] = words_r[r_start:r_start+length]
+ """
+ n_words_h = len(words_h)
+ n_words_r = len(words_r)
+ for start_h in range(n_words_h):
+ for start_r in range(n_words_r):
+ # this is slightly different from what tercom does but this should
+ # really only kick in in degenerate cases
+ if abs(start_r - start_h) > _MAX_SHIFT_DIST:
+ continue
+
+ length = 0
+            while words_h[start_h + length] == words_r[start_r + length] and length < _MAX_SHIFT_SIZE:
+                length += 1
+
+                yield start_h, start_r, length
+
+                # If one sequence is consumed, stop before the while condition
+                # indexes past the end of either word list
+                if n_words_h == start_h + length or n_words_r == start_r + length:
+                    break
+
+
+def _flip_trace(trace):
+ """Flip the trace of edit operations.
+
+ Instead of rewriting a->b, get a recipe for rewriting b->a.
+
+ Simply flips insertions and deletions.
+ """
+ return trace.translate(_FLIP_OPS)
+
+
+def trace_to_alignment(trace: str) -> Tuple[Dict, List, List]:
+ """Transform trace of edit operations into an alignment of the sequences.
+
+ :param trace: Trace of edit operations (' '=no change or 's'/'i'/'d').
+ :return: Alignment, error positions in reference, error positions in hypothesis.
+ """
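+    # e.g. for trace ' si' (keep, substitute, insert) this returns
+    # align = {0: 0, 1: 1}, ref_err = [0, 1], hyp_err = [0, 1, 1].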
+ pos_hyp = -1
+ pos_ref = -1
+ hyp_err = []
+ ref_err = []
+ align = {}
+
+ # we are rewriting a into b
+ for op in trace:
+ if op == _OP_NOP:
+ pos_hyp += 1
+ pos_ref += 1
+ align[pos_ref] = pos_hyp
+ hyp_err.append(0)
+ ref_err.append(0)
+ elif op == _OP_SUB:
+ pos_hyp += 1
+ pos_ref += 1
+ align[pos_ref] = pos_hyp
+ hyp_err.append(1)
+ ref_err.append(1)
+ elif op == _OP_INS:
+ pos_hyp += 1
+ hyp_err.append(1)
+ elif op == _OP_DEL:
+ pos_ref += 1
+ align[pos_ref] = pos_hyp
+ ref_err.append(1)
+ else:
+ raise Exception(f"unknown operation {op!r}")
+
+ return align, ref_err, hyp_err
+
+
+class BeamEditDistance:
+ """Edit distance with several features required for TER calculation.
+
+ * internal cache
+ * "beam" search
+ * tracking of edit operations
+
+ The internal self._cache works like this:
+
+ Keys are words of the hypothesis. Values are tuples (next_node, row) where:
+
+ * next_node is the cache for the next word in the sequence
+ * row is the stored row of the edit distance matrix
+
+    Effectively, caching allows us to skip several rows of the edit distance
+    matrix and to initialize the computation from the last matching matrix
+    row instead.
+
+ Beam search, as implemented here, only explores a fixed-size sub-row of
+ candidates around the matrix diagonal (more precisely, it's a
+ "pseudo"-diagonal since we take the ratio of sequence lengths into account).
+
+    Tracking allows reconstructing the optimal sequence of edit operations.
+
+ :param words_ref: A list of reference tokens.
+ """
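+    # Cache layout sketch for hypotheses 'a b' and 'a c' (rows elided):
+    #   {'a': ({'b': ({}, <row>), 'c': ({}, <row>)}, <row>)}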
+ def __init__(self, words_ref: List[str]):
+ """`BeamEditDistance` initializer."""
+ self._words_ref = words_ref
+ self._n_words_ref = len(self._words_ref)
+
+ # first row corresponds to insertion operations of the reference,
+ # so we do 1 edit operation per reference word
+ self._initial_row = [(i * _COST_INS, _OP_INS)
+ for i in range(self._n_words_ref + 1)]
+
+ self._cache = {} # type: Dict[str, Tuple]
+ self._cache_size = 0
+
+ # Precomputed empty matrix row. Contains infinities so that beam search
+ # avoids using the uninitialized cells.
+ self._empty_row = [(_INT_INFINITY, _OP_UNDEF)] * (self._n_words_ref + 1)
+
+ def __call__(self, words_hyp: List[str]) -> Tuple[int, str]:
+ """Calculate edit distance between self._words_ref and the hypothesis.
+
+ Uses cache to skip some of the computation.
+
+ :param words_hyp: Words in translation hypothesis.
+        :return: Edit distance score and the trace of edit operations.
+ """
+
+ # skip initial words in the hypothesis for which we already know the
+ # edit distance
+ start_position, dist = self._find_cache(words_hyp)
+
+ # calculate the rest of the edit distance matrix
+ edit_distance, newly_created_matrix, trace = self._edit_distance(
+ words_hyp, start_position, dist)
+
+ # update our cache with the newly calculated rows
+ self._add_cache(words_hyp, newly_created_matrix)
+
+ return edit_distance, trace
+
+ def _edit_distance(self, words_h: List[str], start_h: int,
+ cache: List[List[Tuple[int, str]]]) -> Tuple[int, List, str]:
+ """Actual edit distance calculation.
+
+ Can be initialized with the last cached row and a start position in
+ the hypothesis that it corresponds to.
+
+ :param words_h: Words in translation hypothesis.
+ :param start_h: Position from which to start the calculation.
+ (This is zero if no cache match was found.)
+ :param cache: Precomputed rows corresponding to edit distance matrix
+ before `start_h`.
+ :return: Edit distance value, newly computed rows to update the
+ cache, trace.
+ """
+
+ n_words_h = len(words_h)
+
+ # initialize the rest of the matrix with infinite edit distances
+ rest_empty = [list(self._empty_row)
+ for _ in range(n_words_h - start_h)]
+
+ dist = cache + rest_empty
+
+ assert len(dist) == n_words_h + 1
+
+ length_ratio = self._n_words_ref / n_words_h if words_h else 1
+
+ # in some crazy sentences, the difference in length is so large that
+ # we may end up with zero overlap with previous row
+ if _BEAM_WIDTH < length_ratio / 2:
+ beam_width = math.ceil(length_ratio / 2 + _BEAM_WIDTH)
+ else:
+ beam_width = _BEAM_WIDTH
+
+ # calculate the Levenshtein distance
+ for i in range(start_h + 1, n_words_h + 1):
+ pseudo_diag = math.floor(i * length_ratio)
+ min_j = max(0, pseudo_diag - beam_width)
+ max_j = min(self._n_words_ref + 1, pseudo_diag + beam_width)
+
+ if i == n_words_h:
+ max_j = self._n_words_ref + 1
+
+ for j in range(min_j, max_j):
+ if j == 0:
+ dist[i][j] = (dist[i - 1][j][0] + _COST_DEL, _OP_DEL)
+ else:
+ if words_h[i - 1] == self._words_ref[j - 1]:
+ cost_sub = 0
+ op_sub = _OP_NOP
+ else:
+ cost_sub = _COST_SUB
+ op_sub = _OP_SUB
+
+ # Tercom prefers no-op/sub, then insertion, then deletion.
+ # But since we flip the trace and compute the alignment from
+ # the inverse, we need to swap order of insertion and
+ # deletion in the preference.
+ ops = (
+ (dist[i - 1][j - 1][0] + cost_sub, op_sub),
+ (dist[i - 1][j][0] + _COST_DEL, _OP_DEL),
+ (dist[i][j - 1][0] + _COST_INS, _OP_INS),
+ )
+
+ for op_cost, op_name in ops:
+ if dist[i][j][0] > op_cost:
+ dist[i][j] = op_cost, op_name
+
+ # get the trace
+ trace = ""
+ i = n_words_h
+ j = self._n_words_ref
+
+ while i > 0 or j > 0:
+ op = dist[i][j][1]
+ trace = op + trace
+ if op in (_OP_SUB, _OP_NOP):
+ i -= 1
+ j -= 1
+ elif op == _OP_INS:
+ j -= 1
+ elif op == _OP_DEL:
+ i -= 1
+ else:
+ raise Exception(f"unknown operation {op!r}")
+
+ return dist[-1][-1][0], dist[len(cache):], trace
+
+ def _add_cache(self, words_hyp: List[str], mat: List[List[Tuple]]):
+ """Add newly computed rows to cache.
+
+ Since edit distance is only calculated on the hypothesis suffix that
+ was not in cache, the number of rows in `mat` may be shorter than
+ hypothesis length. In that case, we skip over these initial words.
+
+ :param words_hyp: Hypothesis words.
+ :param mat: Edit distance matrix rows for each position.
+ """
+ if self._cache_size >= _MAX_CACHE_SIZE:
+ return
+
+ node = self._cache
+
+ n_mat = len(mat)
+
+ # how many initial words to skip
+ skip_num = len(words_hyp) - n_mat
+
+ # jump through the cache to the current position
+ for i in range(skip_num):
+ node = node[words_hyp[i]][0]
+
+ assert len(words_hyp[skip_num:]) == n_mat
+
+ # update cache with newly computed rows
+ for word, row in zip(words_hyp[skip_num:], mat):
+ if word not in node:
+ node[word] = ({}, tuple(row))
+ self._cache_size += 1
+ value = node[word]
+ node = value[0]
+
+ def _find_cache(self, words_hyp: List[str]) -> Tuple[int, List[List]]:
+ """Find the already computed rows of the edit distance matrix in cache.
+
+ Returns a partially computed edit distance matrix.
+
+ :param words_hyp: Translation hypothesis.
+ :return: Tuple (start position, dist).
+ """
+ node = self._cache
+ start_position = 0
+ dist = [self._initial_row]
+ for word in words_hyp:
+ if word in node:
+ start_position += 1
+ node, row = node[word]
+ dist.append(row)
+ else:
+ break
+
+ return start_position, dist
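The `_add_cache`/`_find_cache` pair above stores matrix rows in a small prefix trie keyed by hypothesis words, so hypotheses that share a prefix reuse the rows already computed for it. Below is a minimal standalone sketch of that caching idea under illustrative names (it is not part of sacrebleu's API); the row contents are dummies and only the trie mechanics are shown.

from typing import Dict, List, Tuple

# Each trie node maps a hypothesis word to (child_node, cached_matrix_row).
Node = Dict[str, Tuple[dict, tuple]]

def find_cached_prefix(cache: Node, words: List[str], initial_row: tuple) -> Tuple[int, list]:
    """Return (start_position, rows) for the longest cached prefix of `words`."""
    node, rows, start = cache, [initial_row], 0
    for word in words:
        if word not in node:
            break
        node, row = node[word]
        rows.append(row)
        start += 1
    return start, rows

def add_to_cache(cache: Node, words: List[str], new_rows: List[tuple]) -> None:
    """Store newly computed rows along the trie path spelled by `words`."""
    node = cache
    skip = len(words) - len(new_rows)            # prefix that was already cached
    for word in words[:skip]:
        node = node[word][0]
    for word, row in zip(words[skip:], new_rows):
        if word not in node:
            node[word] = ({}, tuple(row))
        node = node[word][0]

cache: Node = {}
add_to_cache(cache, ["the", "cat", "sat"], [("row1",), ("row2",), ("row3",)])
start, rows = find_cached_prefix(cache, ["the", "cat", "slept"], ("row0",))
print(start, rows)   # 2 [('row0',), ('row1',), ('row2',)] -- the "the cat" prefix is reused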
diff --git a/venv/lib/python3.10/site-packages/sacrebleu/metrics/ter.py b/venv/lib/python3.10/site-packages/sacrebleu/metrics/ter.py
new file mode 100644
index 0000000000000000000000000000000000000000..40f8221853ac651502435fae3efd9db6a7f7aa04
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sacrebleu/metrics/ter.py
@@ -0,0 +1,195 @@
+"""The implementation of the TER metric (Snover et al., 2006)."""
+
+# Copyright 2020 Memsource
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import List, Dict, Sequence, Optional, Any
+
+from ..tokenizers.tokenizer_ter import TercomTokenizer
+from ..utils import sum_of_lists
+from .base import Score, Signature, Metric
+from .lib_ter import translation_edit_rate
+
+
+class TERSignature(Signature):
+ """A convenience class to represent the reproducibility signature for TER.
+
+ :param args: key-value dictionary passed from the actual metric instance.
+ """
+ def __init__(self, args: dict):
+ """`TERSignature` initializer."""
+ super().__init__(args)
+ self._abbr.update({
+ 'case': 'c',
+ 'tok': 't',
+ 'norm': 'nr',
+ 'punct': 'pn',
+ 'asian': 'as',
+ })
+
+ self.info.update({
+ 'case': 'mixed' if args['case_sensitive'] else 'lc',
+ 'tok': args['tokenizer_signature'],
+ 'norm': args['normalized'],
+ 'punct': not args['no_punct'],
+ 'asian': args['asian_support'],
+ })
+
+
+class TERScore(Score):
+ """A convenience class to represent TER scores.
+
+ :param score: The TER score.
+ :param num_edits: The cumulative number of edits.
+ :param ref_length: The cumulative average reference length.
+ """
+ def __init__(self, score: float, num_edits: float, ref_length: float):
+ """`TERScore` initializer."""
+ super().__init__('TER', score)
+ self.num_edits = int(num_edits)
+ self.ref_length = ref_length
+
+
+class TER(Metric):
+ """Translation edit rate (TER). A near-exact reimplementation of the Tercom
+ algorithm that produces identical results on all "sane" outputs.
+
+ Tercom original implementation: https://github.com/jhclark/tercom
+
+ The beam edit distance algorithm uses a slightly different approach (we stay
+ around the diagonal which is faster, at least in Python) so in some
+ (extreme) corner cases, the output could differ.
+
+ Caching in the edit distance is based partly on the PyTer package by Hiroyuki
+ Tanaka (MIT license). (https://github.com/aflc/pyter)
+
+ :param normalized: Enable character normalization. By default, this strips newlines,
+ unescapes XML-encoded characters, and fixes tokenization around punctuation. When
+ 'asian_support' is enabled, specific Asian (CJK) character sequences are also normalized, i.e.
+ split down to the character level.
+ :param no_punct: Remove punctuation. Can be used in conjunction with 'asian_support' to also remove typical
+ punctuation markers in Asian languages (CJK).
+ :param asian_support: Enable special treatment of Asian characters. This option only has an effect when
+ 'normalized' and/or 'no_punct' is enabled. If 'normalized' is also enabled, then Asian (CJK)
+ characters are split down to the character level. If 'no_punct' is enabled alongside 'asian_support',
+ specific unicode ranges for CJK and full-width punctuations are also removed.
+ :param case_sensitive: If `True`, does not lowercase sentences.
+ :param references: A sequence of reference documents, where each document is
+ a sequence of reference strings. If given, the reference info
+ will be pre-computed and cached for faster re-computation across many systems.
+ """
+
+ _SIGNATURE_TYPE = TERSignature
+
+ def __init__(self, normalized: bool = False,
+ no_punct: bool = False,
+ asian_support: bool = False,
+ case_sensitive: bool = False,
+ references: Optional[Sequence[Sequence[str]]] = None):
+ """`TER` initializer."""
+ super().__init__()
+
+ self.no_punct = no_punct
+ self.normalized = normalized
+ self.asian_support = asian_support
+ self.case_sensitive = case_sensitive
+
+ self.tokenizer = TercomTokenizer(
+ normalized=self.normalized,
+ no_punct=self.no_punct,
+ asian_support=self.asian_support,
+ case_sensitive=self.case_sensitive,
+ )
+ self.tokenizer_signature = self.tokenizer.signature()
+
+ if references is not None:
+ self._ref_cache = self._cache_references(references)
+
+ def _preprocess_segment(self, sent: str) -> str:
+ """Given a sentence, apply tokenization if enabled.
+
+ :param sent: The input sentence string.
+ :return: The pre-processed output string.
+ """
+ return self.tokenizer(sent.rstrip())
+
+ def _compute_score_from_stats(self, stats: List[float]) -> TERScore:
+ """Computes the final score from already aggregated statistics.
+
+ :param stats: A list or numpy array of segment-level statistics.
+ :return: A `TERScore` object.
+ """
+ total_edits, sum_ref_lengths = stats[0], stats[1]
+
+ if sum_ref_lengths > 0:
+ score = total_edits / sum_ref_lengths
+ elif total_edits > 0:
+ score = 1.0 # empty reference(s) and non-empty hypothesis
+ else:
+ score = 0.0 # both reference(s) and hypothesis are empty
+
+ return TERScore(100 * score, total_edits, sum_ref_lengths)
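As a quick worked example of the formula above (numbers are arbitrary): with stats = [3, 6.0], i.e. 3 cumulative edits against a cumulative average reference length of 6.0, score = 3 / 6.0 = 0.5, and the returned TERScore carries 100 * 0.5 = 50.0 with num_edits = 3 and ref_length = 6.0.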
+
+ def _aggregate_and_compute(self, stats: List[List[float]]) -> TERScore:
+ """Computes the final TER score given the pre-computed corpus statistics.
+
+ :param stats: A list of segment-level statistics
+ :return: A `TERScore` instance.
+ """
+ return self._compute_score_from_stats(sum_of_lists(stats))
+
+ def _compute_segment_statistics(
+ self, hypothesis: str, ref_kwargs: Dict) -> List[float]:
+ """Given a (pre-processed) hypothesis sentence and already computed
+ reference words, returns the segment statistics required to compute
+ the full TER score.
+
+ :param hypothesis: Hypothesis sentence.
+ :param ref_kwargs: A dictionary with `ref_words` key which is a list
+ where each sublist contains reference words.
+ :return: A two-element list that contains the 'minimum number of edits'
+ and 'the average reference length'.
+ """
+
+ ref_lengths = 0
+ best_num_edits = int(1e16)
+
+ words_hyp = hypothesis.split()
+
+ # Iterate the references
+ ref_words = ref_kwargs['ref_words']
+ for words_ref in ref_words:
+ num_edits, ref_len = translation_edit_rate(words_hyp, words_ref)
+ ref_lengths += ref_len
+ if num_edits < best_num_edits:
+ best_num_edits = num_edits
+
+ avg_ref_len = ref_lengths / len(ref_words)
+ return [best_num_edits, avg_ref_len]
+
+ def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, Any]:
+ """Given a list of reference segments, applies pre-processing & tokenization
+ and returns list of tokens for each reference.
+
+ :param refs: A sequence of strings.
+ :return: A dictionary that will be passed to `_compute_segment_statistics()`
+ through keyword arguments.
+ """
+ ref_words = []
+
+ for ref in refs:
+ ref_words.append(self._preprocess_segment(ref).split())
+
+ return {'ref_words': ref_words}
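For orientation, here is a small usage sketch of the metric defined in this file, assuming sacrebleu 2.x's public API (where TER is importable from sacrebleu.metrics); the commented outputs are placeholders, not claimed values.

from sacrebleu.metrics import TER

hypotheses = ["the cat sat on the mat", "hello there"]
# one reference stream, aligned 1:1 with the hypotheses
references = [["the cat is on the mat", "hello there"]]

ter = TER()                     # defaults: lowercased input, punctuation kept
result = ter.corpus_score(hypotheses, references)
print(result)                   # e.g. "TER = ..." (value depends on the inputs)
print(result.num_edits, result.ref_length)

# per-segment scoring is also available
print(ter.sentence_score("the cat sat on the mat", ["the cat is on the mat"]))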
diff --git a/venv/lib/python3.10/site-packages/tabledata/__init__.py b/venv/lib/python3.10/site-packages/tabledata/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f41c368069a1c7002ad98d98095aff52c9d1e82c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tabledata/__init__.py
@@ -0,0 +1,24 @@
+"""
+.. codeauthor:: Tsuyoshi Hombashi
+"""
+
+from .__version__ import __author__, __copyright__, __email__, __license__, __version__
+from ._common import convert_idx_to_alphabet
+from ._constant import PatternMatch
+from ._converter import to_value_matrix
+from ._core import TableData
+from ._logger import set_log_level, set_logger
+from .error import DataError, InvalidHeaderNameError, InvalidTableNameError, NameValidationError
+
+
+__all__ = (
+ "convert_idx_to_alphabet",
+ "set_logger",
+ "to_value_matrix",
+ "PatternMatch",
+ "TableData",
+ "DataError",
+ "InvalidHeaderNameError",
+ "InvalidTableNameError",
+ "NameValidationError",
+)
diff --git a/venv/lib/python3.10/site-packages/tabledata/__pycache__/_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/tabledata/__pycache__/_common.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5a5b75b56dc05eeefc698ad4929d72cf3880fe9d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/tabledata/__pycache__/_common.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/tabledata/__pycache__/_constant.cpython-310.pyc b/venv/lib/python3.10/site-packages/tabledata/__pycache__/_constant.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8fc88f446f02f28ed74a7a92a163411d8de4326a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/tabledata/__pycache__/_constant.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/tabledata/__version__.py b/venv/lib/python3.10/site-packages/tabledata/__version__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fefee19a86f0a0eaf8f4d0e6e3c11e3af4ae6a83
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tabledata/__version__.py
@@ -0,0 +1,6 @@
+__author__ = "Tsuyoshi Hombashi"
+__copyright__ = f"Copyright 2017, {__author__}"
+__license__ = "MIT License"
+__version__ = "1.3.3"
+__maintainer__ = __author__
+__email__ = "tsuyoshi.hombashi@gmail.com"
diff --git a/venv/lib/python3.10/site-packages/tabledata/_common.py b/venv/lib/python3.10/site-packages/tabledata/_common.py
new file mode 100644
index 0000000000000000000000000000000000000000..944e9474385d5ac4cace526f532564308a1fd13a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tabledata/_common.py
@@ -0,0 +1,12 @@
+"""
+.. codeauthor:: Tsuyoshi Hombashi
+"""
+
+
+def convert_idx_to_alphabet(idx: int) -> str:
+ if idx < 26:
+ return chr(65 + idx)
+
+ div, mod = divmod(idx, 26)
+
+ return convert_idx_to_alphabet(div - 1) + convert_idx_to_alphabet(mod)
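A quick illustration of the recursion above, which maps a 0-based column index to a spreadsheet-style column name:

from tabledata import convert_idx_to_alphabet

for idx in (0, 25, 26, 27, 701, 702):
    print(idx, convert_idx_to_alphabet(idx))
# 0 A, 25 Z, 26 AA, 27 AB, 701 ZZ, 702 AAA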
diff --git a/venv/lib/python3.10/site-packages/tabledata/_constant.py b/venv/lib/python3.10/site-packages/tabledata/_constant.py
new file mode 100644
index 0000000000000000000000000000000000000000..722f1372ff8416da2a9c5733c11d8351e87c792f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tabledata/_constant.py
@@ -0,0 +1,11 @@
+"""
+.. codeauthor:: Tsuyoshi Hombashi
+"""
+
+import enum
+
+
+@enum.unique
+class PatternMatch(enum.Enum):
+ OR = 0
+ AND = 1
diff --git a/venv/lib/python3.10/site-packages/tabledata/_converter.py b/venv/lib/python3.10/site-packages/tabledata/_converter.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce0799f5298220aa225739c2eb4825706bf827b1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tabledata/_converter.py
@@ -0,0 +1,35 @@
+"""
+.. codeauthor:: Tsuyoshi Hombashi
+"""
+
+from typing import Any, List, Sequence, Tuple
+
+from .error import DataError
+
+
+Row = Tuple[int, Any]
+
+
+def to_value_matrix(headers: Sequence[str], value_matrix: Sequence[Any]) -> List[Row]:
+ if not value_matrix:
+ return []
+
+ return [_to_row(headers, values, row_idx)[1] for row_idx, values in enumerate(value_matrix)]
+
+
+def _to_row(headers: Sequence[str], values: Any, row_idx: int) -> Row:
+ if headers:
+ try:
+ values = values._asdict()
+ except AttributeError:
+ pass
+
+ try:
+ return (row_idx, [values.get(header) for header in headers])
+ except (TypeError, AttributeError):
+ pass
+
+ if not isinstance(values, (tuple, list)):
+ raise DataError(f"row must be a list or tuple: actual={type(values)}")
+
+ return (row_idx, values)
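To illustrate the row-shape handling in to_value_matrix and _to_row above: dict and namedtuple rows are re-ordered to match the headers, plain lists/tuples are passed through unchanged, and anything else raises DataError.

from collections import namedtuple
from tabledata import to_value_matrix

Point = namedtuple("Point", ["a", "b"])
headers = ["a", "b"]
rows = [
    {"b": 2, "a": 1},   # dict keyed by header name
    Point(a=3, b=4),    # namedtuple, converted via _asdict()
    (5, 6),             # plain tuple, used as-is
]
print(to_value_matrix(headers, rows))   # [[1, 2], [3, 4], (5, 6)]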
diff --git a/venv/lib/python3.10/site-packages/tabledata/_core.py b/venv/lib/python3.10/site-packages/tabledata/_core.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d16517eefdafae0ab12e555fd287242352e7968
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tabledata/_core.py
@@ -0,0 +1,510 @@
+"""
+.. codeauthor:: Tsuyoshi Hombashi
+"""
+
+import copy
+import re
+from collections import OrderedDict, namedtuple
+from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Sequence, Tuple, Union
+
+import dataproperty as dp
+import typepy
+from dataproperty import DataPropertyMatrix
+from dataproperty.typing import TypeHint
+from typepy import Nan
+
+from ._constant import PatternMatch
+from ._converter import to_value_matrix
+from ._logger import logger
+
+
+if TYPE_CHECKING:
+ import pandas
+
+
+class TableData:
+ """
+ Class to represent a table data structure.
+
+ :param table_name: Name of the table.
+ :param headers: Table header names.
+ :param rows: Data of the table.
+ """
+
+ def __init__(
+ self,
+ table_name: Optional[str],
+ headers: Sequence[str],
+ rows: Sequence,
+ dp_extractor: Optional[dp.DataPropertyExtractor] = None,
+ type_hints: Optional[Sequence[Union[str, TypeHint]]] = None,
+ max_workers: Optional[int] = None,
+ max_precision: Optional[int] = None,
+ ) -> None:
+ self.__table_name = table_name
+ self.__value_matrix: List[List[Any]] = []
+ self.__value_dp_matrix: Optional[DataPropertyMatrix] = None
+
+ if rows:
+ self.__rows = rows
+ else:
+ self.__rows = []
+
+ if dp_extractor:
+ self.__dp_extractor = copy.deepcopy(dp_extractor)
+ else:
+ self.__dp_extractor = dp.DataPropertyExtractor(max_precision=max_precision)
+
+ if type_hints:
+ self.__dp_extractor.column_type_hints = type_hints
+
+ self.__dp_extractor.strip_str_header = '"'
+
+ if max_workers:
+ self.__dp_extractor.max_workers = max_workers
+
+ if not headers:
+ self.__dp_extractor.headers = []
+ else:
+ self.__dp_extractor.headers = headers
+
+ def __repr__(self) -> str:
+ element_list = [f"table_name={self.table_name}"]
+
+ try:
+ element_list.append("headers=[{}]".format(", ".join(self.headers)))
+ except TypeError:
+ element_list.append("headers=None")
+
+ element_list.extend([f"cols={self.num_columns}", f"rows={self.num_rows}"])
+
+ return ", ".join(element_list)
+
+ def __eq__(self, other: Any) -> bool:
+ if not isinstance(other, TableData):
+ return False
+
+ return self.equals(other, cmp_by_dp=False)
+
+ def __ne__(self, other: Any) -> bool:
+ if not isinstance(other, TableData):
+ return True
+
+ return not self.equals(other, cmp_by_dp=False)
+
+ @property
+ def table_name(self) -> Optional[str]:
+ """str: Name of the table."""
+
+ return self.__table_name
+
+ @table_name.setter
+ def table_name(self, value: Optional[str]) -> None:
+ self.__table_name = value
+
+ @property
+ def headers(self) -> Sequence[str]:
+ """Sequence[str]: Table header names."""
+
+ return self.__dp_extractor.headers
+
+ @property
+ def rows(self) -> Sequence:
+ """Sequence: Original rows of tabular data."""
+
+ return self.__rows
+
+ @property
+ def value_matrix(self) -> DataPropertyMatrix:
+ """DataPropertyMatrix: Converted rows of tabular data."""
+
+ if self.__value_matrix:
+ return self.__value_matrix
+
+ self.__value_matrix = [
+ [value_dp.data for value_dp in value_dp_list] for value_dp_list in self.value_dp_matrix
+ ]
+
+ return self.__value_matrix
+
+ @property
+ def has_value_dp_matrix(self) -> bool:
+ return self.__value_dp_matrix is not None
+
+ @property
+ def max_workers(self) -> int:
+ return self.__dp_extractor.max_workers
+
+ @max_workers.setter
+ def max_workers(self, value: Optional[int]) -> None:
+ self.__dp_extractor.max_workers = value
+
+ @property
+ def num_rows(self) -> Optional[int]:
+ """Optional[int]:
+ Number of rows in the tabular data.
+ |None| if the ``rows`` is neither list nor tuple.
+ """
+
+ try:
+ return len(self.rows)
+ except TypeError:
+ return None
+
+ @property
+ def num_columns(self) -> Optional[int]:
+ if typepy.is_not_empty_sequence(self.headers):
+ return len(self.headers)
+
+ try:
+ return len(self.rows[0])
+ except TypeError:
+ return None
+ except IndexError:
+ return 0
+
+ @property
+ def value_dp_matrix(self) -> DataPropertyMatrix:
+ """DataPropertyMatrix: DataProperty for table data."""
+
+ if self.__value_dp_matrix is None:
+ self.__value_dp_matrix = self.__dp_extractor.to_dp_matrix(
+ to_value_matrix(self.headers, self.rows)
+ )
+
+ return self.__value_dp_matrix
+
+ @property
+ def header_dp_list(self) -> List[dp.DataProperty]:
+ return self.__dp_extractor.to_header_dp_list()
+
+ @property
+ def column_dp_list(self) -> List[dp.ColumnDataProperty]:
+ return self.__dp_extractor.to_column_dp_list(self.value_dp_matrix)
+
+ @property
+ def dp_extractor(self) -> dp.DataPropertyExtractor:
+ return self.__dp_extractor
+
+ def is_empty_header(self) -> bool:
+ """bool: |True| if the data :py:attr:`.headers` is empty."""
+
+ return typepy.is_empty_sequence(self.headers)
+
+ def is_empty_rows(self) -> bool:
+ """
+ :return: |True| if the tabular data has no rows.
+ :rtype: bool
+ """
+
+ return self.num_rows == 0
+
+ def is_empty(self) -> bool:
+ """
+ :return:
+ |True| if the data :py:attr:`.headers` or
+ :py:attr:`.value_matrix` is empty.
+ :rtype: bool
+ """
+
+ return any([self.is_empty_header(), self.is_empty_rows()])
+
+ def equals(self, other: "TableData", cmp_by_dp: bool = True) -> bool:
+ if cmp_by_dp:
+ return self.__equals_dp(other)
+
+ return self.__equals_raw(other)
+
+ def __equals_base(self, other: "TableData") -> bool:
+ compare_item_list = [self.table_name == other.table_name]
+
+ if self.num_rows is not None:
+ compare_item_list.append(self.num_rows == other.num_rows)
+
+ return all(compare_item_list)
+
+ def __equals_raw(self, other: "TableData") -> bool:
+ if not self.__equals_base(other):
+ return False
+
+ if self.headers != other.headers:
+ return False
+
+ for lhs_row, rhs_row in zip(self.rows, other.rows):
+ if len(lhs_row) != len(rhs_row):
+ return False
+
+ if not all(
+ [
+ lhs == rhs
+ for lhs, rhs in zip(lhs_row, rhs_row)
+ if not Nan(lhs).is_type() and not Nan(rhs).is_type()
+ ]
+ ):
+ return False
+
+ return True
+
+ def __equals_dp(self, other: "TableData") -> bool:
+ if not self.__equals_base(other):
+ return False
+
+ if self.header_dp_list != other.header_dp_list:
+ return False
+
+ if self.value_dp_matrix is None or other.value_dp_matrix is None:
+ return False
+
+ for lhs_list, rhs_list in zip(self.value_dp_matrix, other.value_dp_matrix):
+ if len(lhs_list) != len(rhs_list):
+ return False
+
+ if any([lhs != rhs for lhs, rhs in zip(lhs_list, rhs_list)]):
+ return False
+
+ return True
+
+ def in_tabledata_list(self, other: Sequence["TableData"], cmp_by_dp: bool = True) -> bool:
+ for table_data in other:
+ if self.equals(table_data, cmp_by_dp=cmp_by_dp):
+ return True
+
+ return False
+
+ def validate_rows(self) -> None:
+ """
+ :raises ValueError:
+ """
+
+ invalid_row_idx_list = []
+
+ for row_idx, row in enumerate(self.rows):
+ if isinstance(row, (list, tuple)) and len(self.headers) != len(row):
+ invalid_row_idx_list.append(row_idx)
+
+ if isinstance(row, dict):
+ if not all([header in row for header in self.headers]):
+ invalid_row_idx_list.append(row_idx)
+
+ if not invalid_row_idx_list:
+ return
+
+ for invalid_row_idx in invalid_row_idx_list:
+ logger.debug(f"invalid row (line={invalid_row_idx}): {self.rows[invalid_row_idx]}")
+
+ raise ValueError(
+ "table header length and row length are mismatch:\n"
+ + f" header(len={len(self.headers)}): {self.headers}\n"
+ + " # of miss match rows: {} ouf of {}\n".format(
+ len(invalid_row_idx_list), self.num_rows
+ )
+ )
+
+ def as_dict(self, default_key: str = "table") -> Dict[str, List["OrderedDict[str, Any]"]]:
+ """
+ Args:
+ default_key:
+ Key of a returning dictionary when the ``table_name`` is empty.
+
+ Returns:
+ dict: Table data as a |dict| instance.
+
+ Sample Code:
+ .. code:: python
+
+ from tabledata import TableData
+
+ TableData(
+ "sample",
+ ["a", "b"],
+ [[1, 2], [3.3, 4.4]]
+ ).as_dict()
+
+ Output:
+ .. code:: python
+
+ {'sample': [OrderedDict([('a', 1), ('b', 2)]), OrderedDict([('a', 3.3), ('b', 4.4)])]}
+ """ # noqa
+
+ dict_body = []
+ for row in self.value_matrix:
+ if not row:
+ continue
+
+ values = [
+ (header, value) for header, value in zip(self.headers, row) if value is not None
+ ]
+
+ if not values:
+ continue
+
+ dict_body.append(OrderedDict(values))
+
+ table_name = self.table_name
+ if not table_name:
+ table_name = default_key
+
+ return {table_name: dict_body}
+
+ def as_tuple(self) -> Iterator[Tuple]:
+ """
+ :return: Rows of the tuple.
+ :rtype: list of |namedtuple|
+
+ :Sample Code:
+ .. code:: python
+
+ from tabledata import TableData
+
+ records = TableData(
+ "sample",
+ ["a", "b"],
+ [[1, 2], [3.3, 4.4]]
+ ).as_tuple()
+ for record in records:
+ print(record)
+
+ :Output:
+ .. code-block:: none
+
+ Row(a=1, b=2)
+ Row(a=Decimal('3.3'), b=Decimal('4.4'))
+ """
+
+ Row = namedtuple("Row", self.headers) # type: ignore
+
+ for value_dp_list in self.value_dp_matrix:
+ if typepy.is_empty_sequence(value_dp_list):
+ continue
+
+ row = Row(*(value_dp.data for value_dp in value_dp_list))
+
+ yield row
+
+ def as_dataframe(self) -> "pandas.DataFrame":
+ """
+ :return: Table data as a ``pandas.DataFrame`` instance.
+ :rtype: pandas.DataFrame
+
+ :Sample Code:
+ .. code-block:: python
+
+ from tabledata import TableData
+
+ TableData(
+ "sample",
+ ["a", "b"],
+ [[1, 2], [3.3, 4.4]]
+ ).as_dataframe()
+
+ :Output:
+ .. code-block:: none
+
+ a b
+ 0 1 2
+ 1 3.3 4.4
+
+ :Dependency Packages:
+ - `pandas <https://pandas.pydata.org/>`__
+ """
+
+ try:
+ from pandas import DataFrame
+ except ImportError:
+ raise RuntimeError("required 'pandas' package to execute as_dataframe method")
+
+ dataframe = DataFrame(self.value_matrix)
+ if not self.is_empty_header():
+ dataframe.columns = self.headers
+
+ return dataframe
+
+ def transpose(self) -> "TableData":
+ return TableData(
+ self.table_name,
+ self.headers,
+ [row for row in zip(*self.rows)],
+ max_workers=self.max_workers,
+ )
+
+ def filter_column(
+ self,
+ patterns: Optional[Sequence[str]] = None,
+ is_invert_match: bool = False,
+ is_re_match: bool = False,
+ pattern_match: PatternMatch = PatternMatch.OR,
+ ) -> "TableData":
+ logger.debug(
+ "filter_column: patterns={}, is_invert_match={}, "
+ "is_re_match={}, pattern_match={}".format(
+ patterns, is_invert_match, is_re_match, pattern_match
+ )
+ )
+
+ if not patterns:
+ return self
+
+ match_header_list = []
+ match_column_matrix = []
+
+ if pattern_match == PatternMatch.OR:
+ match_method = any
+ elif pattern_match == PatternMatch.AND:
+ match_method = all
+ else:
+ raise ValueError(f"unknown matching: {pattern_match}")
+
+ for header, column in zip(self.headers, zip(*self.rows)):
+ is_match_list = []
+ for pattern in patterns:
+ is_match = self.__is_match(header, pattern, is_re_match)
+
+ is_match_list.append(
+ any([is_match and not is_invert_match, not is_match and is_invert_match])
+ )
+
+ if match_method(is_match_list):
+ match_header_list.append(header)
+ match_column_matrix.append(column)
+
+ logger.debug(
+ "filter_column: table={}, match_header_list={}".format(
+ self.table_name, match_header_list
+ )
+ )
+
+ return TableData(
+ self.table_name,
+ match_header_list,
+ list(zip(*match_column_matrix)),
+ max_workers=self.max_workers,
+ )
+
+ @staticmethod
+ def from_dataframe(
+ dataframe: "pandas.DataFrame",
+ table_name: str = "",
+ type_hints: Optional[Sequence[TypeHint]] = None,
+ max_workers: Optional[int] = None,
+ ) -> "TableData":
+ """
+ Initialize TableData instance from a pandas.DataFrame instance.
+
+ :param pandas.DataFrame dataframe:
+ :param str table_name: Table name to create.
+ """
+
+ return TableData(
+ table_name,
+ list(dataframe.columns.values),
+ dataframe.values.tolist(),
+ type_hints=type_hints,
+ max_workers=max_workers,
+ )
+
+ @staticmethod
+ def __is_match(header: str, pattern: str, is_re_match: bool) -> bool:
+ if is_re_match:
+ return re.search(pattern, header) is not None
+
+ return header == pattern
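A short usage sketch of the TableData class above (standard tabledata API; the output comments are approximate, since header and value representation is delegated to dataproperty):

from tabledata import TableData

table = TableData("sample", ["a", "b", "c"], [[1, 2, 3], [4, 5, 6]])

print(table.num_rows, table.num_columns)      # 2 3
print(table.as_dict())                        # {'sample': [OrderedDict([('a', 1), ...]), ...]}

# keep only the columns whose header matches one of the patterns
subset = table.filter_column(patterns=["a", "c"])
print(list(subset.headers))                   # ['a', 'c']

# swap rows and columns (the header row is kept as-is)
print(table.transpose().rows)                 # [(1, 4), (2, 5), (3, 6)]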
diff --git a/venv/lib/python3.10/site-packages/tabledata/error.py b/venv/lib/python3.10/site-packages/tabledata/error.py
new file mode 100644
index 0000000000000000000000000000000000000000..35084f8b1af8fa41a12f4fcaf5f0710771019f41
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tabledata/error.py
@@ -0,0 +1,27 @@
+"""
+.. codeauthor:: Tsuyoshi Hombashi
+"""
+
+
+class NameValidationError(ValueError):
+ """
+ Exception raised when a name is invalid.
+ """
+
+
+class InvalidTableNameError(NameValidationError):
+ """
+ Exception raised when a table name is invalid.
+ """
+
+
+class InvalidHeaderNameError(NameValidationError):
+ """
+ Exception raised when a table header name is invalid.
+ """
+
+
+class DataError(ValueError):
+ """
+ Exception raised when data is invalid as tabular data.
+ """
diff --git a/venv/lib/python3.10/site-packages/tabledata/normalizer.py b/venv/lib/python3.10/site-packages/tabledata/normalizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f5c383f51c57c49eeeb611679d3b5c8fe90ff52
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tabledata/normalizer.py
@@ -0,0 +1,207 @@
+"""
+.. codeauthor:: Tsuyoshi Hombashi
+"""
+
+import abc
+import warnings
+from typing import List, Sequence
+
+import typepy
+from dataproperty.typing import TypeHint
+
+from ._core import TableData
+from ._logger import logger
+from .error import InvalidHeaderNameError, InvalidTableNameError
+
+
+class TableDataNormalizerInterface(metaclass=abc.ABCMeta):
+ """
+ Interface class to validate and normalize data of |TableData|.
+ """
+
+ @abc.abstractmethod
+ def validate(self) -> None: # pragma: no cover
+ pass
+
+ @abc.abstractmethod
+ def normalize(self) -> TableData: # pragma: no cover
+ pass
+
+
+class AbstractTableDataNormalizer(TableDataNormalizerInterface):
+ @property
+ def _type_hints(self) -> List[TypeHint]:
+ return self._tabledata.dp_extractor.column_type_hints
+
+ def __init__(self, tabledata: TableData) -> None:
+ self._tabledata = tabledata
+
+ def validate(self) -> None:
+ if not self._tabledata.table_name:
+ raise ValueError("table_name must not be empty")
+
+ self._validate_table_name(self._tabledata.table_name)
+ self._validate_headers()
+
+ def sanitize(self): # type: ignore
+ warnings.warn(
+ "'sanitize' method is deprecated and will be removed in the future."
+ " use 'normalize' method instead.",
+ DeprecationWarning,
+ )
+
+ return self.normalize()
+
+ def normalize(self) -> TableData:
+ """
+ :return: Sanitized table data.
+ :rtype: tabledata.TableData
+ """
+
+ logger.debug(f"normalize: {type(self).__name__}")
+
+ normalize_headers = self._normalize_headers()
+
+ return TableData(
+ self.__normalize_table_name(),
+ normalize_headers,
+ self._normalize_rows(normalize_headers),
+ dp_extractor=self._tabledata.dp_extractor,
+ type_hints=self._type_hints,
+ max_workers=self._tabledata.max_workers,
+ )
+
+ @abc.abstractmethod
+ def _preprocess_table_name(self) -> str:
+ """
+ This method is always called before table name validation.
+ You must return preprocessed table name.
+ """
+
+ @abc.abstractmethod
+ def _validate_table_name(self, table_name: str) -> None:
+ """
+ Must raise :py:class:`~.InvalidTableNameError`
+ when you consider the table name invalid.
+
+ :param str table_name: Table name to validate.
+ :raises tabledata.InvalidTableNameError:
+ If the table name is invalid.
+ |raises_validate_table_name|
+ """
+
+ @abc.abstractmethod
+ def _normalize_table_name(self, table_name: str) -> str:
+ """
+ Must return a valid table name.
+ The table name must be considered to be a valid name by
+ :py:meth:`~._validate_table_name` method.
+
+ This method called when :py:meth:`~._validate_table_name` method raise
+ :py:class:`~.InvalidTableNameError`.
+
+ :param str table_name: Table name to normalize.
+ :return: Sanitized table name.
+ :rtype: str
+ """
+
+ @abc.abstractmethod
+ def _preprocess_header(self, col_idx: int, header: str) -> str:
+ """
+ This method is always called before a header validation.
+ You must return preprocessed header.
+ """
+
+ @abc.abstractmethod
+ def _validate_header(self, header: str) -> None:
+ """
+ No operation.
+
+ This method called for each table header. Override this method
+ in a subclass if you want to detect invalid table header elements.
+ Raise :py:class:`~.InvalidHeaderNameError` if an invalid
+ header element found.
+
+ :param str header: Table header name.
+ :raises tabledata.InvalidHeaderNameError:
+ If the ``header`` is invalid.
+ """
+
+ @abc.abstractmethod
+ def _normalize_header(self, header: str) -> str:
+ """
+ Must return a valid header name.
+ This method called when :py:meth:`~._validate_header` method raise
+ :py:class:`~.InvalidHeaderNameError`.
+ Override this method in subclass if you want to rename invalid
+ table header element.
+
+ :param str header: Header name to normalize.
+ :return: Renamed header name.
+ :rtype: str
+ """
+
+ def _normalize_rows(self, normalize_headers: Sequence[str]) -> List:
+ return list(self._tabledata.rows)
+
+ def _validate_headers(self) -> None:
+ for header in self._tabledata.headers:
+ self._validate_header(header)
+
+ def __normalize_table_name(self) -> str:
+ preprocessed_table_name = self._preprocess_table_name()
+
+ try:
+ self._validate_table_name(preprocessed_table_name)
+ new_table_name = preprocessed_table_name
+ except InvalidTableNameError:
+ new_table_name = self._normalize_table_name(preprocessed_table_name)
+ self._validate_table_name(new_table_name)
+
+ return new_table_name
+
+ def _normalize_headers(self) -> List[str]:
+ new_header_list = []
+
+ for col_idx, header in enumerate(self._tabledata.headers):
+ header = self._preprocess_header(col_idx, header)
+
+ try:
+ self._validate_header(header)
+ new_header = header
+ except InvalidHeaderNameError:
+ new_header = self._normalize_header(header)
+ self._validate_header(new_header)
+
+ new_header_list.append(new_header)
+
+ return new_header_list
+
+
+class TableDataNormalizer(AbstractTableDataNormalizer):
+ def _preprocess_table_name(self) -> str:
+ if not self._tabledata.table_name:
+ return ""
+
+ return self._tabledata.table_name
+
+ def _validate_table_name(self, table_name: str) -> None:
+ try:
+ typepy.String(table_name).validate()
+ except TypeError as e:
+ raise InvalidTableNameError(e)
+
+ def _normalize_table_name(self, table_name: str) -> str:
+ return str(typepy.String(table_name).force_convert())
+
+ def _preprocess_header(self, col_idx: int, header: str) -> str:
+ return header
+
+ def _validate_header(self, header: str) -> None:
+ try:
+ typepy.String(header).validate()
+ except TypeError as e:
+ raise InvalidHeaderNameError(e)
+
+ def _normalize_header(self, header: str) -> str:
+ return str(typepy.String(header).force_convert())
diff --git a/venv/lib/python3.10/site-packages/tabledata/py.typed b/venv/lib/python3.10/site-packages/tabledata/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/tabulate/__init__.py b/venv/lib/python3.10/site-packages/tabulate/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..503df34848d710b62099e514dee3202de29c3f2a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/tabulate/__init__.py
@@ -0,0 +1,2716 @@
+"""Pretty-print tabular data."""
+
+from collections import namedtuple
+from collections.abc import Iterable, Sized
+from html import escape as htmlescape
+from itertools import chain, zip_longest as izip_longest
+from functools import reduce, partial
+import io
+import re
+import math
+import textwrap
+import dataclasses
+
+try:
+ import wcwidth # optional wide-character (CJK) support
+except ImportError:
+ wcwidth = None
+
+
+def _is_file(f):
+ return isinstance(f, io.IOBase)
+
+
+__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
+try:
+ from .version import version as __version__ # noqa: F401
+except ImportError:
+ pass # running __init__.py as a script, AppVeyor pytests
+
+
+# minimum extra space in headers
+MIN_PADDING = 2
+
+# Whether or not to preserve leading/trailing whitespace in data.
+PRESERVE_WHITESPACE = False
+
+_DEFAULT_FLOATFMT = "g"
+_DEFAULT_INTFMT = ""
+_DEFAULT_MISSINGVAL = ""
+# default align will be overwritten by "left", "center" or "decimal"
+# depending on the formatter
+_DEFAULT_ALIGN = "default"
+
+
+# if True, enable wide-character (CJK) support
+WIDE_CHARS_MODE = wcwidth is not None
+
+# Constant that can be used as part of passed rows to generate a separating line
+# It is purposely an unprintable character, very unlikely to be used in a table
+SEPARATING_LINE = "\001"
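Usage sketch for the constant above: SEPARATING_LINE can be passed in place of a data row to draw a horizontal rule at that position (supported by tabulate's public API).

from tabulate import SEPARATING_LINE, tabulate

rows = [["spam", 41], SEPARATING_LINE, ["eggs", 451]]
print(tabulate(rows, headers=["item", "qty"], tablefmt="simple"))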
+
+Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
+
+
+DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
+
+
+# A table structure is supposed to be:
+#
+# --- lineabove ---------
+# headerrow
+# --- linebelowheader ---
+# datarow
+# --- linebetweenrows ---
+# ... (more datarows) ...
+# --- linebetweenrows ---
+# last datarow
+# --- linebelow ---------
+#
+# TableFormat's line* elements can be
+#
+# - either None, if the element is not used,
+# - or a Line tuple,
+# - or a function: [col_widths], [col_alignments] -> string.
+#
+# TableFormat's *row elements can be
+#
+# - either None, if the element is not used,
+# - or a DataRow tuple,
+# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
+#
+# padding (an integer) is the amount of white space around data values.
+#
+# with_header_hide:
+#
+# - either None, to display all table elements unconditionally,
+# - or a list of elements not to be displayed if the table has column headers.
+#
+TableFormat = namedtuple(
+ "TableFormat",
+ [
+ "lineabove",
+ "linebelowheader",
+ "linebetweenrows",
+ "linebelow",
+ "headerrow",
+ "datarow",
+ "padding",
+ "with_header_hide",
+ ],
+)
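As a concrete instance of the structure described in the comment above, here is an illustrative custom format (the name grid_like is just for this sketch): Line tuples draw the horizontal rules, DataRow tuples frame each row, and padding adds spaces around cell values. tabulate() accepts such a TableFormat instance directly as its tablefmt argument.

grid_like = TableFormat(
    lineabove=Line("+", "-", "+", "+"),
    linebelowheader=Line("+", "=", "+", "+"),
    linebetweenrows=None,
    linebelow=Line("+", "-", "+", "+"),
    headerrow=DataRow("|", "|", "|"),
    datarow=DataRow("|", "|", "|"),
    padding=1,
    with_header_hide=None,
)

# e.g. tabulate([["spam", 41]], headers=["item", "qty"], tablefmt=grid_like)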
+
+
+def _is_separating_line(row):
+ row_type = type(row)
+ is_sl = (row_type == list or row_type == str) and (
+ (len(row) >= 1 and row[0] == SEPARATING_LINE)
+ or (len(row) >= 2 and row[1] == SEPARATING_LINE)
+ )
+ return is_sl
+
+
+def _pipe_segment_with_colons(align, colwidth):
+ """Return a segment of a horizontal line with optional colons which
+ indicate column's alignment (as in `pipe` output format)."""
+ w = colwidth
+ if align in ["right", "decimal"]:
+ return ("-" * (w - 1)) + ":"
+ elif align == "center":
+ return ":" + ("-" * (w - 2)) + ":"
+ elif align == "left":
+ return ":" + ("-" * (w - 1))
+ else:
+ return "-" * w
+
+
+def _pipe_line_with_colons(colwidths, colaligns):
+ """Return a horizontal line with optional colons to indicate column's
+ alignment (as in `pipe` output format)."""
+ if not colaligns: # e.g. printing an empty data frame (github issue #15)
+ colaligns = [""] * len(colwidths)
+ segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
+ return "|" + "|".join(segments) + "|"
+
+
+def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
+ alignment = {
+ "left": "",
+ "right": 'align="right"| ',
+ "center": 'align="center"| ',
+ "decimal": 'align="right"| ',
+ }
+ # hard-coded padding _around_ align attribute and value together
+ # rather than padding parameter which affects only the value
+ values_with_attrs = [
+ " " + alignment.get(a, "") + c + " " for c, a in zip(cell_values, colaligns)
+ ]
+ colsep = separator * 2
+ return (separator + colsep.join(values_with_attrs)).rstrip()
+
+
+def _textile_row_with_attrs(cell_values, colwidths, colaligns):
+ cell_values[0] += " "
+ alignment = {"left": "<.", "right": ">.", "center": "=.", "decimal": ">."}
+ values = (alignment.get(a, "") + v for a, v in zip(colaligns, cell_values))
+ return "|" + "|".join(values) + "|"
+
+
+def _html_begin_table_without_header(colwidths_ignore, colaligns_ignore):
+ # this table header will be suppressed if there is a header row
+ return "\n"
+
+
+def _html_row_with_attrs(celltag, unsafe, cell_values, colwidths, colaligns):
+ alignment = {
+ "left": "",
+ "right": ' style="text-align: right;"',
+ "center": ' style="text-align: center;"',
+ "decimal": ' style="text-align: right;"',
+ }
+ if unsafe:
+ values_with_attrs = [
+ "<{0}{1}>{2}{0}>".format(celltag, alignment.get(a, ""), c)
+ for c, a in zip(cell_values, colaligns)
+ ]
+ else:
+ values_with_attrs = [
+ "<{0}{1}>{2}{0}>".format(celltag, alignment.get(a, ""), htmlescape(c))
+ for c, a in zip(cell_values, colaligns)
+ ]
+ rowhtml = "{}
".format("".join(values_with_attrs).rstrip())
+ if celltag == "th": # it's a header row, create a new table header
+ rowhtml = f"\n\n{rowhtml}\n\n"
+ return rowhtml
+
+
+def _moin_row_with_attrs(celltag, cell_values, colwidths, colaligns, header=""):
+ alignment = {
+ "left": "",
+ "right": '