diff --git a/.gitattributes b/.gitattributes index 3cf2c2547f713c41b5ce8bc3fa785319489f6a8a..3426c91cddcb76eaad23242c6a1deec14daaa98e 100644 --- a/.gitattributes +++ b/.gitattributes @@ -66,3 +66,4 @@ llmeval-env/lib/python3.10/site-packages/scipy/spatial/_ckdtree.cpython-310-x86_ llmeval-env/lib/python3.10/site-packages/scipy/spatial/_qhull.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/scipy/misc/face.dat filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/scipy/fft/_pocketfft/pypocketfft.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +llmeval-env/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/llmeval-env/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..c0e11581f8819d518d7664c7724c0ce63abad5ea --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e81bf00d65473806844c2e1800e77c23d62a118255ab7c848c6619b444de4a6e +size 2586576 diff --git a/llmeval-env/lib/python3.10/site-packages/frozenlist/__init__.py b/llmeval-env/lib/python3.10/site-packages/frozenlist/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c0f71368bab1a1bf44abe10d73a3e005c747a3bb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/frozenlist/__init__.py @@ -0,0 +1,95 @@ +import os +import sys +import types +from collections.abc import MutableSequence +from functools import total_ordering +from typing import Type + +__version__ = "1.4.1" + +__all__ = ("FrozenList", "PyFrozenList") # type: Tuple[str, ...] 
+ + +NO_EXTENSIONS = bool(os.environ.get("FROZENLIST_NO_EXTENSIONS")) # type: bool + + +@total_ordering +class FrozenList(MutableSequence): + __slots__ = ("_frozen", "_items") + + if sys.version_info >= (3, 9): + __class_getitem__ = classmethod(types.GenericAlias) + else: + + @classmethod + def __class_getitem__(cls: Type["FrozenList"]) -> Type["FrozenList"]: + return cls + + def __init__(self, items=None): + self._frozen = False + if items is not None: + items = list(items) + else: + items = [] + self._items = items + + @property + def frozen(self): + return self._frozen + + def freeze(self): + self._frozen = True + + def __getitem__(self, index): + return self._items[index] + + def __setitem__(self, index, value): + if self._frozen: + raise RuntimeError("Cannot modify frozen list.") + self._items[index] = value + + def __delitem__(self, index): + if self._frozen: + raise RuntimeError("Cannot modify frozen list.") + del self._items[index] + + def __len__(self): + return self._items.__len__() + + def __iter__(self): + return self._items.__iter__() + + def __reversed__(self): + return self._items.__reversed__() + + def __eq__(self, other): + return list(self) == other + + def __le__(self, other): + return list(self) <= other + + def insert(self, pos, item): + if self._frozen: + raise RuntimeError("Cannot modify frozen list.") + self._items.insert(pos, item) + + def __repr__(self): + return f"" + + def __hash__(self): + if self._frozen: + return hash(tuple(self)) + else: + raise RuntimeError("Cannot hash unfrozen list.") + + +PyFrozenList = FrozenList + + +if not NO_EXTENSIONS: + try: + from ._frozenlist import FrozenList as CFrozenList # type: ignore + except ImportError: # pragma: no cover + pass + else: + FrozenList = CFrozenList # type: ignore diff --git a/llmeval-env/lib/python3.10/site-packages/frozenlist/__init__.pyi b/llmeval-env/lib/python3.10/site-packages/frozenlist/__init__.pyi new file mode 100644 index 
0000000000000000000000000000000000000000..ae803ef6aad72f57e7379db5a2044a95f214df7b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/frozenlist/__init__.pyi @@ -0,0 +1,47 @@ +from typing import ( + Generic, + Iterable, + Iterator, + List, + MutableSequence, + Optional, + TypeVar, + Union, + overload, +) + +_T = TypeVar("_T") +_Arg = Union[List[_T], Iterable[_T]] + +class FrozenList(MutableSequence[_T], Generic[_T]): + def __init__(self, items: Optional[_Arg[_T]] = None) -> None: ... + @property + def frozen(self) -> bool: ... + def freeze(self) -> None: ... + @overload + def __getitem__(self, i: int) -> _T: ... + @overload + def __getitem__(self, s: slice) -> FrozenList[_T]: ... + @overload + def __setitem__(self, i: int, o: _T) -> None: ... + @overload + def __setitem__(self, s: slice, o: Iterable[_T]) -> None: ... + @overload + def __delitem__(self, i: int) -> None: ... + @overload + def __delitem__(self, i: slice) -> None: ... + def __len__(self) -> int: ... + def __iter__(self) -> Iterator[_T]: ... + def __reversed__(self) -> Iterator[_T]: ... + def __eq__(self, other: object) -> bool: ... + def __le__(self, other: FrozenList[_T]) -> bool: ... + def __ne__(self, other: object) -> bool: ... + def __lt__(self, other: FrozenList[_T]) -> bool: ... + def __ge__(self, other: FrozenList[_T]) -> bool: ... + def __gt__(self, other: FrozenList[_T]) -> bool: ... + def insert(self, pos: int, item: _T) -> None: ... + def __repr__(self) -> str: ... + def __hash__(self) -> int: ... 
+ +# types for C accelerators are the same +CFrozenList = PyFrozenList = FrozenList diff --git a/llmeval-env/lib/python3.10/site-packages/frozenlist/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/frozenlist/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41ee278da92e4161aeb4b91f9f7b02e2d7fb1904 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/frozenlist/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..cf57bc3beeb2ac6d919ddd374b7adb629ebd9a23 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/frozenlist/_frozenlist.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/frozenlist/_frozenlist.pyx b/llmeval-env/lib/python3.10/site-packages/frozenlist/_frozenlist.pyx new file mode 100644 index 0000000000000000000000000000000000000000..9ee846c1aeb17ac8521f44bcb8617f189b89e5fe --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/frozenlist/_frozenlist.pyx @@ -0,0 +1,123 @@ +import sys +import types +from collections.abc import MutableSequence + + +cdef class FrozenList: + + if sys.version_info >= (3, 9): + __class_getitem__ = classmethod(types.GenericAlias) + else: + @classmethod + def __class_getitem__(cls): + return cls + + cdef readonly bint frozen + cdef list _items + + def __init__(self, items=None): + self.frozen = False + if items is not None: + items = list(items) + else: + items = [] + self._items = items + + cdef object _check_frozen(self): + if self.frozen: + raise RuntimeError("Cannot modify frozen list.") + + cdef inline object _fast_len(self): + return len(self._items) + + def freeze(self): + self.frozen = True + + 
def __getitem__(self, index): + return self._items[index] + + def __setitem__(self, index, value): + self._check_frozen() + self._items[index] = value + + def __delitem__(self, index): + self._check_frozen() + del self._items[index] + + def __len__(self): + return self._fast_len() + + def __iter__(self): + return self._items.__iter__() + + def __reversed__(self): + return self._items.__reversed__() + + def __richcmp__(self, other, op): + if op == 0: # < + return list(self) < other + if op == 1: # <= + return list(self) <= other + if op == 2: # == + return list(self) == other + if op == 3: # != + return list(self) != other + if op == 4: # > + return list(self) > other + if op == 5: # => + return list(self) >= other + + def insert(self, pos, item): + self._check_frozen() + self._items.insert(pos, item) + + def __contains__(self, item): + return item in self._items + + def __iadd__(self, items): + self._check_frozen() + self._items += list(items) + return self + + def index(self, item): + return self._items.index(item) + + def remove(self, item): + self._check_frozen() + self._items.remove(item) + + def clear(self): + self._check_frozen() + self._items.clear() + + def extend(self, items): + self._check_frozen() + self._items += list(items) + + def reverse(self): + self._check_frozen() + self._items.reverse() + + def pop(self, index=-1): + self._check_frozen() + return self._items.pop(index) + + def append(self, item): + self._check_frozen() + return self._items.append(item) + + def count(self, item): + return self._items.count(item) + + def __repr__(self): + return ''.format(self.frozen, + self._items) + + def __hash__(self): + if self.frozen: + return hash(tuple(self._items)) + else: + raise RuntimeError("Cannot hash unfrozen list.") + + +MutableSequence.register(FrozenList) diff --git a/llmeval-env/lib/python3.10/site-packages/frozenlist/py.typed b/llmeval-env/lib/python3.10/site-packages/frozenlist/py.typed new file mode 100644 index 
0000000000000000000000000000000000000000..f5642f79f21d872f010979dcf6f0c4a415acc19d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/frozenlist/py.typed @@ -0,0 +1 @@ +Marker diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pytz/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4541d694f44671ae1f3a40c38da4ea6d708b92de Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/__pycache__/exceptions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pytz/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d92e9bf8569cd1a7c2633784d7ad47637afe5ec Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/__pycache__/exceptions.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/__pycache__/lazy.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pytz/__pycache__/lazy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce815c965e199854ecfc7325acaa1d0ddb4daa79 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/__pycache__/lazy.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/__pycache__/reference.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pytz/__pycache__/reference.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6bae205845e7ebab90396415ec7cdd3e8aa09e5e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/__pycache__/reference.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/__pycache__/tzfile.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pytz/__pycache__/tzfile.cpython-310.pyc 
new file mode 100644 index 0000000000000000000000000000000000000000..78d75ee2e958a49fa96810425a8edd455b860949 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/__pycache__/tzfile.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/__pycache__/tzinfo.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pytz/__pycache__/tzinfo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a0edbd02765483aac56cd02324d51d0152fa975 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/__pycache__/tzinfo.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Casey b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Casey new file mode 100644 index 0000000000000000000000000000000000000000..586a7653ef20d04440bfcb6645454ef215a1bba4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Casey differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Davis b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Davis new file mode 100644 index 0000000000000000000000000000000000000000..d4d47b24647bcabc981440f0859e94228428798d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Davis differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/DumontDUrville b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/DumontDUrville new file mode 100644 index 0000000000000000000000000000000000000000..7be2474dd91c8a7da181fcda09d838254b890d75 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/DumontDUrville differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Macquarie b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Macquarie new file mode 100644 index 
0000000000000000000000000000000000000000..9e7cc687d76b00d8f112245d5c5d2f20a2a61814 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Macquarie differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Mawson b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Mawson new file mode 100644 index 0000000000000000000000000000000000000000..6d93f6e1d3f76bcb6325f503958d19798b098fa0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Mawson differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/McMurdo b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/McMurdo new file mode 100644 index 0000000000000000000000000000000000000000..6575fdce31183d8238b18f2f30ab5b9227c7071c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/McMurdo differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Palmer b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Palmer new file mode 100644 index 0000000000000000000000000000000000000000..9c8fd317e0537afd4066001f6700cd6490fbe5a8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Palmer differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Rothera b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Rothera new file mode 100644 index 0000000000000000000000000000000000000000..241cc44d507c50777f7225df197765e522f22313 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Rothera differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/South_Pole b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/South_Pole new file mode 100644 index 0000000000000000000000000000000000000000..6575fdce31183d8238b18f2f30ab5b9227c7071c 
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/South_Pole differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Syowa b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Syowa new file mode 100644 index 0000000000000000000000000000000000000000..8c8062471dce91a5be827d6908795ee7391a4afc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Syowa differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Troll b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Troll new file mode 100644 index 0000000000000000000000000000000000000000..a1dcea14de9cfb95311ebe94e8a1096c27800941 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Troll differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Vostok b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Vostok new file mode 100644 index 0000000000000000000000000000000000000000..016e06b1bbc8d2e73d07265ef12e2be0c77982f4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Antarctica/Vostok differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/ACT b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/ACT new file mode 100644 index 0000000000000000000000000000000000000000..0aea4c3d43e504dafabc031d7ca9cbe8db46163c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/ACT differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Adelaide b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Adelaide new file mode 100644 index 0000000000000000000000000000000000000000..f5dedca59e2b220f7395c73f60ff26e610373e8b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Adelaide differ 
diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Brisbane b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Brisbane new file mode 100644 index 0000000000000000000000000000000000000000..7ff9949ffa93e44835ab133998b89e440094f909 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Brisbane differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Broken_Hill b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Broken_Hill new file mode 100644 index 0000000000000000000000000000000000000000..698c76e30e91f568a29daca12993cfacbfdbf83e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Broken_Hill differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Canberra b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Canberra new file mode 100644 index 0000000000000000000000000000000000000000..0aea4c3d43e504dafabc031d7ca9cbe8db46163c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Canberra differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Currie b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Currie new file mode 100644 index 0000000000000000000000000000000000000000..3adb8e1bf7c6ec51f1c100538799271d7d7a6e6f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Currie differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Darwin b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Darwin new file mode 100644 index 0000000000000000000000000000000000000000..74a30879bc6180d588a706451226cb4c95faf79d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Darwin differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Eucla 
b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Eucla new file mode 100644 index 0000000000000000000000000000000000000000..1551e96cbc3de5565356954b61aac3c4388e90db Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Eucla differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Hobart b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Hobart new file mode 100644 index 0000000000000000000000000000000000000000..3adb8e1bf7c6ec51f1c100538799271d7d7a6e6f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Hobart differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/LHI b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/LHI new file mode 100644 index 0000000000000000000000000000000000000000..069a95ad686c1139e2ff2b9ce94dc5ef5bc98c67 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/LHI differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Lindeman b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Lindeman new file mode 100644 index 0000000000000000000000000000000000000000..4ee1825abfe65887069dcbd10bcf786d50ba0702 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Lindeman differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Lord_Howe b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Lord_Howe new file mode 100644 index 0000000000000000000000000000000000000000..069a95ad686c1139e2ff2b9ce94dc5ef5bc98c67 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Lord_Howe differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Melbourne b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Melbourne new file mode 100644 index 
0000000000000000000000000000000000000000..ee903f4b1fc292bc9cbec7b501a266030ef3510e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Melbourne differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/NSW b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/NSW new file mode 100644 index 0000000000000000000000000000000000000000..0aea4c3d43e504dafabc031d7ca9cbe8db46163c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/NSW differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/North b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/North new file mode 100644 index 0000000000000000000000000000000000000000..74a30879bc6180d588a706451226cb4c95faf79d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/North differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Perth b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Perth new file mode 100644 index 0000000000000000000000000000000000000000..f8ddbdf215d34b022af11c3d1930dd6ea4dca87e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Perth differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Queensland b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Queensland new file mode 100644 index 0000000000000000000000000000000000000000..7ff9949ffa93e44835ab133998b89e440094f909 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Queensland differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/South b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/South new file mode 100644 index 0000000000000000000000000000000000000000..f5dedca59e2b220f7395c73f60ff26e610373e8b Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/South differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Sydney b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Sydney new file mode 100644 index 0000000000000000000000000000000000000000..0aea4c3d43e504dafabc031d7ca9cbe8db46163c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Sydney differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Tasmania b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Tasmania new file mode 100644 index 0000000000000000000000000000000000000000..3adb8e1bf7c6ec51f1c100538799271d7d7a6e6f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Tasmania differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Victoria b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Victoria new file mode 100644 index 0000000000000000000000000000000000000000..ee903f4b1fc292bc9cbec7b501a266030ef3510e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Victoria differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/West b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/West new file mode 100644 index 0000000000000000000000000000000000000000..f8ddbdf215d34b022af11c3d1930dd6ea4dca87e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/West differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Yancowinna b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Yancowinna new file mode 100644 index 0000000000000000000000000000000000000000..698c76e30e91f568a29daca12993cfacbfdbf83e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Yancowinna differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Chile/Continental b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Chile/Continental new file mode 100644 index 0000000000000000000000000000000000000000..010c6bd04cae79078540da560ce38400bfe0ade6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Chile/Continental differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Chile/EasterIsland b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Chile/EasterIsland new file mode 100644 index 0000000000000000000000000000000000000000..184cb6a83b3392d0492c42297531c85e7e38c4f5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/Chile/EasterIsland differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/US/Eastern b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/US/Eastern new file mode 100644 index 0000000000000000000000000000000000000000..a8b9ab1992257d721ad627b14f535c3d4b020888 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/US/Eastern differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/US/Indiana-Starke b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/US/Indiana-Starke new file mode 100644 index 0000000000000000000000000000000000000000..025d132dd48ba978c6fedf86d70173127be49d49 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/US/Indiana-Starke differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/US/Mountain b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/US/Mountain new file mode 100644 index 0000000000000000000000000000000000000000..abb2b974a47eb3e5c8b4f5d4370baf4898b239ab Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/US/Mountain differ diff --git a/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/US/Pacific b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/US/Pacific 
new file mode 100644 index 0000000000000000000000000000000000000000..610e7af5fc13d9784de30d272c7c39d7938873a0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pytz/zoneinfo/US/Pacific differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/__init__.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4d7fa3d7dd3835568205cce19933a793c8fe9660 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/__init__.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may not +# use this file except in compliance with the License. A copy of the License +# is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is distributed on +# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+ +__version__ = '2.4.2' +__description__ = 'Hassle-free computation of shareable, comparable, and reproducible BLEU, chrF, and TER scores' + + +from .utils import smart_open, SACREBLEU_DIR, download_test_set +from .utils import get_source_file, get_reference_files +from .utils import get_available_testsets, get_langpairs_for_testset +from .metrics.helpers import extract_word_ngrams, extract_char_ngrams +from .dataset import DATASETS +from .metrics import BLEU, CHRF, TER + +# Backward compatibility functions for old style API access (<= 1.4.10) +from .compat import corpus_bleu, raw_corpus_bleu, sentence_bleu +from .compat import corpus_chrf, sentence_chrf +from .compat import corpus_ter, sentence_ter + +__all__ = [ + 'smart_open', 'SACREBLEU_DIR', 'download_test_set', + 'get_source_file', 'get_reference_files', + 'get_available_testsets', 'get_langpairs_for_testset', + 'extract_word_ngrams', 'extract_char_ngrams', + 'DATASETS', + 'BLEU', 'CHRF', 'TER', + 'corpus_bleu', 'raw_corpus_bleu', 'sentence_bleu', + 'corpus_chrf', 'sentence_chrf', + 'corpus_ter', 'sentence_ter' +] diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/__main__.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..3833741e8076dbac776a383f8e05ce8cafaac92e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/__main__.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may not +# use this file except in compliance with the License. A copy of the License +# is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is distributed on +# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. 
See the License for the specific language governing +# permissions and limitations under the License. + +""" +SacreBLEU provides hassle-free computation of shareable, comparable, and reproducible BLEU scores. +Inspired by Rico Sennrich's `multi-bleu-detok.perl`, it produces the official WMT scores but works with plain text. +It also knows all the standard test sets and handles downloading, processing, and tokenization for you. + +See the [README.md] file for more information. +""" +from .sacrebleu import main + +if __name__ == '__main__': + main() diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed3dd7ccaead80dc6115ee87a62c28566e517c82 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/__pycache__/__main__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c0fa5d4088b90e99704d51f94b39192193d9621 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/__pycache__/__main__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/__pycache__/compat.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/sacrebleu/__pycache__/compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..255afef3dff3e0028976eaef580219f1fec6f780 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/sacrebleu/__pycache__/compat.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/__pycache__/sacrebleu.cpython-310.pyc 
from typing import Sequence, Optional

from .metrics import BLEU, CHRF, TER, BLEUScore, CHRFScore, TERScore


######################################################################
# Backward compatibility functions for old style API access (< 1.4.11)
######################################################################
def corpus_bleu(hypotheses: Sequence[str],
                references: Sequence[Sequence[str]],
                smooth_method='exp',
                smooth_value=None,
                force=False,
                lowercase=False,
                tokenize=BLEU.TOKENIZER_DEFAULT,
                use_effective_order=False) -> BLEUScore:
    """Compute corpus-level BLEU for a set of hypotheses and reference(s).

    :param hypotheses: A sequence of hypothesis strings.
    :param references: A sequence of reference documents, each a sequence
        of reference strings.
    :param smooth_method: Smoothing method ('floor', 'add-k', 'exp' or 'none').
    :param smooth_value: Smoothing value for 'floor' and 'add-k' methods;
        `None` falls back to the method's default.
    :param force: Ignore data that looks already tokenized.
    :param lowercase: Lowercase the data before scoring.
    :param tokenize: The tokenizer to use.
    :param use_effective_order: Don't take into account n-gram orders
        without any match.
    :return: A `BLEUScore` object.
    """
    scorer = BLEU(
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        tokenize=tokenize,
        effective_order=use_effective_order,
    )
    return scorer.corpus_score(hypotheses, references)


def raw_corpus_bleu(hypotheses: Sequence[str],
                    references: Sequence[Sequence[str]],
                    smooth_value: Optional[float] = BLEU.SMOOTH_DEFAULTS['floor']) -> BLEUScore:
    """Compute corpus-level BLEU on completely untokenized ("raw") text.

    Convenience wrapper around :func:`corpus_bleu` that disables
    tokenization entirely (neither hypotheses nor references are touched)
    and applies `floor` smoothing with a default value of 0.1.

    :param hypotheses: A sequence of hypothesis strings.
    :param references: A sequence of reference documents, each a sequence
        of reference strings.
    :param smooth_value: The smoothing value for `floor`; defaults to 0.1.
    :return: A `BLEUScore` object.
    """
    # Fixed settings: floor smoothing, no tokenizer, effective-order BLEU.
    return corpus_bleu(
        hypotheses,
        references,
        smooth_method='floor',
        smooth_value=smooth_value,
        force=True,
        tokenize='none',
        use_effective_order=True,
    )


def sentence_bleu(hypothesis: str,
                  references: Sequence[str],
                  smooth_method: str = 'exp',
                  smooth_value: Optional[float] = None,
                  lowercase: bool = False,
                  tokenize=BLEU.TOKENIZER_DEFAULT,
                  use_effective_order: bool = True) -> BLEUScore:
    """Compute BLEU for one sentence against one or more references.

    Disclaimer: BLEU is a corpus-level metric; sentence-level use is not
    its intended application.

    :param hypothesis: A single hypothesis string.
    :param references: A sequence of reference strings.
    :param smooth_method: Smoothing method ('floor', 'add-k', 'exp' or 'none').
    :param smooth_value: Smoothing value for 'floor' and 'add-k' methods;
        `None` falls back to the method's default.
    :param lowercase: Lowercase the data before scoring.
    :param tokenize: The tokenizer to use.
    :param use_effective_order: Don't take into account n-gram orders
        without any match.
    :return: A `BLEUScore` object.
    """
    scorer = BLEU(
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=False,
        lowercase=lowercase,
        tokenize=tokenize,
        effective_order=use_effective_order,
    )
    return scorer.sentence_score(hypothesis, references)


def corpus_chrf(hypotheses: Sequence[str],
                references: Sequence[Sequence[str]],
                char_order: int = CHRF.CHAR_ORDER,
                word_order: int = CHRF.WORD_ORDER,
                beta: int = CHRF.BETA,
                remove_whitespace: bool = True,
                eps_smoothing: bool = False) -> CHRFScore:
    """Compute corpus-level chrF (chrF++ when `word_order == 2`).

    :param hypotheses: A sequence of hypothesis strings.
    :param references: A sequence of reference documents, each a sequence
        of reference strings.
    :param char_order: Character n-gram order.
    :param word_order: Word n-gram order; 2 gives chrF++.
    :param beta: Importance of recall w.r.t. precision.
    :param remove_whitespace: If `True`, strip whitespace before character
        n-gram extraction.
    :param eps_smoothing: If `True`, apply epsilon smoothing as in the
        reference chrF++.py, NLTK and Moses implementations; otherwise use
        effective match order as in sacreBLEU < 2.0.0.
    :return: A `CHRFScore` object.
    """
    chrf_scorer = CHRF(
        char_order=char_order,
        word_order=word_order,
        beta=beta,
        # CHRF's `whitespace` flag has the opposite sense of this wrapper's
        # `remove_whitespace` argument.
        whitespace=not remove_whitespace,
        eps_smoothing=eps_smoothing,
    )
    return chrf_scorer.corpus_score(hypotheses, references)


def sentence_chrf(hypothesis: str,
                  references: Sequence[str],
                  char_order: int = CHRF.CHAR_ORDER,
                  word_order: int = CHRF.WORD_ORDER,
                  beta: int = CHRF.BETA,
                  remove_whitespace: bool = True,
                  eps_smoothing: bool = False) -> CHRFScore:
    """Compute sentence-level chrF (chrF++ when `word_order == 2`).

    :param hypothesis: A single hypothesis string.
    :param references: A sequence of reference strings.
    :param char_order: Character n-gram order.
    :param word_order: Word n-gram order; 2 gives chrF++.
    :param beta: Importance of recall w.r.t. precision.
    :param remove_whitespace: If `True`, strip whitespace before character
        n-gram extraction.
    :param eps_smoothing: If `True`, apply epsilon smoothing as in the
        reference chrF++.py, NLTK and Moses implementations; otherwise use
        effective match order as in sacreBLEU < 2.0.0.
    :return: A `CHRFScore` object.
    """
    chrf_scorer = CHRF(
        char_order=char_order,
        word_order=word_order,
        beta=beta,
        whitespace=not remove_whitespace,
        eps_smoothing=eps_smoothing,
    )
    return chrf_scorer.sentence_score(hypothesis, references)


def corpus_ter(hypotheses: Sequence[str],
               references: Sequence[Sequence[str]],
               normalized: bool = False,
               no_punct: bool = False,
               asian_support: bool = False,
               case_sensitive: bool = False) -> TERScore:
    """Compute corpus-level TER.

    :param hypotheses: A sequence of hypothesis strings.
    :param references: A sequence of reference documents, each a sequence
        of reference strings.
    :param normalized: Enable character normalization.
    :param no_punct: Remove punctuation.
    :param asian_support: Enable special treatment of Asian characters.
    :param case_sensitive: Enable case-sensitivity.
    :return: A `TERScore` object.
    """
    ter_scorer = TER(
        normalized=normalized,
        no_punct=no_punct,
        asian_support=asian_support,
        case_sensitive=case_sensitive,
    )
    return ter_scorer.corpus_score(hypotheses, references)


def sentence_ter(hypothesis: str,
                 references: Sequence[str],
                 normalized: bool = False,
                 no_punct: bool = False,
                 asian_support: bool = False,
                 case_sensitive: bool = False) -> TERScore:
    """Compute sentence-level TER.

    :param hypothesis: A single hypothesis string.
    :param references: A sequence of reference strings.
    :param normalized: Enable character normalization.
    :param no_punct: Remove punctuation.
    :param asian_support: Enable special treatment of Asian characters.
    :param case_sensitive: Enable case-sensitivity.
    :return: A `TERScore` object.
    """
    ter_scorer = TER(
        normalized=normalized,
        no_punct=no_punct,
        asian_support=asian_support,
        case_sensitive=case_sensitive,
    )
    return ter_scorer.sentence_score(hypothesis, references)
import os

import lxml.etree as ET

from ..utils import smart_open
from .base import Dataset

from collections import defaultdict


def _get_field_by_translator(translator):
    # Reference fields are named "ref" for the anonymous translator and
    # "ref:<translator>" otherwise; this label is used as a dict key and
    # as the output-file field name.
    if not translator:
        return "ref"
    else:
        return f"ref:{translator}"

class WMTXMLDataset(Dataset):
    """
    The 2021+ WMT dataset format. Everything is contained in a single file.
    Can be parsed with the lxml parser.
    """
    @staticmethod
    def _unwrap_wmt21_or_later(raw_file):
        """
        Unwraps the XML file from wmt21 or later.
        This script is adapted from https://github.com/wmt-conference/wmt-format-tools

        :param raw_file: The raw xml file to unwrap.
        :return: Dictionary which contains the following fields:
            - `src`: The source sentences.
            - `docid`: ID indicating which document the sentences belong to.
            - `origlang`: The original language of the document.
            - `ref:{translator}`: The references produced by each translator.
            - `ref`: An alias for the references from the first translator.
        """
        tree = ET.parse(raw_file)
        # Find and check the documents (src, ref, hyp)
        src_langs, ref_langs, translators = set(), set(), set()
        for src_doc in tree.getroot().findall(".//src"):
            src_langs.add(src_doc.get("lang"))

        for ref_doc in tree.getroot().findall(".//ref"):
            ref_langs.add(ref_doc.get("lang"))
            # `translator` may be None for unattributed references; None is a
            # valid member of `translators` and maps to the plain "ref" field.
            translator = ref_doc.get("translator")
            translators.add(translator)

        # A test-set file is expected to be a single language pair.
        assert (
            len(src_langs) == 1
        ), f"Multiple source languages found in the file: {raw_file}"
        assert (
            len(ref_langs) == 1
        ), f"Found {len(ref_langs)} reference languages found in the file: {raw_file}"

        src = []
        docids = []
        orig_langs = []
        domains = []

        # One parallel list of reference lines per translator label.
        refs = { _get_field_by_translator(translator): [] for translator in translators }

        # System outputs keyed by system name; lists are built in parallel
        # with `src`.
        systems = defaultdict(list)

        src_sent_count, doc_count = 0, 0
        for doc in tree.getroot().findall(".//doc"):
            docid = doc.attrib["id"]
            origlang = doc.attrib["origlang"]
            # present wmt22++
            domain = doc.attrib.get("domain", None)

            # Skip the testsuite
            if "testsuite" in doc.attrib:
                continue

            doc_count += 1
            src_sents = {
                int(seg.get("id")): seg.text for seg in doc.findall(".//src//seg")
            }

            # Map segment id -> text for one <ref> or <hyp> document,
            # substituting "" for empty segments.
            def get_sents(doc):
                return {
                    int(seg.get("id")): seg.text if seg.text else ""
                    for seg in doc.findall(".//seg")
                }

            ref_docs = doc.findall(".//ref")

            trans_to_ref = {
                ref_doc.get("translator"): get_sents(ref_doc) for ref_doc in ref_docs
            }

            hyp_docs = doc.findall(".//hyp")
            hyps = {
                hyp_doc.get("system"): get_sents(hyp_doc) for hyp_doc in hyp_docs
            }

            # Iterate segments in id order so all output lists stay aligned.
            for seg_id in sorted(src_sents.keys()):
                # no ref translation is available for this segment
                if not any([value.get(seg_id, "") for value in trans_to_ref.values()]):
                    continue
                for translator in translators:
                    # NOTE(review): the fallback `{translator: {}}` never
                    # contains an int seg_id key, so a missing translator
                    # yields "" — presumably a placeholder dict; verify
                    # against upstream wmt-format-tools.
                    refs[_get_field_by_translator(translator)].append(
                        trans_to_ref.get(translator, {translator: {}}).get(seg_id, "")
                    )
                src.append(src_sents[seg_id])
                for system_name in hyps.keys():
                    systems[system_name].append(hyps[system_name][seg_id])
                docids.append(docid)
                orig_langs.append(origlang)
                if domain is not None:
                    domains.append(domain)
                src_sent_count += 1

        data = {"src": src, **refs, "docid": docids, "origlang": orig_langs, **systems}
        # Only emit the "domain" field when at least one doc carried one.
        if len(domains):
            data["domain"] = domains

        return data

    def _get_langpair_path(self, langpair):
        """
        Returns the path for this language pair.
        This is useful because in WMT22, the language-pair data structure can be a dict,
        in order to allow for overriding which test set to use.

        :param langpair: The language pair (e.g., "de-en").
        :return: Absolute path to the raw XML file for this pair.
        """
        langpair_data = self._get_langpair_metadata(langpair)[langpair]
        # dict form carries an explicit "path"; list form uses the first entry.
        rel_path = langpair_data["path"] if isinstance(langpair_data, dict) else langpair_data[0]
        return os.path.join(self._rawdir, rel_path)

    def process_to_text(self, langpair=None):
        """Processes raw files to plain text files.

        :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed.
        """
        # ensure that the dataset is downloaded
        self.maybe_download()

        for langpair in sorted(self._get_langpair_metadata(langpair).keys()):
            # The data type can be a list of paths, or a dict, containing the "path"
            # and an override on which labeled reference to use (key "refs")
            rawfile = self._get_langpair_path(langpair)

            with smart_open(rawfile) as fin:
                fields = self._unwrap_wmt21_or_later(fin)

            # Write one plain-text file per extracted field.
            for fieldname in fields:
                textfile = self._get_txt_file_path(langpair, fieldname)

                # skip if the file already exists
                if os.path.exists(textfile) and os.path.getsize(textfile) > 0:
                    continue

                with smart_open(textfile, "w") as fout:
                    for line in fields[fieldname]:
                        print(self._clean(line), file=fout)

    def _get_langpair_allowed_refs(self, langpair):
        """
        Returns the preferred references for this language pair.
        This can be set in the language pair block (as in WMT22), and backs off to the
        test-set-level default, or nothing.

        There is one exception. In the metadata, sometimes there is no translator field
        listed (e.g., wmt22:liv-en). In this case, the reference is set to "", and the
        field "ref" is returned.

        :param langpair: The language pair (e.g., "de-en").
        :return: List of allowed reference field labels (e.g., ["ref:A"]).
        """
        defaults = self.kwargs.get("refs", [])
        langpair_data = self._get_langpair_metadata(langpair)[langpair]
        if isinstance(langpair_data, dict):
            allowed_refs = langpair_data.get("refs", defaults)
        else:
            allowed_refs = defaults
        # Normalize translator names to their field labels ("ref"/"ref:X").
        allowed_refs = [_get_field_by_translator(ref) for ref in allowed_refs]

        return allowed_refs

    def get_reference_files(self, langpair):
        """
        Returns the requested reference files.
        This is defined as a default at the test-set level, and can be overridden per language.

        :param langpair: The language pair (e.g., "de-en").
        :return: List of paths to the permitted reference files.
        """
        # Iterate through the (label, file path) pairs, looking for permitted labels
        allowed_refs = self._get_langpair_allowed_refs(langpair)
        all_files = self.get_files(langpair)
        all_fields = self.fieldnames(langpair)
        ref_files = [
            f for f, field in zip(all_files, all_fields) if field in allowed_refs
        ]
        return ref_files

    def fieldnames(self, langpair):
        """
        Return a list of all the field names. For most source, this is just
        the source and the reference. For others, it might include the document
        ID for each line, or the original language (origLang).

        get_files() should return the same number of items as this.

        :param langpair: The language pair (e.g., "de-en")
        :return: a list of field names
        """
        self.maybe_download()
        rawfile = self._get_langpair_path(langpair)

        # Re-parse the XML to discover which fields exist for this pair.
        with smart_open(rawfile) as fin:
            fields = self._unwrap_wmt21_or_later(fin)

        return list(fields.keys())
+""" + +import io +import os +import sys +import logging +import pathlib +import argparse +from collections import defaultdict + + +# Allows calling the script as a standalone utility +# See: https://github.com/mjpost/sacrebleu/issues/86 +if __package__ is None and __name__ == '__main__': + parent = pathlib.Path(__file__).absolute().parents[1] + sys.path.insert(0, str(parent)) + __package__ = 'sacrebleu' + +from .dataset import DATASETS +from .metrics import METRICS +from .utils import smart_open, filter_subset, get_langpairs_for_testset, get_available_testsets +from .utils import print_test_set, print_subset_results, get_reference_files, download_test_set +from .utils import args_to_dict, sanity_check_lengths, print_results_table, print_single_results +from .utils import get_available_testsets_for_langpair, Color + +from . import __version__ as VERSION + +sacrelogger = logging.getLogger('sacrebleu') + +try: + # SIGPIPE is not available on Windows machines, throwing an exception. + from signal import SIGPIPE # type: ignore + + # If SIGPIPE is available, change behaviour to default instead of ignore. 
def parse_args():
    """Build the sacreBLEU CLI argument parser and parse ``sys.argv``.

    Groups the options into BLEU / chrF / TER / significance / reporting
    sections. The output format may be overridden after parsing via the
    ``SACREBLEU_FORMAT`` environment variable.

    :return: The parsed `argparse.Namespace`.
    """
    arg_parser = argparse.ArgumentParser(
        description='sacreBLEU: Hassle-free computation of shareable BLEU scores.\n'
        'Quick usage: score your detokenized output against WMT\'14 EN-DE:\n'
        '    cat output.detok.de | sacrebleu -t wmt14 -l en-de',
        formatter_class=argparse.RawDescriptionHelpFormatter)

    arg_parser.add_argument('--citation', '--cite', default=False, action='store_true',
                            help='Dump the bibtex citation and quit.')
    arg_parser.add_argument('--list', default=False, action='store_true',
                            help='Print a list of all available test sets.')
    arg_parser.add_argument('--test-set', '-t', type=str, default=None,
                            help='The test set to use (see also --list) or a comma-separated list of test sets to be concatenated.')
    arg_parser.add_argument('--language-pair', '-l', dest='langpair', default=None,
                            help='Source-target language pair (2-char ISO639-1 codes).')
    arg_parser.add_argument('--origlang', '-ol', dest='origlang', default=None,
                            help='Use a subset of sentences with a given original language (2-char ISO639-1 codes), "non-" prefix means negation.')
    arg_parser.add_argument('--subset', dest='subset', default=None,
                            help='Use a subset of sentences whose document annotation matches a given regex (see SUBSETS in the source code).')
    arg_parser.add_argument('--download', type=str, default=None,
                            help='Download a test set and quit.')
    arg_parser.add_argument('--echo', nargs="+", type=str, default=None,
                            help='Output the source (src), reference (ref), or other available field (docid, ref:A, ref:1 for example) to STDOUT and quit. '
                            # FIX: added separating punctuation/spaces between the
                            # concatenated literals; the originals ran together as
                            # "`-t`For example" and "given.You can also".
                            'You can get available fields with options `--list` and `-t`. '
                            'For example: `sacrebleu -t wmt21 --list`. '
                            'If multiple fields are given, they are outputted with tsv format in the order they are given. '
                            'You can also use `--echo all` to output all available fields.')

    # I/O related arguments
    # Multiple input files can be provided for significance testing for example
    arg_parser.add_argument('--input', '-i', type=str, nargs='*', default=None,
                            help='Read input from file(s) instead of STDIN.')
    arg_parser.add_argument('refs', nargs='*', default=[],
                            # FIX: "preceed" -> "precede" in the user-facing help.
                            help='Optional list of references. If given, it should precede the -i/--input argument.')
    arg_parser.add_argument('--num-refs', '-nr', type=int, default=1,
                            help='Split the reference stream on tabs, and expect this many references. (Default: %(default)s)')
    arg_parser.add_argument('--encoding', '-e', type=str, default='utf-8',
                            help='Open text files with specified encoding (Default: %(default)s)')

    # Metric selection
    avail_metrics = [m.lower() for m in METRICS]
    arg_parser.add_argument('--metrics', '-m', choices=avail_metrics, nargs='+', default=['bleu'],
                            help='Space-delimited list of metrics to compute (Default: bleu)')
    arg_parser.add_argument('--sentence-level', '-sl', action='store_true', help='Compute metric for each sentence.')

    # BLEU-related arguments
    # since sacreBLEU had only support for BLEU initially, the argument names
    # are not prefixed with 'bleu' as in chrF arguments for example.
    # Let's do that manually here through dest= options, as otherwise
    # things will get quite hard to maintain when other metrics are added.
    bleu_args = arg_parser.add_argument_group('BLEU related arguments')

    bleu_args.add_argument('--smooth-method', '-s', choices=METRICS['BLEU'].SMOOTH_DEFAULTS.keys(), default='exp',
                           dest='bleu_smooth_method',
                           help='Smoothing method: exponential decay, floor (increment zero counts), add-k (increment num/denom by k for n>1), or none. (Default: %(default)s)')
    bleu_args.add_argument('--smooth-value', '-sv', type=float, default=None,
                           dest='bleu_smooth_value',
                           help='The smoothing value. Only valid for floor and add-k. '
                           f"(Defaults: floor: {METRICS['BLEU'].SMOOTH_DEFAULTS['floor']}, "
                           f"add-k: {METRICS['BLEU'].SMOOTH_DEFAULTS['add-k']})")
    bleu_args.add_argument('--tokenize', '-tok', choices=METRICS['BLEU'].TOKENIZERS, default=None,
                           dest='bleu_tokenize',
                           help='Tokenization method to use for BLEU. If not provided, defaults to `zh` for Chinese, '
                           '`ja-mecab` for Japanese, `ko-mecab` for Korean and `13a` (mteval) otherwise.')
    bleu_args.add_argument('--lowercase', '-lc', dest='bleu_lowercase', action='store_true', default=False,
                           help='If True, enables case-insensitivity. (Default: %(default)s)')
    bleu_args.add_argument('--force', default=False, action='store_true',
                           dest='bleu_force', help='Insist that your tokenized input is actually detokenized.')

    # ChrF-related arguments
    chrf_args = arg_parser.add_argument_group('chrF related arguments')
    chrf_args.add_argument('--chrf-char-order', '-cc', type=int, default=METRICS['CHRF'].CHAR_ORDER,
                           help='Character n-gram order. (Default: %(default)s)')
    chrf_args.add_argument('--chrf-word-order', '-cw', type=int, default=METRICS['CHRF'].WORD_ORDER,
                           help='Word n-gram order (Default: %(default)s). If equals to 2, the metric is referred to as chrF++.')
    chrf_args.add_argument('--chrf-beta', type=int, default=METRICS['CHRF'].BETA,
                           help='Determine the importance of recall w.r.t precision. (Default: %(default)s)')
    chrf_args.add_argument('--chrf-whitespace', action='store_true', default=False,
                           help='Include whitespaces when extracting character n-grams. (Default: %(default)s)')
    chrf_args.add_argument('--chrf-lowercase', action='store_true', default=False,
                           help='Enable case-insensitivity. (Default: %(default)s)')
    chrf_args.add_argument('--chrf-eps-smoothing', action='store_true', default=False,
                           help='Enables epsilon smoothing similar to chrF++.py, NLTK and Moses; instead of effective order smoothing. (Default: %(default)s)')

    # TER related arguments
    ter_args = arg_parser.add_argument_group("TER related arguments (The defaults replicate TERCOM's behavior)")
    ter_args.add_argument('--ter-case-sensitive', action='store_true',
                          help='Enables case sensitivity. (Default: %(default)s)')
    ter_args.add_argument('--ter-asian-support', action='store_true',
                          help='Enables special treatment of Asian characters. (Default: %(default)s)')
    ter_args.add_argument('--ter-no-punct', action='store_true',
                          help='Removes punctuation. (Default: %(default)s)')
    ter_args.add_argument('--ter-normalized', action='store_true',
                          help='Applies basic normalization and tokenization. (Default: %(default)s)')

    # Bootstrap resampling for confidence intervals
    sign_args = arg_parser.add_argument_group('Confidence interval (CI) estimation for single-system evaluation')
    sign_args.add_argument('--confidence', '-ci', action='store_true',
                           help='Report confidence interval using bootstrap resampling.')
    sign_args.add_argument('--confidence-n', '-cin', type=int, default=1000,
                           help='Set the number of bootstrap resamples for CI estimation (Default: %(default)s).')

    # Paired significance testing
    pair_args = arg_parser.add_argument_group('Paired significance testing for multi-system evaluation')
    # --paired-ar and --paired-bs are mutually exclusive by construction.
    pair_args_choice = pair_args.add_mutually_exclusive_group()

    pair_args_choice.add_argument('--paired-ar', '-par', action='store_true',
                                  help='Perform paired test using approximate randomization (AR). This option is '
                                  'mutually exclusive with --paired-bs (Default: %(default)s).')
    pair_args_choice.add_argument('--paired-bs', '-pbs', action='store_true',
                                  help='Perform paired test using bootstrap resampling. This option is '
                                  'mutually exclusive with --paired-ar (Default: %(default)s).')

    pair_args.add_argument('--paired-ar-n', '-parn', type=int, default=10000,
                           help='Number of trials for approximate randomization test (Default: %(default)s).')

    pair_args.add_argument('--paired-bs-n', '-pbsn', type=int, default=1000,
                           help='Number of bootstrap resamples for paired bootstrap resampling test (Default: %(default)s).')

    pair_args.add_argument('--paired-jobs', '-j', type=int, default=1,
                           help='If 0, launches as many workers as the number of systems. If > 0, sets the number of workers manually. '
                           'This feature is currently not supported on Windows.')

    # Reporting related arguments
    report_args = arg_parser.add_argument_group('Reporting related arguments')
    report_args.add_argument('--quiet', '-q', default=False, action='store_true',
                             help='Suppress verbose messages.')
    report_args.add_argument('--short', '-sh', default=False, action='store_true',
                             help='Produce a shorter (less human readable) signature.')
    report_args.add_argument('--score-only', '-b', default=False, action='store_true',
                             help='Print only the computed score.')
    report_args.add_argument('--width', '-w', type=int, default=1,
                             help='Floating point width (Default: %(default)s).')
    report_args.add_argument('--detail', '-d', default=False, action='store_true',
                             help='Print detailed information (split test sets based on origlang).')
    report_args.add_argument('--no-color', '-nc', action='store_true',
                             help='Disable the occasional use of terminal colors.')

    output_formats = ['json', 'text', 'latex']
    report_args.add_argument('--format', '-f', default='json', choices=output_formats,
                             help='Set the output format. `latex` is only valid for multi-system mode whereas '
                             '`json` and `text` apply to single-system mode only. This flag is overridden if the '
                             'SACREBLEU_FORMAT environment variable is set to one of the valid choices (Default: %(default)s).')

    arg_parser.add_argument('--version', '-V', action='version', version='%(prog)s {}'.format(VERSION))

    args = arg_parser.parse_args()

    # Override the format from the environment, if any.
    # Invalid values are silently ignored so a typo cannot break scoring.
    if 'SACREBLEU_FORMAT' in os.environ:
        _new_value = os.environ['SACREBLEU_FORMAT'].lower()
        if _new_value in output_formats:
            args.format = _new_value

    return args
on Windows + import colorama + colorama.init() + + if not args.quiet: + logging.basicConfig(level=logging.INFO, format='sacreBLEU: %(message)s') + + if args.download: + download_test_set(args.download, args.langpair) + sys.exit(0) + + if args.list: + if args.test_set: + langpairs = get_langpairs_for_testset(args.test_set) + for pair in langpairs: + fields = DATASETS[args.test_set].fieldnames(pair) + print(f'{pair}: {", ".join(fields)}') + else: + if args.langpair: + print(f'The available test sets for {args.langpair} are:') + testsets = get_available_testsets_for_langpair(args.langpair) + else: + print('The available test sets are:') + testsets = get_available_testsets() + for testset in sorted(testsets): + desc = DATASETS[testset].description.strip() + print(f'{testset:<30}: {desc}') + sys.exit(0) + + if args.sentence_level and len(args.metrics) > 1: + sacrelogger.error('Only one metric can be used in sentence-level mode.') + sys.exit(1) + + if args.citation: + if not args.test_set: + sacrelogger.error('I need a test set (-t).') + sys.exit(1) + for test_set in args.test_set.split(','): + if 'citation' not in DATASETS[test_set]: + sacrelogger.error(f'No citation found for {test_set}') + else: + print(DATASETS[test_set].citation) + sys.exit(0) + + if args.num_refs != 1 and (args.test_set is not None or len(args.refs) > 1): + sacrelogger.error('The --num-refs argument allows you to provide any number of tab-delimited references in a single file.') + sacrelogger.error('You can only use it with externally provided references, however (i.e., not with `-t`),') + sacrelogger.error('and you cannot then provide multiple reference files.') + sys.exit(1) + + if args.test_set is not None: + for test_set in args.test_set.split(','): + if test_set not in DATASETS: + sacrelogger.error(f'Unknown test set {test_set!r}') + sacrelogger.error('Please run with --list to see the available test sets.') + sys.exit(1) + + if args.test_set is None: + if len(args.refs) == 0: + 
sacrelogger.error('If manual references given, make sure to provide them ' + 'before the -i/--input argument to avoid confusion.') + sacrelogger.error('Otherwise, I need a predefined test set (-t) from the following list:') + sacrelogger.error(get_available_testsets()) + sys.exit(1) + elif len(args.refs) > 0: + sacrelogger.error('I need exactly one of (a) a predefined test set (-t) or (b) a list of references') + sys.exit(1) + elif args.langpair is None: + sacrelogger.error('I need a language pair (-l). Use --list to see available language pairs for this test set.') + sys.exit(1) + else: + for test_set in args.test_set.split(','): + langpairs = get_langpairs_for_testset(test_set) + if args.langpair not in langpairs: + sacrelogger.error(f'No such language pair {args.langpair!r}') + sacrelogger.error(f'Available language pairs for {test_set!r} are:') + for lp in langpairs: + sacrelogger.error(f' > {lp}') + sys.exit(1) + + if args.echo: + if args.langpair is None or args.test_set is None: + sacrelogger.warning("--echo requires a test set (--t) and a language pair (-l)") + sys.exit(1) + for test_set in args.test_set.split(','): + print_test_set(test_set, args.langpair, args.echo, args.origlang, args.subset) + sys.exit(0) + + # Hack: inject target language info for BLEU, so that it can + # select the tokenizer based on it + if args.langpair: + args.bleu_trg_lang = args.langpair.split('-')[1] + + if args.test_set is not None and args.bleu_tokenize == 'none': + sacrelogger.warning( + "You are turning off BLEU's internal tokenizer " + "presumably to supply your own tokenized files.") + sacrelogger.warning( + "Published numbers will not be comparable to other papers.") + + # concat_ref_files is a list of list of reference filenames + # (concatenation happens if multiple test sets are given through -t) + # Example: [[testset1_refA, testset1_refB], [testset2_refA, testset2_refB]] + concat_ref_files = [] + if args.test_set is None: + concat_ref_files.append(args.refs) + else: 
+ # Multiple test sets can be given + for test_set in args.test_set.split(','): + ref_files = get_reference_files(test_set, args.langpair) + if len(ref_files) == 0: + sacrelogger.warning( + f'No references found for test set {test_set}/{args.langpair}.') + concat_ref_files.append(ref_files) + + ################# + # Read references + ################# + full_refs = [[] for x in range(max(len(concat_ref_files[0]), args.num_refs))] + for ref_files in concat_ref_files: + for refno, ref_file in enumerate(ref_files): + for lineno, line in enumerate(smart_open(ref_file, encoding=args.encoding), 1): + line = line.rstrip() + if args.num_refs == 1: + full_refs[refno].append(line) + else: + refs = line.split(sep='\t', maxsplit=args.num_refs - 1) + # We are strict in fixed number of references through CLI + # But the API supports having variable refs per each segment + # by simply having '' or None's as dummy placeholders + if len(refs) != args.num_refs: + sacrelogger.error(f'FATAL: line {lineno}: expected {args.num_refs} fields, but found {len(refs)}.') + sys.exit(17) + for refno, ref in enumerate(refs): + full_refs[refno].append(ref) + + # Decide on the number of final references, override the argument + args.num_refs = len(full_refs) + + # Read hypotheses + # Can't tokenize yet as each metric has its own way of tokenizing things + full_systems, sys_names = [], [] + + if args.input is None: + # Read from STDIN + inputfh = io.TextIOWrapper(sys.stdin.buffer, encoding=args.encoding) + + # guess the number of systems by looking at the first line + fields = inputfh.readline().rstrip().split('\t') + + # Set number of systems + num_sys = len(fields) + + # place the first lines already + full_systems = [[s] for s in fields] + + # Enumerate the systems + sys_names = [f'System {i + 1}' for i in range(num_sys)] + + # Read the rest + for line in inputfh: + fields = line.rstrip().split('\t') + if len(fields) != num_sys: + sacrelogger.error('FATAL: the number of tab-delimited fields in 
the input stream differ across lines.') + sys.exit(17) + # Place systems into the list + for sys_idx, sent in enumerate(fields): + full_systems[sys_idx].append(sent.rstrip()) + else: + # Separate files are given for each system output + # Ex: --input smt.txt nmt.txt + for fname in args.input: + sys_name = fname + + if sys_name in sys_names: + if paired_test_mode and sys_name == sys_names[0]: + # We skip loading a system, if it was already the baseline + sacrelogger.info(f'Ignoring {sys_name!r} as it was also given as the baseline.') + continue + else: + # To avoid ambiguities, we fail if two systems have same names + sacrelogger.error(f"{sys_name!r} already used to name a system.") + sacrelogger.error("Make sure to have a different basename for each system.") + sys.exit(1) + + # Read the system + lines = [] + for line in smart_open(fname, encoding=args.encoding): + lines.append(line.rstrip()) + full_systems.append(lines) + sys_names.append(sys_name) + + # Set final number of systems + num_sys = len(sys_names) + + # Add baseline prefix to the first system for clarity + if paired_test_mode: + if args.input is None: + # STDIN mode, no explicit system names + sys_names = ['Baseline'] + [f'System {i + 1}' for i in range(num_sys - 1)] + else: + # --input mode, we have names for the systems, just change the 1st one + sys_names[0] = f'Baseline: {sys_names[0]}' + + if args.sentence_level: + if num_sys > 1: + sacrelogger.error('Only one system can be evaluated in sentence-level mode.') + sys.exit(1) + if args.confidence or paired_test_mode: + sacrelogger.error('Statistical tests are unavailable in sentence-level mode.') + sys.exit(1) + + # >=2.0.0: effective_order is now part of BLEU class. For sentence-BLEU + # we now need to explicitly enable it without user's intervention + # for backward compatibility. 
+ args.bleu_effective_order = True + + if paired_test_mode and num_sys == 1: + sacrelogger.error('Paired tests require multiple input systems given to --input (-i).') + sys.exit(1) + + if num_sys > 1 and args.confidence: + sacrelogger.error('Use paired tests (--paired) for multiple systems.') + sys.exit(1) + + # Filter subsets if requested + outputs = filter_subset( + [*full_systems, *full_refs], args.test_set, args.langpair, + args.origlang, args.subset) + + # Unpack systems & references back + systems, refs = outputs[:num_sys], outputs[num_sys:] + + # Perform some sanity checks + for system in systems: + if len(system) == 0: + message = f'Test set {args.test_set!r} contains no sentence' + if args.origlang is not None or args.subset is not None: + message += ' with' + if args.origlang: + message += f' origlang={args.origlang}' + if args.subset: + message += f' subset={args.subset}' + args.subset + sacrelogger.error(message) + sys.exit(1) + + # Check lengths + sanity_check_lengths(system, refs, test_set=args.test_set) + + # Create the metrics + metrics = {} + for name in args.metrics: + # Each metric's specific arguments are prefixed with `metricname_` + # for grouping. Filter accordingly and strip the prefixes prior to + # metric object construction. 
+ metric_args = args_to_dict(args, name.lower(), strip_prefix=True) + + # This will cache reference stats for faster re-computation if required + metric_args['references'] = refs + + # Make it uppercase for the rest of the code + name = name.upper() + metrics[name] = METRICS[name](**metric_args) + + # Handle sentence level and quit + if args.sentence_level: + # one metric and one system in use for sentence-level + metric, system = list(metrics.values())[0], systems[0] + + for hypothesis, *references in zip(system, *refs): + score = metric.sentence_score(hypothesis, references) + sig = metric.get_signature().format(args.short) + print(score.format(args.width, args.score_only, sig)) + + sys.exit(0) + + if args.detail and args.format == 'json': + # The translationese info will interfere with JSON output, disable + args.format = 'text' + + ############################## + # Corpus level evaluation mode + ############################## + if num_sys == 1: + # Single system evaluation mode + results = [] + for name in sorted(metrics): + # compute the score + score = metrics[name].corpus_score( + system, references=None, + n_bootstrap=args.confidence_n if args.confidence else 1) + # get the signature + sig = metrics[name].get_signature().format( + args.short if args.format != 'json' else False) + results.append( + score.format(args.width, args.score_only, sig, args.format == 'json')) + + print_single_results(results, args) + + # Prints detailed information for translationese effect experiments + if args.detail: + print_subset_results(metrics, full_systems[0], full_refs, args) + else: + # Multi-system evaluation mode + named_systems = [(sys_names[i], systems[i]) for i in range(num_sys)] + sacrelogger.info(f'Found {num_sys} systems.') + + if not paired_test_mode: + # Bootstrap resampling or the usual single score computation mode + sigs = {} + scores = defaultdict(list) + scores['System'] = sys_names + + for sys_name, system in named_systems: + for name in sorted(metrics): + 
score = metrics[name].corpus_score(system, references=None) + sigs[score.name] = metrics[name].get_signature().format(args.short) + scores[score.name].append(score.format(args.width, True)) + + else: + # Paired significance testing mode + from .significance import PairedTest + + # Set params + test_type = 'bs' if args.paired_bs else 'ar' + n_samples = args.paired_bs_n if args.paired_bs else args.paired_ar_n + + ps = PairedTest(named_systems, metrics, references=None, + test_type=test_type, n_samples=n_samples, + n_jobs=args.paired_jobs) + + # Set back the number of trials + args.paired_n = ps.n_samples + + # Run the test + sigs, scores = ps() + + # Get signature strings + sigs = {k: v.format(args.short) for k, v in sigs.items()} + + # Dump the results + print_results_table(scores, sigs, args) + + +if __name__ == '__main__': + main() diff --git a/llmeval-env/lib/python3.10/site-packages/sacrebleu/significance.py b/llmeval-env/lib/python3.10/site-packages/sacrebleu/significance.py new file mode 100644 index 0000000000000000000000000000000000000000..a9c71d0ab935d0eb195b68126bcad4bdf0facd6e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sacrebleu/significance.py @@ -0,0 +1,435 @@ +import os +import logging +import multiprocessing as mp +from typing import Sequence, Dict, Optional, Tuple, List, Union, Any, Mapping + +import numpy as np + +from .metrics.base import Metric, Score, Signature + +IS_WINDOWS = os.name == 'nt' + + +sacrelogger = logging.getLogger('sacrebleu') + + +class Result: + """A container to represent results from a particular statistical + significance test. + :param score: The floating point score for the system at hand. + :param p_value: If exists, represents the p-value when the system at + hand is compared to a baseline using a paired test. + :param mean: When paired bootstrap test is applied, this represents + the true mean score estimated from bootstrap resamples of the system. 
def estimate_ci(scores: np.ndarray) -> Tuple[float, float]:
    """Compute the mean of `scores` and a 95% confidence interval around it.

    The CI is estimated non-parametrically: the scores are sorted and the
    values sitting 1/40 of the way in from each end (i.e. the 2.5th and
    97.5th percentile positions) are read off, with the CI reported as half
    the distance between them.

    :param scores: An array of floating point scores.
    :return: A tuple of (mean, 95% CI half-width).
    """
    ordered = np.sort(scores)
    n_scores = len(ordered)

    # 95% interval -> drop n/40 observations from each tail
    cut = n_scores // 40
    low_val = ordered[cut]
    high_val = ordered[n_scores - cut - 1]

    half_width = (high_val - low_val) / 2.0
    return (ordered.mean(), half_width)
pull entropy from OS + seed = os.environ.get('SACREBLEU_SEED', '12345') + _seed = None if seed.lower() == 'none' else int(seed) + rng = np.random.default_rng(_seed) + + # The indices that'll produce all bootstrap resamples at once + idxs = rng.choice(len(stats), size=(n_samples, len(stats)), replace=True) + + # convert to numpy array. float32 is more efficient + stats_np = np.array(stats, dtype='float32') + + # recompute scores for all resamples + scores = [ + metric._compute_score_from_stats(_s.sum(0)) for _s in stats_np[idxs]] + + return str(seed).lower(), scores + + +def _compute_p_value(stats: np.ndarray, real_difference: float) -> float: + """Computes the p-value given the sample statistics and the real statistic. + :param stats: A numpy array with the sample statistics. + :real_difference: The real statistic. + :return: The p-value. + """ + # Taken from: significance/StratifiedApproximateRandomizationTest.java + # https://github.com/jhclark/multeval.git + + # "the != is important. if we want to score the same system against itself + # having a zero difference should not be attributed to chance." + + c = np.sum(stats > real_difference).item() + + # "+1 applies here, though it only matters for small numbers of shufflings, + # which we typically never do. it's necessary to ensure the probability of + # falsely rejecting the null hypothesis is no greater than the rejection + # level of the test (see william and morgan on significance tests) + p = (c + 1) / (len(stats) + 1) + + return p + + +def _paired_ar_test(baseline_info: Dict[str, Tuple[np.ndarray, Result]], + sys_name: str, + hypotheses: Sequence[str], + references: Optional[Sequence[Sequence[str]]], + metrics: Dict[str, Metric], + n_samples: int = 10000, + n_ar_confidence: int = -1, + seed: Optional[int] = None) -> Tuple[str, Dict[str, Result]]: + """Paired two-sided approximate randomization (AR) test for MT evaluation. 
+ + :param baseline_info: A dictionary with `Metric` instances as the keys, + that contains sufficient statistics and a `Result` instance for the baseline system. + :param sys_name: The name of the system to be evaluated. + :param hypotheses: A sequence of string hypotheses for the system. + :param references: A sequence of reference documents with document being + defined as a sequence of reference strings. If `None`, references + will be used through each metric's internal cache. + :param metrics: A dictionary of `Metric` instances that will be computed + for each system. + :param n_samples: The number of AR trials. + :param n_ar_confidence: The number of bootstrap resamples to use for + confidence estimation. A value of -1 disables confidence estimation. + :param seed: The seed value for the RNG. If `None`, the RNG will not be + fixed to a particular seed. + + :return: A tuple with first element being the system name and the second + being a `Result` namedtuple. + """ + # Seed the RNG + rng = np.random.default_rng(seed) + + # Generate indices that'll select stats + pos_sel = rng.integers(2, size=(n_samples, len(hypotheses)), dtype=bool) + + # Flip mask to obtain selectors for system hypotheses + neg_sel = ~pos_sel + + if n_ar_confidence > 0: + # Perform confidence estimation as well + bs_idxs = rng.choice( + len(hypotheses), size=(n_ar_confidence, len(hypotheses)), replace=True) + + results = {} + + for name, metric in metrics.items(): + # Use pre-computed match stats for the baseline + bl_stats, bl_result = baseline_info[name] + + # Compute system's stats and score + sacrelogger.info(f'Computing {name} for {sys_name!r} and extracting sufficient statistics') + sys_stats = metric._extract_corpus_statistics(hypotheses, references) + sys_score = metric._aggregate_and_compute(sys_stats) + + # original test statistic: absolute difference between baseline and the system + diff = abs(bl_result.score - sys_score.score) + + sacrelogger.info(f' > Performing approximate 
def _paired_bs_test(baseline_info: Dict[str, Tuple[np.ndarray, Result]],
                    sys_name: str,
                    hypotheses: Sequence[str],
                    references: Optional[Sequence[Sequence[str]]],
                    metrics: Dict[str, Metric],
                    n_samples: int = 1000,
                    n_ar_confidence: int = -1,
                    seed: Optional[int] = None) -> Tuple[str, Dict[str, Result]]:
    """Paired bootstrap resampling test for MT evaluation. This function
    replicates the behavior of the Moses script called
    `bootstrap-hypothesis-difference-significance.pl`.

    :param baseline_info: A dictionary keyed by metric name that contains, per
        metric, the baseline's sufficient statistics and its `Result` instance.
    :param sys_name: The name of the system to be evaluated.
    :param hypotheses: A sequence of string hypotheses for the system.
    :param references: A sequence of reference documents with document being
        defined as a sequence of reference strings. If `None`, references
        will be used through each metric's internal cache.
    :param metrics: A dictionary of `Metric` instances that will be computed
        for each system.
    :param n_samples: The number of bootstrap resamples.
    :param n_ar_confidence: This parameter is not used for this function but
        is there for signature compatibility in the API (see `_paired_ar_test`).
    :param seed: The seed value for the RNG. If `None`, the RNG will not be
        fixed to a particular seed.

    :return: A tuple of the system name and a per-metric dict of `Result`
        instances (score, p-value, bootstrap mean and CI).
    """
    # Seed the RNG
    rng = np.random.default_rng(seed)

    results = {}

    # One (n_samples x n_segments) index matrix drives every metric's
    # resampling, so all metrics see the same resamples. (~10ms to generate)
    idxs = rng.choice(
        len(hypotheses), size=(n_samples, len(hypotheses)), replace=True)

    for name, metric in metrics.items():
        # Use pre-computed match stats for the baseline
        bl_stats, bl_result = baseline_info[name]

        # Compute system's stats and score
        sacrelogger.info(f'Computing {name} for {sys_name!r} and extracting sufficient statistics')
        sys_stats = metric._extract_corpus_statistics(hypotheses, references)
        sys_score = metric._aggregate_and_compute(sys_stats)

        # Convert to numpy arrays for efficient fancy indexing below
        sys_stats = np.array(sys_stats, dtype='float32')
        bl_stats = np.array(bl_stats, dtype='float32')

        # original test statistic: absolute difference between baseline and the system
        diff = abs(bl_result.score - sys_score.score)

        sacrelogger.info(f' > Performing paired bootstrap resampling test (# resamples: {n_samples})')
        # Re-score every resample: sum segment statistics per resample, then
        # compute the corpus-level score from the aggregate.
        scores_bl = np.array(
            [metric._compute_score_from_stats(_s.sum(0)).score for _s in bl_stats[idxs]])
        scores_sys = np.array(
            [metric._compute_score_from_stats(_s.sum(0)).score for _s in sys_stats[idxs]])

        # Estimate the system's bootstrap mean and 95% CI as well
        sys_mean, sys_ci = estimate_ci(scores_sys)

        # Center the per-resample absolute score differences (the sampled
        # statistics under the null hypothesis)
        sample_diffs = np.abs(scores_sys - scores_bl)
        stats = sample_diffs - sample_diffs.mean()

        # Count the statistical difference and compute the p-value
        p = _compute_p_value(stats, diff)

        results[name] = Result(sys_score.score, p, sys_mean, sys_ci)

    return sys_name, results
+ """ + _DEFAULT_SAMPLES = { + 'ar': 10000, + 'bs': 1000, + } + + def __init__(self, named_systems: List[Tuple[str, Sequence[str]]], + metrics: Mapping[str, Metric], + references: Optional[Sequence[Sequence[str]]], + test_type: str = 'ar', + n_samples: int = 0, + n_ar_confidence: int = -1, + n_jobs: int = 1): + assert test_type in ('ar', 'bs'), f"Unknown test type {test_type!r}" + self.test_type = test_type + + # Set method + if self.test_type == 'ar': + self._fn = _paired_ar_test + elif self.test_type == 'bs': + self._fn = _paired_bs_test + + # Set numpy RNG's seed + # If given -> Fix to the given value + # If given but =='[Nn]one', don't fix the seed i.e. pull entropy from OS + seed = os.environ.get('SACREBLEU_SEED', '12345') + self._seed = None if seed.lower() == 'none' else int(seed) + self.n_jobs = n_jobs + self.references = references + self.named_systems = named_systems + + # Set the defaults if requested + self.n_ar_confidence = n_ar_confidence if n_ar_confidence != 0 else \ + self._DEFAULT_SAMPLES['bs'] + + self.n_samples = n_samples if n_samples > 0 else \ + self._DEFAULT_SAMPLES[self.test_type] + + # Number of systems (excluding the baseline) + self.n_systems = len(named_systems) - 1 + + # Decide on number of workers + if IS_WINDOWS: + sacrelogger.warning('Parallel tests are not supported on Windows.') + self.n_jobs = 1 + elif self.n_jobs == 0: + # Decide automatically + # Divide by two to ignore hyper-threading + n_max_jobs = mp.cpu_count() // 2 + if n_max_jobs == 0: + self.n_jobs = 1 + else: + # Don't use more workers than the number of CPUs + self.n_jobs = min(n_max_jobs, self.n_systems) + + self._signatures: Dict[str, Signature] = {} + self._baseline_info: Dict[str, Tuple[Any, Result]] = {} + + ################################################## + # Pre-compute and cache baseline system statistics + ################################################## + self.metrics = {} + + bl_name, bl_hyps = self.named_systems[0] + + for name, metric in 
    def __call__(self) -> Tuple[Dict[str, Signature], Dict[str, List[Union[str, Result]]]]:
        """Runs the paired test either on single or multiple worker processes.

        :return: A tuple of (per-metric `Signature` dict, per-metric score
            columns; the 'System' column lists the system names, the other
            columns hold one `Result` per system with the baseline first).
        """
        tasks = []
        scores: Dict[str, List[Union[str, Result]]] = {}

        # Add the name column
        scores['System'] = [ns[0] for ns in self.named_systems]

        # Store baseline results as the first position
        for metric, (_, result) in self._baseline_info.items():
            scores[metric] = [result]

        # Prepare list of arguments for each comparison
        # Skip the baseline (pos: 0); `idx` is unused, only the pairs matter
        for idx, (name, hyps) in enumerate(self.named_systems[1:]):
            # A falsy seed (None or 0) means "do not fix the RNG"
            seed = self._seed if self._seed else None

            tasks.append(
                (self._baseline_info, name, hyps, self.references,
                 self.metrics, self.n_samples, self.n_ar_confidence, seed))

        # Run the test(s)
        if self.n_jobs == 1:
            results = [self._fn(*args) for args in tasks]
        else:
            # NOTE: The overhead of worker creation is not negligible
            # but if you have many systems and TER enabled, this significantly
            # speeds up the test.
            # NOTE: This only works on Linux/Mac OS X but not Windows. Windows only
            # supports `spawn` backend which requires things to be called
            # from within __main__.
            sacrelogger.info(f'Launching {self.n_jobs} parallel workers.')
            with mp.get_context('fork').Pool(self.n_jobs) as pool:
                jobs = [pool.apply_async(self._fn, args) for args in tasks]

                # wait for completion
                results = [j.get() for j in jobs]

        # Keep the order deterministic: append in task-submission order
        for sys_name, sys_results in results:
            for metric, _result in sys_results.items():
                scores[metric].append(_result)

        return self._signatures, scores
class Color:
    """Tiny helper for optionally wrapping strings in ANSI color codes."""

    # Global toggle: when False, `format` is a no-op and returns `msg` as-is.
    ENABLE_COLORS = True

    @staticmethod
    def format(msg: str, color: str) -> str:
        """Return `msg` wrapped in the ANSI escape sequence for `color`.

        :param msg: The string to colorize.
        :param color: A color name understood by `colorama.Fore`, e.g.
            'red', 'blue', 'green'.
        :return: The colorized string, or `msg` unchanged when coloring is
            disabled or the color name is unknown.
        """
        if Color.ENABLE_COLORS:
            ansi_code = getattr(colorama.Fore, color.upper(), None)
            if ansi_code:
                return f'{ansi_code}{msg}{colorama.Style.RESET_ALL}'
        return msg
range(len(results['System'])): + value = {} + value['system'] = results['System'][i] + # parse metrics + for j in range(1, len(dict_keys)): + if isinstance(results[dict_keys[j]][i], str): + value[dict_keys[j]] = results[dict_keys[j]][i] + else: + # Values inside object as dict + value[dict_keys[j]] = results[dict_keys[j]][i].__dict__ + proper_json.append(value) + + print(json.dumps(proper_json, indent=4)) + return + + tablefmt = args.format + if tablefmt in ('text'): + tablefmt = 'fancy_grid' + elif tablefmt == 'latex': + # Use booktabs + tablefmt = 'latex_booktabs' + + # If paired testing has been given, this'll format the score lines + results = _format_score_lines( + results, args.width, multiline=tablefmt == 'fancy_grid') + + new_dict = {} + + # Color the column names and the baseline system name and scores + has_baseline = False + baseline_name = '' + for name in results.keys(): + val = results[name] + if val[0].startswith('Baseline:') or has_baseline: + if val[0].startswith('Baseline:'): + baseline_name = val[0] + has_baseline = True + val[0] = Color.format(val[0], 'yellow') + new_dict[Color.format(name, 'cyan')] = results[name] + + # Finally tabulate + table = tabulate( + new_dict, headers='keys', tablefmt=tablefmt, + colalign=('right', ), + stralign='center', + numalign='center', + floatfmt=f'.{args.width}f') + + print(table) + print() + + is_paired = args.paired_bs or args.paired_ar + + if is_paired: + test_type = 'bootstrap resampling' if args.paired_bs else 'approximate randomization' + n_samples_or_trials = args.paired_bs_n if args.paired_bs else args.paired_ar_n + test_sample_type = 'resampling trials' if args.paired_bs else 'trials' + msg = f'Paired {test_type} test with {n_samples_or_trials} {test_sample_type}' + + bline = Color.format('baseline', 'yellow') + bline_name = Color.format(baseline_name, 'yellow') + null_hyp = Color.format('Null hypothesis', 'green') + pval_color = Color.format('highlighted in red', 'red') + + # Print fancy header + 
print('-' * len(msg) + '\n' + msg + '\n' + '-' * len(msg)) + print(f' - Each system is pairwise compared to {bline_name}.') + if args.paired_bs: + print(' Actual system score / bootstrap estimated true mean / 95% CI are provided for each metric.') + else: + print(' Actual system score is provided for each metric.') + print() + print(f' - {null_hyp}: the system and the {bline} translations are essentially') + print(f' generated by the same underlying process. For a given system and the {bline},') + print(' the p-value is roughly the probability of the absolute score difference (delta)') + print(f' or higher occurring due to chance, under the assumption that the {null_hyp.lower()} is correct.') + print() + print(f' - Assuming a significance threshold of 0.05, the {null_hyp.lower()} can be rejected') + print(' for p-values < 0.05 (marked with "*"). This means that the delta is unlikely to be attributed') + print(f' to chance, hence the system is significantly "different" than the {bline}.') + print(f' Otherwise, the p-values are {pval_color}.') + print() + print(f' - NOTE: Significance does not tell whether a system is "better" than the {bline} but rather') + print(' emphasizes the "difference" of the systems in terms of the replicability of the delta.') + print() + + print('-----------------') + print('Metric signatures') + print('-----------------') + for name, sig in signatures.items(): + print(f' - {name:<10} {sig}') + + +def print_single_results(results: List[str], args: Namespace): + """Re-process metric strings to align them nicely.""" + if args.format == 'json': + if len(results) > 1: + proper_json = '[\n' + ',\n'.join(results) + '\n]' + print(proper_json) + else: + print(results[0]) + return + + # Color confidence strings for emphasis + if 'μ' in results[0]: + color_re = re.compile(r'(\(μ = [0-9\.]+ ± [0-9\.]+\))') + for idx in range(len(results)): + results[idx] = color_re.sub( + lambda m: Color.format(m.group(), 'cyan'), results[idx]) + + if len(results) == 
def sanity_check_lengths(system: Sequence[str],
                         refs: Sequence[Sequence[str]],
                         test_set: Optional[str] = None):
    """Verify that every reference stream is as long as the system output.

    Logs an explanatory error and terminates the process with exit status 1
    on a length mismatch; returns silently otherwise.

    :param system: The system hypotheses, one per segment.
    :param refs: One or more reference streams, each segment-aligned.
    :param test_set: If given, the error message additionally hints that a
        stale local copy of the downloaded test set may be to blame.
    """
    expected = len(system)
    if all(len(ref_stream) == expected for ref_stream in refs):
        return

    sacrelogger.error("System and reference streams have different lengths.")
    if test_set:
        # A corrupt/partial download can truncate reference files; point
        # the user at the local cache folder.
        sacrelogger.error("This could be an issue with your system output "
                          "or with sacreBLEU's reference database if -t is given.")
        sacrelogger.error("For the latter, try cleaning out the cache by typing:\n")
        sacrelogger.error(f"  rm -r {SACREBLEU_DIR}/{test_set}\n")
        sacrelogger.error("The test sets will be re-downloaded the next time you run sacreBLEU.")
    sys.exit(1)
+ """ + if file.endswith('.gz'): + return gzip.open(file, mode=mode, encoding=encoding, newline="\n") + return open(file, mode=mode, encoding=encoding, newline="\n") + + +def my_log(num: float) -> float: + """ + Floors the log function + + :param num: the number + :return: log(num) floored to a very low number + """ + + if num == 0.0: + return -9999999999 + return math.log(num) + + +def sum_of_lists(lists): + """Aggregates list of numeric lists by summing.""" + if len(lists) == 1: + return lists[0] + + # Preserve datatype + size = len(lists[0]) + init_val = type(lists[0][0])(0.0) + total = [init_val] * size + for ll in lists: + for i in range(size): + total[i] += ll[i] + return total + + +def args_to_dict(args, prefix: str, strip_prefix: bool = False): + """Filters argparse's `Namespace` into dictionary with arguments + beginning with the given prefix.""" + prefix += '_' + d = {} + for k, v in args.__dict__.items(): + if k.startswith(prefix): + k = k.replace(prefix, '') if strip_prefix else k + d[k] = v + return d + + +def print_test_set(test_set, langpair, requested_fields, origlang=None, subset=None): + """Prints to STDOUT the specified side of the specified test set. 
+ + :param test_set: the test set to print + :param langpair: the language pair + :param requested_fields: the fields to print + :param origlang: print only sentences with a given original language (2-char ISO639-1 code), "non-" prefix means negation + :param subset: print only sentences whose document annotation matches a given regex + """ + if test_set not in DATASETS: + raise Exception(f"No such test set {test_set}") + + fieldnames = DATASETS[test_set].fieldnames(langpair) + all_files = DATASETS[test_set].get_files(langpair) + + if "all" in requested_fields and len(requested_fields) != 1: + sacrelogger.error("Cannot use --echo all with other fields") + sys.exit(1) + elif "all" in requested_fields: + requested_fields = fieldnames + + # backwards compatibility: allow "ref" even if not present (choose first) + if "ref" in requested_fields and "ref" not in fieldnames: + replacement_ref = min([f for f in fieldnames if f.startswith("ref")]) + requested_fields = [f if f != "ref" else replacement_ref for f in requested_fields] + + files = [] + for field in requested_fields: + if field not in fieldnames: + sacrelogger.error(f"No such field {field} in test set {test_set} for language pair {langpair}.") + sacrelogger.error(f"available fields for {test_set}/{langpair}: {', '.join(fieldnames)}") + if "ref" not in fieldnames: + subref = min([f for f in fieldnames if f.startswith("ref")]) + sacrelogger.error(f"'ref' also allowed for backwards compatibility (will return {subref})") + sys.exit(1) + index = fieldnames.index(field) + files.append(all_files[index]) + + streams = [smart_open(file) for file in files] + streams = filter_subset(streams, test_set, langpair, origlang, subset) + for lines in zip(*streams): + print('\t'.join(map(lambda x: x.rstrip(), lines))) + + +def get_source_file(test_set: str, langpair: str) -> str: + """ + Returns the source file for a given testset/langpair. + Downloads it first if it is not already local. 
+ + :param test_set: The test set (e.g., "wmt19") + :param langpair: The language pair (e.g., "de-en") + :return: the path to the requested source file + """ + if test_set not in DATASETS: + raise Exception(f"No such test set {test_set}") + + return DATASETS[test_set].get_source_file(langpair) + + +def get_reference_files(test_set: str, langpair: str) -> List[str]: + """ + Returns a list of one or more reference file paths for the given testset/langpair. + Downloads the references first if they are not already local. + + :param test_set: The test set (e.g., "wmt19") + :param langpair: The language pair (e.g., "de-en") + :return: a list of one or more reference file paths + """ + if test_set not in DATASETS: + raise Exception(f"No such test set {test_set}") + return DATASETS[test_set].get_reference_files(langpair) + + +def get_files(test_set, langpair) -> List[str]: + """ + Returns the path of the source file and all reference files for + the provided test set / language pair. + Downloads the references first if they are not already local. 
+ + :param test_set: The test set (e.g., "wmt19") + :param langpair: The language pair (e.g., "de-en") + :return: a list of the source file and all reference files + """ + + if test_set not in DATASETS: + raise Exception(f"No such test set {test_set}") + return DATASETS[test_set].get_files(langpair) + + +def extract_tarball(filepath, destdir): + sacrelogger.info(f'Extracting {filepath} to {destdir}') + if filepath.endswith('.tar.gz') or filepath.endswith('.tgz'): + import tarfile + with tarfile.open(filepath) as tar: + tar.extractall(path=destdir) + elif filepath.endswith('.zip'): + import zipfile + with zipfile.ZipFile(filepath, 'r') as zipfile: + zipfile.extractall(path=destdir) + + +def get_md5sum(dest_path): + # Check md5sum + md5 = hashlib.md5() + with open(dest_path, 'rb') as infile: + for line in infile: + md5.update(line) + return md5.hexdigest() + + +def download_file(source_path, dest_path, extract_to=None, expected_md5=None): + """Downloading utility. + + Downloads the specified test to the system location specified by the SACREBLEU environment variable. + + :param source_path: the remote uri to download + :param dest_path: where to save the file + :param extract_to: for tarballs, where to extract to + :param expected_md5: the MD5 sum + :return: the set of processed file names + """ + import urllib.request + import ssl + + outdir = os.path.dirname(dest_path) + os.makedirs(outdir, exist_ok=True) + + # Make sure to open in mode "a" + lockfile = f"{dest_path}.lock" + with portalocker.Lock(lockfile, timeout=60): + + if not os.path.exists(dest_path) or os.path.getsize(dest_path) == 0: + sacrelogger.info(f"Downloading {source_path} to {dest_path}") + + try: + with urllib.request.urlopen(source_path) as f, open(dest_path, 'wb') as out: + out.write(f.read()) + except ssl.SSLError: + sacrelogger.error('An SSL error was encountered in downloading the files. 
If you\'re on a Mac, ' + 'you may need to run the "Install Certificates.command" file located in the ' + '"Python 3" folder, often found under /Applications') + sys.exit(1) + + if expected_md5 is not None: + cur_md5 = get_md5sum(dest_path) + if cur_md5 != expected_md5: + sacrelogger.error(f'Fatal: MD5 sum of downloaded file was incorrect (got {cur_md5}, expected {expected_md5}).') + sacrelogger.error(f'Please manually delete {dest_path!r} and rerun the command.') + sacrelogger.error('If the problem persists, the tarball may have changed, in which case, please contact the SacreBLEU maintainer.') + sys.exit(1) + + # Extract the tarball + if extract_to is not None: + extract_tarball(dest_path, extract_to) + + +def download_test_set(test_set, langpair=None): + """Downloads the specified test to the system location specified by the SACREBLEU environment variable. + + :param test_set: the test set to download + :param langpair: the language pair (needed for some datasets) + :return: the set of processed file names + """ + if test_set not in DATASETS: + raise Exception(f"No such test set {test_set}") + dataset = DATASETS[test_set] + file_paths = dataset.get_files(langpair) + return file_paths + + +def get_langpairs_for_testset(testset: str) -> List[str]: + """Return a list of language pairs for a given test set.""" + if testset not in DATASETS: + return [] + return list(DATASETS[testset].langpairs.keys()) + + +def get_available_testsets() -> List[str]: + """Return a list of available test sets.""" + return sorted(DATASETS.keys(), reverse=True) + +def get_available_testsets_for_langpair(langpair: str) -> List[str]: + """Return a list of available test sets for a given language pair""" + parts = langpair.split('-') + srclang = parts[0] + trglang = parts[1] + + testsets = [] + for dataset in DATASETS.values(): + if f'{srclang}-{trglang}' in dataset.langpairs \ + or f'{trglang}-{srclang}' in dataset.langpairs: + testsets.append(dataset.name) + + return testsets + + +def 
get_available_origlangs(test_sets, langpair) -> List[str]: + """Return a list of origlang values in according to the raw SGM files.""" + if test_sets is None: + return [] + + origlangs = set() + for test_set in test_sets.split(','): + dataset = DATASETS[test_set] + rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', dataset.langpairs[langpair][0]) + if rawfile.endswith('.sgm'): + with smart_open(rawfile) as fin: + for line in fin: + if line.startswith(' +#include +#include +#include +#include + + +// helpers to check for cuda errors +#define CUDA_CHECK(ans) {{\ + gpuAssert((ans), __FILE__, __LINE__);\ + }}\ + +static inline void gpuAssert(CUresult code, const char *file, int line) {{ + if (code != CUDA_SUCCESS) {{ + const char *prefix = "Triton Error [CUDA]: "; + const char *str; + cuGetErrorString(code, &str); + char err[1024] = {{0}}; + strcat(err, prefix); + strcat(err, str); + printf("%s\\n", err); + exit(code); + }} +}} + +// globals +#define CUBIN_NAME {kernel_name}_cubin +CUmodule {kernel_name}_mod = NULL; +CUfunction {kernel_name}_func = NULL; +unsigned char CUBIN_NAME[{bin_size}] = {{ {bin_data} }}; + + +void unload_{kernel_name}(void) {{ + CUDA_CHECK(cuModuleUnload({kernel_name}_mod)); +}} + +// TODO: some code duplication with `runtime/backend/cuda.c` +void load_{kernel_name}() {{ + int dev = 0; + void *bin = (void *)&CUBIN_NAME; + int shared = {shared}; + CUDA_CHECK(cuModuleLoadData(&{kernel_name}_mod, bin)); + CUDA_CHECK(cuModuleGetFunction(&{kernel_name}_func, {kernel_name}_mod, "{triton_kernel_name}")); + // set dynamic shared memory if necessary + int shared_optin; + CUDA_CHECK(cuDeviceGetAttribute(&shared_optin, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN, dev)); + if (shared > 49152 && shared_optin > 49152) {{ + CUDA_CHECK(cuFuncSetCacheConfig({kernel_name}_func, CU_FUNC_CACHE_PREFER_SHARED)); + CUDA_CHECK(cuFuncSetAttribute({kernel_name}_func, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, shared_optin)) + }} +}} + +/* 
+{kernel_docstring} +*/ +CUresult {kernel_name}(CUstream stream, {signature}) {{ + if ({kernel_name}_func == NULL) + load_{kernel_name}(); + unsigned int gX = {gridX}; + unsigned int gY = {gridY}; + unsigned int gZ = {gridZ}; + void *args[{num_args}] = {{ {arg_pointers} }}; + // TODO: shared memory + if(gX * gY * gZ > 0) + return cuLaunchKernel({kernel_name}_func, gX, gY, gZ, {num_warps} * 32, 1, 1, {shared}, stream, args, NULL); +}} diff --git a/llmeval-env/lib/python3.10/site-packages/triton/tools/compile.h b/llmeval-env/lib/python3.10/site-packages/triton/tools/compile.h new file mode 100644 index 0000000000000000000000000000000000000000..d98b7063b6ae6292b65b61abf5a30c58b7d28e95 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/triton/tools/compile.h @@ -0,0 +1,14 @@ +#ifndef TT_KERNEL_INCLUDES +#define TT_KERNEL_INCLUDES + +#include +#include +#include +#include + +#endif + +void unload_{kernel_name}(void); +void load_{kernel_name}(void); +// tt-linker: {kernel_name}:{full_signature}:{algo_info} +CUresult{_placeholder} {kernel_name}(CUstream stream, {signature}); diff --git a/llmeval-env/lib/python3.10/site-packages/triton/tools/compile.py b/llmeval-env/lib/python3.10/site-packages/triton/tools/compile.py new file mode 100644 index 0000000000000000000000000000000000000000..51193ba3d84c2f56dfa5b24d843513890bc5210d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/triton/tools/compile.py @@ -0,0 +1,146 @@ +import binascii +import hashlib +import importlib.util +import sys +from argparse import ArgumentParser +from pathlib import Path +from typing import List + +import triton +from triton.compiler.code_generator import kernel_suffix +from triton.compiler.make_launcher import ty_to_cpp + +desc = """ +Triton ahead-of-time compiler: + +This program compiles the kernel with name `kernel-name` in the file at the +provided `path` into self-contained C source-code that embeds the `cubin` +data along with utilities to load, unload and launch the 
kernel. + +signature is provided as a list of (optionally divisibility-hinted) types +or constexpr values, e.g. + +`compile.py --kernel-name kernel --signature "*fp32:16, i32:16, 1024, i32" --out-name kernel /path/to/kernel.py` + +will compile triton.JITFunction of name `kernel` inside the file `/path/to/kernel.py`. +Said kernel will be specialized such that argument 0, 1 are assumed to be multiple of 16, +and argument 2 is assumed to be a compile-time constant of value 1024, i.e. it won't be part of the generated prototype. + +The resulting entry point will have signature + +CUresult kernel_{specialization_suffix}(CUstream stream, unsigned gX, unsigned gY, unsigned gZ, float* arg0, int32_t arg1, int32_t arg2) + +Different such specialized entry points can be combined using the `linker.py` script. + +NOTE: when resolving the scope of /path/to/kernel.py, the file will be executed from within its parent directory with the python interpreter +used to run this `compile.py` script +""" + +if __name__ == "__main__": + + # command-line arguments + parser = ArgumentParser(description=desc) + parser.add_argument("path", + help="Path to Python source containing desired kernel in its scope. 
File will be executed.") + parser.add_argument("--kernel-name", "-n", type=str, default="", help="Name of the kernel to compile", + required=True) + parser.add_argument("--num-warps", "-w", type=int, default=1, help="Number of warps to launch the kernel") + parser.add_argument("--num-stages", "-ns", type=int, default=3, + help="Number of stages (meta-parameter of the kernel)") + parser.add_argument("--out-name", "-on", type=str, default=None, help="Out name for the compiled kernel") + parser.add_argument("--out-path", "-o", type=Path, default=None, help="Out filename") + parser.add_argument("--signature", "-s", type=str, help="Signature of the kernel", required=True) + parser.add_argument("--grid", "-g", type=str, help="Launch grid of the kernel", required=True) + args = parser.parse_args() + + out_name = args.out_name if args.out_name else args.kernel_name + out_path = args.out_path if args.out_path else Path(out_name) + + # execute python sources and extract functions wrapped in JITFunction + arg_path = Path(args.path) + sys.path.insert(0, str(arg_path.parent)) + spec = importlib.util.spec_from_file_location(arg_path.stem, arg_path) + mod = importlib.util.module_from_spec(spec) + spec.loader.exec_module(mod) + kernel = getattr(mod, args.kernel_name) + grid = args.grid.split(",") + assert len(grid) == 3 + + # validate and parse signature + signature = list(map(lambda s: s.strip(" "), args.signature.split(","))) + + def hash_signature(signature: List[str]): + m = hashlib.sha256() + m.update(" ".join(signature).encode()) + return m.hexdigest()[:8] + + meta_sig = f"warps{args.num_warps}xstages{args.num_stages}" + sig_hash = hash_signature(signature + [meta_sig]) + + def constexpr(s): + try: + ret = int(s) + return ret + except ValueError: + pass + try: + ret = float(s) + return ret + except ValueError: + pass + return None + + hints = {i: constexpr(s.split(":")[1]) for i, s in enumerate(signature) if ":" in s} + hints = {k: v for k, v in hints.items() if v is not 
None} + constants = {i: constexpr(s) for i, s in enumerate(signature)} + constants = {k: v for k, v in constants.items() if v is not None} + signature = {i: s.split(":")[0] for i, s in enumerate(signature) if i not in constants} + const_sig = 'x'.join([str(v) for v in constants.values()]) + doc_string = [f"{kernel.arg_names[i]}={constants[i]}" for i in constants.keys()] + doc_string += [f"num_warps={args.num_warps}", f"num_stages={args.num_stages}"] + + # compile ast into cubin + for h in hints.values(): + assert h in [1, 16], f"Only 1 and 16 are valid hints, got {h}" + divisible_by_16 = [i for i, h in hints.items() if h == 16] + equal_to_1 = [i for i, h in hints.items() if h == 1] + attrs = triton.compiler.AttrsDescriptor(divisible_by_16=divisible_by_16, equal_to_1=equal_to_1) + for i in equal_to_1: + constants.update({i: 1}) + src = triton.compiler.ASTSource(fn=kernel, constants=constants, signature=signature, attrs=attrs) + opts = {"num_warps": args.num_warps, "num_stages": args.num_stages} + ccinfo = triton.compile(src, options=opts) + arg_names = [] + arg_types = [] + for i in signature.keys(): + if i not in equal_to_1: + arg_names += [kernel.arg_names[i]] + arg_types += [signature[i]] + + # dump C stub code + suffix = kernel_suffix(signature.values(), attrs) + func_name = '_'.join([out_name, sig_hash, suffix]) + triton_kernel_name = '_'.join([args.kernel_name, suffix]) + hex_ = str(binascii.hexlify(ccinfo.asm["cubin"]))[2:-1] + params = { + "kernel_name": func_name, + "triton_kernel_name": triton_kernel_name, + "bin_size": len(hex_), + "bin_data": ", ".join([f"0x{x}{y}" for x, y in zip(hex_[::2], hex_[1::2])]), + "signature": ", ".join([f"{ty_to_cpp(ty)} {name}" for name, ty in zip(arg_names, arg_types)]), + "full_signature": ", ".join([f"{ty_to_cpp(signature[i])} {kernel.arg_names[i]}" for i in signature.keys()]), + "arg_pointers": ", ".join([f"&{arg}" for arg in arg_names]), + "num_args": len(arg_names), + "kernel_docstring": doc_string, + "shared": 
ccinfo.shared, + "num_warps": args.num_warps, + "algo_info": '_'.join([const_sig, meta_sig]), + "gridX": grid[0], + "gridY": grid[1], + "gridZ": grid[2], + "_placeholder": "", + } + for ext in ['h', 'c']: + template_path = Path(__file__).parent / f"compile.{ext}" + with out_path.with_suffix(f".{sig_hash}_{suffix}.{ext}").open("w") as fp: + fp.write(Path(template_path).read_text().format(**params)) diff --git a/llmeval-env/lib/python3.10/site-packages/triton/tools/disasm.py b/llmeval-env/lib/python3.10/site-packages/triton/tools/disasm.py new file mode 100644 index 0000000000000000000000000000000000000000..1e309a2e4940ed56a432870f19b99d47aeea79f2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/triton/tools/disasm.py @@ -0,0 +1,142 @@ +# MIT License + +# Copyright (c) 2020 Da Yan @ HKUST + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +import functools +import os +import re +import subprocess +import tempfile + +from ..common.backend import path_to_cuobjdump, path_to_nvdisasm + +FLINE_RE = re.compile(r'\s*/\*\w{4}\*/\s*([^;]*;)\s*/\* 0x(\w{16}) \*/\s*') +SLINE_RE = re.compile(r'\s*/\* 0x(\w{16}) \*/\s*') +FNAME_RE = re.compile(r'\s*Function : (\w+)\s*') +BRA_RE = re.compile(r'(.*BRA(?:\.U)? )(0x\w+);') + + +def parseCtrl(sline): + enc = int(SLINE_RE.match(sline).group(1), 16) + stall = (enc >> 41) & 0xf + yld = (enc >> 45) & 0x1 + wrtdb = (enc >> 46) & 0x7 + readb = (enc >> 49) & 0x7 + watdb = (enc >> 52) & 0x3f + + yld_str = 'Y' if yld == 0 else '-' + wrtdb_str = '-' if wrtdb == 7 else str(wrtdb) + readb_str = '-' if readb == 7 else str(readb) + watdb_str = '--' if watdb == 0 else f'{watdb:02d}' + return f'{watdb_str}:{readb_str}:{wrtdb_str}:{yld_str}:{stall:x}' + + +def processSassLines(fline, sline, labels): + asm = FLINE_RE.match(fline).group(1) + # Remove tailing space + if asm.endswith(" ;"): + asm = asm[:-2] + ";" + ctrl = parseCtrl(sline) + # BRA target address + if BRA_RE.match(asm) is not None: + target = int(BRA_RE.match(asm).group(2), 16) + if target in labels: + pass + else: + labels[target] = len(labels) + return (f'{ctrl}', f'{asm}') + + +@functools.lru_cache() +def get_sass(cubin_asm, fun=None): + fd, path = tempfile.mkstemp() + try: + with open(fd, 'wb') as cubin: + cubin.write(cubin_asm) + sass = extract(path, fun) + finally: + os.remove(path) + return sass + + +def extract(file_path, fun): + cuobjdump, _ = path_to_cuobjdump() + nvdisasm, _ = path_to_nvdisasm() + os.environ["NVDISASM_PATH"] = nvdisasm + if fun is None: + sass_str = subprocess.check_output([cuobjdump, "-sass", file_path]) + else: + sass_str = subprocess.check_output([cuobjdump, "-fun", fun, "-sass", file_path]) + sass_lines = sass_str.splitlines() + line_idx = 0 + while line_idx < len(sass_lines): + line = sass_lines[line_idx].decode() + # format: + # function : + # .headerflags: ... 
+ # /*0000*/ asmstr /*0x...*/ + # /*0x...*/ + + # Looking for new function header (function: ) + while FNAME_RE.match(line) is None: + line_idx += 1 + if line_idx < len(sass_lines): + line = sass_lines[line_idx].decode() + else: + return + + fname = FNAME_RE.match(line).group(1) + ret = '' + ret += f'Function:{fname}\n' + line_idx += 2 # bypass .headerflags + line = sass_lines[line_idx].decode() + # Remapping address to label + labels = {} # address -> label_idx + # store sass asm in buffer and them print them (for labels) + # (ctrl, asm) + asm_buffer = [] + while FLINE_RE.match(line) is not None: + # First line (Offset ASM Encoding) + fline = sass_lines[line_idx].decode() + line_idx += 1 + # Second line (Encoding) + sline = sass_lines[line_idx].decode() + line_idx += 1 + asm_buffer.append(processSassLines(fline, sline, labels)) + # peek the next line + line = sass_lines[line_idx].decode() + # Print sass + # label naming convention: LBB#i + for idx, (ctrl, asm) in enumerate(asm_buffer): + # Print label if this is BRA target + offset = idx * 16 + if offset in labels: + label_name = f'LBB{labels[offset]}' + ret += f'{label_name}:\n' + ret += ctrl + '\t' + # if this is BRA, remap offset to label + if BRA_RE.match(asm): + target = int(BRA_RE.match(asm).group(2), 16) + target_name = f'LBB{labels[target]}' + asm = BRA_RE.sub(rf'\1{target_name};', asm) + ret += asm + '\n' + ret += '\n' + return ret diff --git a/llmeval-env/lib/python3.10/site-packages/triton/tools/link.py b/llmeval-env/lib/python3.10/site-packages/triton/tools/link.py new file mode 100644 index 0000000000000000000000000000000000000000..eb39b4bda4dbf30c4acd2e0ac4c2f83af6199a65 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/triton/tools/link.py @@ -0,0 +1,322 @@ +from collections import defaultdict +from pathlib import Path +from typing import Sequence, Union + +from dataclasses import dataclass + + +def _exists(x): + return x is not None + + +class LinkerError(Exception): + pass + + 
+@dataclass +class KernelLinkerMeta: + orig_kernel_name: str + arg_names: Sequence[str] + arg_ctypes: Sequence[str] + sizes: Sequence[Union[int, None]] + sig_hash: str + triton_suffix: str + suffix: str + num_specs: int + """ number of specialized arguments """ + + +class HeaderParser: + + def __init__(self) -> None: + import re + + # [kernel_name, c signature] + self.linker_directives = re.compile("//[\\s]*tt-linker:[\\s]*([\\w]+):(.+):(.+)") + # [name, hash, suffix] + self.kernel_name = re.compile("^([\\w]+)_([\\w]+)_([\\w]+)$") + # [(type, name)] + self.c_sig = re.compile("[\\s]*(\\w+)\\s(\\w+)[,]?") + # [d|c] + self.arg_suffix = re.compile("[c,d]") + + self.kernels = defaultdict(list) + + def extract_linker_meta(self, header: str): + for ln in header.splitlines(): + if ln.startswith("//"): + m = self.linker_directives.match(ln) + if _exists(m): + ker_name, c_sig, algo_info = m.group(1), m.group(2), m.group(3) + name, sig_hash, suffix = self._match_name(ker_name) + c_types, arg_names = self._match_c_sig(c_sig) + num_specs, sizes = self._match_suffix(suffix, c_sig) + self._add_kernel( + "_".join([name, algo_info]), + KernelLinkerMeta( + orig_kernel_name=name, + arg_names=arg_names, + arg_ctypes=c_types, + sizes=sizes, + sig_hash=sig_hash, + triton_suffix=suffix, + suffix=suffix, + num_specs=num_specs, + ), + ) + + def _match_name(self, ker_name: str): + m = self.kernel_name.match(ker_name) + if _exists(m): + name, sig_hash, suffix = m.group(1), m.group(2), m.group(3) + return name, sig_hash, suffix + raise LinkerError(f"{ker_name} is not a valid kernel name") + + def _match_c_sig(self, c_sig: str): + m = self.c_sig.findall(c_sig) + if len(m): + tys, args = [], [] + for ty, arg_name in m: + tys.append(ty) + args.append(arg_name) + return tys, args + + raise LinkerError(f"{c_sig} is not a valid argument signature") + + def _match_suffix(self, suffix: str, c_sig: str): + args = c_sig.split(",") + s2i = {"c": 1, "d": 16} + num_specs = 0 + sizes = [] + # scan through 
suffix, first find the index, + # then see if it is followed by d or c + for i in range(len(args)): + pos = suffix.find(str(i)) + if pos == -1: + raise LinkerError(f"{suffix} is not a valid kernel suffix") + pos += len(str(i)) + if self.arg_suffix.match(suffix, pos): + num_specs += 1 + sizes.extend([None] * (i - len(sizes))) + sizes.append(s2i[suffix[pos]]) + pos += 1 + if i < len(args) - 1: + suffix = suffix[pos:] + else: + sizes.extend([None] * (len(args) - len(sizes))) + return num_specs, sizes + + def _add_kernel(self, name: str, ker: KernelLinkerMeta): + if name in self.kernels: + last: KernelLinkerMeta = self.kernels[name][-1] + + for cur, new_ in zip(last.arg_ctypes, ker.arg_ctypes): + if cur != new_: + raise LinkerError( + f"Mismatched signature for kernel {name}: \n\texisting sig is: {','.join(last.arg_ctypes)}\n\tcurrent is: {','.join(ker.arg_ctypes)}" + ) + + self.kernels[name].append(ker) + + +def gen_signature_with_full_args(m): + return ", ".join([f"{ty} {arg}" for ty, arg in zip(m.arg_ctypes, m.arg_names)]) + + +def gen_signature(m): + arg_types = [ty for ty, hint in zip(m.arg_ctypes, m.sizes) if hint != 1] + arg_names = [arg for arg, hint in zip(m.arg_names, m.sizes) if hint != 1] + sig = ", ".join([f"{ty} {arg}" for ty, arg in zip(arg_types, arg_names)]) + return sig + + +# generate declarations of kernels with meta-parameter and constant values +def make_algo_decls(name: str, metas: Sequence[KernelLinkerMeta]) -> str: + return f""" +CUresult {name}(CUstream stream, {gen_signature_with_full_args(metas[-1])}); +void load_{name}(); +void unload_{name}(); + """ + + +# generate declarations of kernels with meta-parameter and constant values +def make_global_decl(meta: KernelLinkerMeta) -> str: + return f""" +CUresult {meta.orig_kernel_name}_default(CUstream stream, {gen_signature_with_full_args(meta)}); +CUresult {meta.orig_kernel_name}(CUstream stream, {gen_signature_with_full_args(meta)}, int algo_id); +void load_{meta.orig_kernel_name}(); +void 
unload_{meta.orig_kernel_name}(); + """ + + +# generate dispatcher function for kernels with different meta-parameter and constant values +def make_default_algo_kernel(meta: KernelLinkerMeta) -> str: + src = f"CUresult {meta.orig_kernel_name}_default(CUstream stream, {gen_signature_with_full_args(meta)}){{\n" + src += (f" return {meta.orig_kernel_name}(stream, {', '.join(meta.arg_names)}, 0);\n") + src += "}\n" + return src + + +# generate dispatcher function for kernels with different integer value hints +def make_kernel_hints_dispatcher(name: str, metas: Sequence[KernelLinkerMeta]) -> str: + src = f"// launcher for: {name}\n" + for meta in sorted(metas, key=lambda m: -m.num_specs): + src += f"CUresult {meta.orig_kernel_name}_{meta.sig_hash}_{meta.suffix}(CUstream stream, {gen_signature(meta)});\n" + src += "\n" + + src += (f"CUresult {name}(CUstream stream, {gen_signature_with_full_args(metas[-1])}){{") + src += "\n" + for meta in sorted(metas, key=lambda m: -m.num_specs): + cond_fn = ( # + lambda val, hint: f"({val} % {hint} == 0)" # + if hint == 16 # + else f"({val} == {hint})" # + if hint == 1 # + else None) + conds = " && ".join([ # + cond_fn(val, hint) # + for val, hint in zip(meta.arg_names, meta.sizes) # + if hint is not None + ]) + src += (f" if ({conds})\n" if any(meta.sizes) else "if (1)\n" + ) # Edge case where no specializations hence no dispatching required + arg_names = [arg for arg, hint in zip(meta.arg_names, meta.sizes) if hint != 1] + src += f" return {meta.orig_kernel_name}_{meta.sig_hash}_{meta.suffix}(stream, {', '.join(arg_names)});\n" + src += "\n" + src += " return CUDA_ERROR_INVALID_VALUE;\n" + src += "}\n" + + for mode in ["load", "unload"]: + src += f"\n// {mode} for: {name}\n" + for meta in sorted(metas, key=lambda m: -m.num_specs): + src += f"void {mode}_{meta.orig_kernel_name}_{meta.sig_hash}_{meta.suffix}();\n" + src += f"void {mode}_{name}() {{" + src += "\n" + for meta in sorted(metas, key=lambda m: -m.num_specs): + src += (f" 
{mode}_{meta.orig_kernel_name}_{meta.sig_hash}_{meta.suffix}();\n") + src += "}\n" + return src + + +# generate dispatcher function for kernels with different meta-parameter and constant values +def make_kernel_meta_const_dispatcher(meta: KernelLinkerMeta) -> str: + src = f"CUresult {meta.orig_kernel_name}(CUstream stream, {gen_signature_with_full_args(meta)}, int algo_id){{\n" + src += f" assert (algo_id < (int)sizeof({meta.orig_kernel_name}_kernels));\n" + src += f" return {meta.orig_kernel_name}_kernels[algo_id](stream, {', '.join(meta.arg_names)});\n" + src += "}\n" + return src + + +# generate definition of function pointers of kernel dispatchers based on meta-parameter and constant values +def make_func_pointers(names: str, meta: KernelLinkerMeta) -> str: + # the table of hint dispatchers + src = f"typedef CUresult (*kernel_func_t)(CUstream stream, {gen_signature_with_full_args(meta)});\n" + src += f"kernel_func_t {meta.orig_kernel_name}_kernels[] = {{\n" + for name in names: + src += f" {name},\n" + src += "};\n" + return src + + +# generate definition for load/unload functions for kernels with different meta-parameter and constant values +def make_kernel_load_def(names: str, meta: KernelLinkerMeta) -> str: + src = "" + for mode in ["load", "unload"]: + src += f"void {mode}_{meta.orig_kernel_name}(void){{\n" + for name in names: + src += f" {mode}_{name}();\n" + src += "}\n\n" + return src + + +def make_get_num_algos_decl(meta: KernelLinkerMeta) -> str: + src = f"int {meta.orig_kernel_name}_get_num_algos(void);" + return src + + +def make_get_num_algos_def(meta: KernelLinkerMeta) -> str: + src = f"int {meta.orig_kernel_name}_get_num_algos(void){{\n" + src += f" return (int)sizeof({meta.orig_kernel_name}_kernels);\n" + src += "}\n" + return src + + +desc = """ +Triton ahead-of-time linker: + +This program takes in header files generated by compile.py, and generates a +single entry-point responsible for dispatching the user's input to the right +kernel given 
the specializations that were compiled. + +Example usage: +python link.py /path/to/headers/*.h -o kernel_name +""" + +if __name__ == "__main__": + from argparse import ArgumentParser + + parser = ArgumentParser(description=desc) + parser.add_argument( + "headers", + nargs="+", + help="Paths to header files to link. Must include linker directive annotations (autogenerated by ttc)", + ) + parser.add_argument("--out", "-o", type=Path, help="Out filename") + parser.add_argument( + "--prefix", + type=str, + default="", + help="String to prefix kernel dispatcher names", + ) + args = parser.parse_args() + + # metadata + parser = HeaderParser() + includes = [] + for header in args.headers: + h_path = Path(header) + h_str = h_path.read_text() + includes.append(h_path.name) + parser.extract_linker_meta(h_str) + + # generate headers + algo_decls = [make_algo_decls(name, meta) for name, meta in parser.kernels.items()] + meta_lists = [meta for name, meta in parser.kernels.items()] + meta = meta_lists[0][0] + get_num_algos_decl = make_get_num_algos_decl(meta) + global_decl = make_global_decl(meta) + with args.out.with_suffix(".h").open("w") as fp: + out = "#include \n" + out += "\n".join(algo_decls) + out += "\n" + out += get_num_algos_decl + out += "\n" + out += global_decl + fp.write(out) + + # generate source + defs = [make_kernel_hints_dispatcher(name, meta) for name, meta in parser.kernels.items()] + names = [name for name in parser.kernels.keys()] + func_pointers_def = make_func_pointers(names, meta) + meta_const_def = make_kernel_meta_const_dispatcher(meta) + load_unload_def = make_kernel_load_def(names, meta) + get_num_algos_def = make_get_num_algos_def(meta) + default_algo_kernel = make_default_algo_kernel(meta) + with args.out.with_suffix(".c").open("w") as fp: + out = "" + out += "#include \n" + out += "#include \n" + out += "#include \n" + out += "\n" + out += "\n".join(defs) + out += "\n" + out += func_pointers_def + out += "\n" + out += get_num_algos_def + out += 
"\n" + out += meta_const_def + out += "\n" + out += load_unload_def + out += "\n" + out += default_algo_kernel + fp.write(out)