applied-ai-018 committed
Commit 38b0ffe · verified · 1 Parent(s): ffed346

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/hf_ckpt/global_step40/tokenizer.model +3 -0
  2. ckpts/hf_ckpt/tokenizer.model +3 -0
  3. venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/INSTALLER +1 -0
  4. venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/LICENSE.rst +28 -0
  5. venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/METADATA +105 -0
  6. venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/RECORD +58 -0
  7. venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/WHEEL +5 -0
  8. venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/entry_points.txt +2 -0
  9. venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/top_level.txt +1 -0
  10. venv/lib/python3.10/site-packages/filelock/__init__.py +52 -0
  11. venv/lib/python3.10/site-packages/filelock/__pycache__/__init__.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/filelock/__pycache__/_api.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/filelock/__pycache__/_error.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/filelock/__pycache__/_soft.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/filelock/__pycache__/_unix.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/filelock/__pycache__/_util.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/filelock/__pycache__/_windows.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/filelock/__pycache__/version.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/filelock/_api.py +341 -0
  20. venv/lib/python3.10/site-packages/filelock/_error.py +30 -0
  21. venv/lib/python3.10/site-packages/filelock/_soft.py +47 -0
  22. venv/lib/python3.10/site-packages/filelock/_unix.py +68 -0
  23. venv/lib/python3.10/site-packages/filelock/_util.py +52 -0
  24. venv/lib/python3.10/site-packages/filelock/_windows.py +65 -0
  25. venv/lib/python3.10/site-packages/filelock/py.typed +0 -0
  26. venv/lib/python3.10/site-packages/filelock/version.py +16 -0
  27. venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__init__.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__main__.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/base.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/fake_sgml.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/iwslt_xml.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/plain_text.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/tsv.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/wmt_xml.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/sacrebleu/dataset/base.py +195 -0
  36. venv/lib/python3.10/site-packages/sacrebleu/dataset/fake_sgml.py +116 -0
  37. venv/lib/python3.10/site-packages/sacrebleu/dataset/wmt_xml.py +207 -0
  38. venv/lib/python3.10/site-packages/sacrebleu/metrics/__init__.py +11 -0
  39. venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/__init__.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/base.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/bleu.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/chrf.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/helpers.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/lib_ter.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/ter.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/sacrebleu/metrics/base.py +438 -0
  47. venv/lib/python3.10/site-packages/sacrebleu/metrics/bleu.py +420 -0
  48. venv/lib/python3.10/site-packages/sacrebleu/metrics/chrf.py +284 -0
  49. venv/lib/python3.10/site-packages/sacrebleu/metrics/helpers.py +69 -0
  50. venv/lib/python3.10/site-packages/sacrebleu/metrics/lib_ter.py +478 -0
ckpts/hf_ckpt/global_step40/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
ckpts/hf_ckpt/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ pip
venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/LICENSE.rst ADDED
@@ -0,0 +1,28 @@
+ Copyright 2007 Pallets
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the copyright holder nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/METADATA ADDED
@@ -0,0 +1,105 @@
+ Metadata-Version: 2.1
+ Name: Jinja2
+ Version: 3.1.3
+ Summary: A very fast and expressive template engine.
+ Home-page: https://palletsprojects.com/p/jinja/
+ Maintainer: Pallets
+ Maintainer-email: [email protected]
+ License: BSD-3-Clause
+ Project-URL: Donate, https://palletsprojects.com/donate
+ Project-URL: Documentation, https://jinja.palletsprojects.com/
+ Project-URL: Changes, https://jinja.palletsprojects.com/changes/
+ Project-URL: Source Code, https://github.com/pallets/jinja/
+ Project-URL: Issue Tracker, https://github.com/pallets/jinja/issues/
+ Project-URL: Chat, https://discord.gg/pallets
+ Classifier: Development Status :: 5 - Production/Stable
+ Classifier: Environment :: Web Environment
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: BSD License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python
+ Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+ Classifier: Topic :: Text Processing :: Markup :: HTML
+ Requires-Python: >=3.7
+ Description-Content-Type: text/x-rst
+ License-File: LICENSE.rst
+ Requires-Dist: MarkupSafe >=2.0
+ Provides-Extra: i18n
+ Requires-Dist: Babel >=2.7 ; extra == 'i18n'
+
+ Jinja
+ =====
+
+ Jinja is a fast, expressive, extensible templating engine. Special
+ placeholders in the template allow writing code similar to Python
+ syntax. Then the template is passed data to render the final document.
+
+ It includes:
+
+ - Template inheritance and inclusion.
+ - Define and import macros within templates.
+ - HTML templates can use autoescaping to prevent XSS from untrusted
+   user input.
+ - A sandboxed environment can safely render untrusted templates.
+ - AsyncIO support for generating templates and calling async
+   functions.
+ - I18N support with Babel.
+ - Templates are compiled to optimized Python code just-in-time and
+   cached, or can be compiled ahead-of-time.
+ - Exceptions point to the correct line in templates to make debugging
+   easier.
+ - Extensible filters, tests, functions, and even syntax.
+
+ Jinja's philosophy is that while application logic belongs in Python if
+ possible, it shouldn't make the template designer's job difficult by
+ restricting functionality too much.
+
+
+ Installing
+ ----------
+
+ Install and update using `pip`_:
+
+ .. code-block:: text
+
+     $ pip install -U Jinja2
+
+ .. _pip: https://pip.pypa.io/en/stable/getting-started/
+
+
+ In A Nutshell
+ -------------
+
+ .. code-block:: jinja
+
+     {% extends "base.html" %}
+     {% block title %}Members{% endblock %}
+     {% block content %}
+       <ul>
+       {% for user in users %}
+         <li><a href="{{ user.url }}">{{ user.username }}</a></li>
+       {% endfor %}
+       </ul>
+     {% endblock %}
+
+
+ Donate
+ ------
+
+ The Pallets organization develops and supports Jinja and other popular
+ packages. In order to grow the community of contributors and users, and
+ allow the maintainers to devote more time to the projects, `please
+ donate today`_.
+
+ .. _please donate today: https://palletsprojects.com/donate
+
+
+ Links
+ -----
+
+ - Documentation: https://jinja.palletsprojects.com/
+ - Changes: https://jinja.palletsprojects.com/changes/
+ - PyPI Releases: https://pypi.org/project/Jinja2/
+ - Source Code: https://github.com/pallets/jinja/
+ - Issue Tracker: https://github.com/pallets/jinja/issues/
+ - Chat: https://discord.gg/pallets
venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/RECORD ADDED
@@ -0,0 +1,58 @@
+ Jinja2-3.1.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+ Jinja2-3.1.3.dist-info/LICENSE.rst,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475
+ Jinja2-3.1.3.dist-info/METADATA,sha256=0cLNbRCI91jytc7Bzv3XAQfZzFDF2gxkJuH46eF5vew,3301
+ Jinja2-3.1.3.dist-info/RECORD,,
+ Jinja2-3.1.3.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+ Jinja2-3.1.3.dist-info/entry_points.txt,sha256=zRd62fbqIyfUpsRtU7EVIFyiu1tPwfgO7EvPErnxgTE,59
+ Jinja2-3.1.3.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7
+ jinja2/__init__.py,sha256=NTBwMwsECrdHmxeXF7seusHLzrh6Ldn1A9qhS5cDuf0,1927
+ jinja2/__pycache__/__init__.cpython-310.pyc,,
+ jinja2/__pycache__/_identifier.cpython-310.pyc,,
+ jinja2/__pycache__/async_utils.cpython-310.pyc,,
+ jinja2/__pycache__/bccache.cpython-310.pyc,,
+ jinja2/__pycache__/compiler.cpython-310.pyc,,
+ jinja2/__pycache__/constants.cpython-310.pyc,,
+ jinja2/__pycache__/debug.cpython-310.pyc,,
+ jinja2/__pycache__/defaults.cpython-310.pyc,,
+ jinja2/__pycache__/environment.cpython-310.pyc,,
+ jinja2/__pycache__/exceptions.cpython-310.pyc,,
+ jinja2/__pycache__/ext.cpython-310.pyc,,
+ jinja2/__pycache__/filters.cpython-310.pyc,,
+ jinja2/__pycache__/idtracking.cpython-310.pyc,,
+ jinja2/__pycache__/lexer.cpython-310.pyc,,
+ jinja2/__pycache__/loaders.cpython-310.pyc,,
+ jinja2/__pycache__/meta.cpython-310.pyc,,
+ jinja2/__pycache__/nativetypes.cpython-310.pyc,,
+ jinja2/__pycache__/nodes.cpython-310.pyc,,
+ jinja2/__pycache__/optimizer.cpython-310.pyc,,
+ jinja2/__pycache__/parser.cpython-310.pyc,,
+ jinja2/__pycache__/runtime.cpython-310.pyc,,
+ jinja2/__pycache__/sandbox.cpython-310.pyc,,
+ jinja2/__pycache__/tests.cpython-310.pyc,,
+ jinja2/__pycache__/utils.cpython-310.pyc,,
+ jinja2/__pycache__/visitor.cpython-310.pyc,,
+ jinja2/_identifier.py,sha256=_zYctNKzRqlk_murTNlzrju1FFJL7Va_Ijqqd7ii2lU,1958
+ jinja2/async_utils.py,sha256=dFcmh6lMNfbh7eLKrBio8JqAKLHdZbpCuurFN4OERtY,2447
+ jinja2/bccache.py,sha256=mhz5xtLxCcHRAa56azOhphIAe19u1we0ojifNMClDio,14061
+ jinja2/compiler.py,sha256=PJzYdRLStlEOqmnQs1YxlizPrJoj3jTZuUleREn6AIQ,72199
+ jinja2/constants.py,sha256=GMoFydBF_kdpaRKPoM5cl5MviquVRLVyZtfp5-16jg0,1433
+ jinja2/debug.py,sha256=iWJ432RadxJNnaMOPrjIDInz50UEgni3_HKuFXi2vuQ,6299
+ jinja2/defaults.py,sha256=boBcSw78h-lp20YbaXSJsqkAI2uN_mD_TtCydpeq5wU,1267
+ jinja2/environment.py,sha256=0qldX3VQKZcm6lgn7zHz94oRFow7YPYERiqkquomNjU,61253
+ jinja2/exceptions.py,sha256=ioHeHrWwCWNaXX1inHmHVblvc4haO7AXsjCp3GfWvx0,5071
+ jinja2/ext.py,sha256=5fnMpllaXkfm2P_93RIvi-OnK7Tk8mCW8Du-GcD12Hc,31844
+ jinja2/filters.py,sha256=vYjKb2zaPShvYtn_LpSmqfS8SScbrA_KOanNibsMDIE,53862
+ jinja2/idtracking.py,sha256=GfNmadir4oDALVxzn3DL9YInhJDr69ebXeA2ygfuCGA,10704
+ jinja2/lexer.py,sha256=DW2nX9zk-6MWp65YR2bqqj0xqCvLtD-u9NWT8AnFRxQ,29726
+ jinja2/loaders.py,sha256=ayAwxfrA1SAffQta0nwSDm3TDT4KYiIGN_D9Z45B310,23085
+ jinja2/meta.py,sha256=GNPEvifmSaU3CMxlbheBOZjeZ277HThOPUTf1RkppKQ,4396
+ jinja2/nativetypes.py,sha256=7GIGALVJgdyL80oZJdQUaUfwSt5q2lSSZbXt0dNf_M4,4210
+ jinja2/nodes.py,sha256=i34GPRAZexXMT6bwuf5SEyvdmS-bRCy9KMjwN5O6pjk,34550
+ jinja2/optimizer.py,sha256=tHkMwXxfZkbfA1KmLcqmBMSaz7RLIvvItrJcPoXTyD8,1650
+ jinja2/parser.py,sha256=Y199wPL-G67gJoi5G_5sHuu9uEP1PJkjjLEW_xTH8-k,39736
+ jinja2/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ jinja2/runtime.py,sha256=_6LkKIWFJjQdqlrgA3K39zBFQ-7Orm3wGDm96RwxQoE,33406
+ jinja2/sandbox.py,sha256=Y0xZeXQnH6EX5VjaV2YixESxoepnRbW_3UeQosaBU3M,14584
+ jinja2/tests.py,sha256=Am5Z6Lmfr2XaH_npIfJJ8MdXtWsbLjMULZJulTAj30E,5905
+ jinja2/utils.py,sha256=IMwRIcN1SsTw2-jdQtlH2KzNABsXZBW_-tnFXafQBvY,23933
+ jinja2/visitor.py,sha256=MH14C6yq24G_KVtWzjwaI7Wg14PCJIYlWW1kpkxYak0,3568
venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: bdist_wheel (0.42.0)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+ [babel.extractors]
+ jinja2 = jinja2.ext:babel_extract[i18n]
venv/lib/python3.10/site-packages/Jinja2-3.1.3.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ jinja2
venv/lib/python3.10/site-packages/filelock/__init__.py ADDED
@@ -0,0 +1,52 @@
+ """
+ A platform independent file lock that supports the with-statement.
+
+ .. autodata:: filelock.__version__
+    :no-value:
+
+ """
+
+ from __future__ import annotations
+
+ import sys
+ import warnings
+ from typing import TYPE_CHECKING
+
+ from ._api import AcquireReturnProxy, BaseFileLock
+ from ._error import Timeout
+ from ._soft import SoftFileLock
+ from ._unix import UnixFileLock, has_fcntl
+ from ._windows import WindowsFileLock
+ from .version import version
+
+ #: version of the project as a string
+ __version__: str = version
+
+
+ if sys.platform == "win32":  # pragma: win32 cover
+     _FileLock: type[BaseFileLock] = WindowsFileLock
+ else:  # pragma: win32 no cover # noqa: PLR5501
+     if has_fcntl:
+         _FileLock: type[BaseFileLock] = UnixFileLock
+     else:
+         _FileLock = SoftFileLock
+         if warnings is not None:
+             warnings.warn("only soft file lock is available", stacklevel=2)
+
+ if TYPE_CHECKING:
+     FileLock = SoftFileLock
+ else:
+     #: Alias for the lock, which should be used for the current platform.
+     FileLock = _FileLock
+
+
+ __all__ = [
+     "AcquireReturnProxy",
+     "BaseFileLock",
+     "FileLock",
+     "SoftFileLock",
+     "Timeout",
+     "UnixFileLock",
+     "WindowsFileLock",
+     "__version__",
+ ]
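For context, a minimal usage sketch of the package added above (this code is not part of the commit): FileLock resolves to the platform-appropriate lock class selected in this __init__.py, and the with-statement acquires and releases it. The file names below are hypothetical.

# Illustrative only; paths are placeholders.
from filelock import FileLock, Timeout

lock = FileLock("shared_resource.txt.lock", timeout=10)
try:
    with lock:  # blocks for up to 10 seconds, then raises Timeout
        with open("shared_resource.txt", "a") as fh:
            fh.write("exclusive write\n")
except Timeout:
    print("another process is holding the lock")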
venv/lib/python3.10/site-packages/filelock/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.07 kB).
 
venv/lib/python3.10/site-packages/filelock/__pycache__/_api.cpython-310.pyc ADDED
Binary file (11.3 kB).
 
venv/lib/python3.10/site-packages/filelock/__pycache__/_error.cpython-310.pyc ADDED
Binary file (1.45 kB).
 
venv/lib/python3.10/site-packages/filelock/__pycache__/_soft.cpython-310.pyc ADDED
Binary file (1.56 kB).
 
venv/lib/python3.10/site-packages/filelock/__pycache__/_unix.cpython-310.pyc ADDED
Binary file (2.13 kB).
 
venv/lib/python3.10/site-packages/filelock/__pycache__/_util.cpython-310.pyc ADDED
Binary file (1.51 kB).
 
venv/lib/python3.10/site-packages/filelock/__pycache__/_windows.cpython-310.pyc ADDED
Binary file (2.08 kB).
 
venv/lib/python3.10/site-packages/filelock/__pycache__/version.cpython-310.pyc ADDED
Binary file (499 Bytes).
 
venv/lib/python3.10/site-packages/filelock/_api.py ADDED
@@ -0,0 +1,341 @@
+ from __future__ import annotations
+
+ import contextlib
+ import logging
+ import os
+ import time
+ import warnings
+ from abc import ABC, abstractmethod
+ from dataclasses import dataclass
+ from threading import local
+ from typing import TYPE_CHECKING, Any
+ from weakref import WeakValueDictionary
+
+ from ._error import Timeout
+
+ if TYPE_CHECKING:
+     import sys
+     from types import TracebackType
+
+     if sys.version_info >= (3, 11):  # pragma: no cover (py311+)
+         from typing import Self
+     else:  # pragma: no cover (<py311)
+         from typing_extensions import Self
+
+
+ _LOGGER = logging.getLogger("filelock")
+
+
+ # This is a helper class which is returned by :meth:`BaseFileLock.acquire` and wraps the lock to make sure __enter__
+ # is not called twice when entering the with statement. If we would simply return *self*, the lock would be acquired
+ # again in the *__enter__* method of the BaseFileLock, but not released again automatically. issue #37 (memory leak)
+ class AcquireReturnProxy:
+     """A context-aware object that will release the lock file when exiting."""
+
+     def __init__(self, lock: BaseFileLock) -> None:
+         self.lock = lock
+
+     def __enter__(self) -> BaseFileLock:
+         return self.lock
+
+     def __exit__(
+         self,
+         exc_type: type[BaseException] | None,
+         exc_value: BaseException | None,
+         traceback: TracebackType | None,
+     ) -> None:
+         self.lock.release()
+
+
+ @dataclass
+ class FileLockContext:
+     """A dataclass which holds the context for a ``BaseFileLock`` object."""
+
+     # The context is held in a separate class to allow optional use of thread local storage via the
+     # ThreadLocalFileContext class.
+
+     #: The path to the lock file.
+     lock_file: str
+
+     #: The default timeout value.
+     timeout: float
+
+     #: The mode for the lock files
+     mode: int
+
+     #: The file descriptor for the *_lock_file* as it is returned by the os.open() function, not None when lock held
+     lock_file_fd: int | None = None
+
+     #: The lock counter is used for implementing the nested locking mechanism.
+     lock_counter: int = 0  # When the lock is acquired is increased and the lock is only released, when this value is 0
+
+
+ class ThreadLocalFileContext(FileLockContext, local):
+     """A thread local version of the ``FileLockContext`` class."""
+
+
+ class BaseFileLock(ABC, contextlib.ContextDecorator):
+     """Abstract base class for a file lock object."""
+
+     _instances: WeakValueDictionary[str, BaseFileLock]
+
+     def __new__(  # noqa: PLR0913
+         cls,
+         lock_file: str | os.PathLike[str],
+         timeout: float = -1,
+         mode: int = 0o644,
+         thread_local: bool = True,  # noqa: ARG003, FBT001, FBT002
+         *,
+         is_singleton: bool = False,
+         **kwargs: dict[str, Any],  # capture remaining kwargs for subclasses  # noqa: ARG003
+     ) -> Self:
+         """Create a new lock object or if specified return the singleton instance for the lock file."""
+         if not is_singleton:
+             return super().__new__(cls)
+
+         instance = cls._instances.get(str(lock_file))
+         if not instance:
+             instance = super().__new__(cls)
+             cls._instances[str(lock_file)] = instance
+         elif timeout != instance.timeout or mode != instance.mode:
+             msg = "Singleton lock instances cannot be initialized with differing arguments"
+             raise ValueError(msg)
+
+         return instance  # type: ignore[return-value] # https://github.com/python/mypy/issues/15322
+
+     def __init_subclass__(cls, **kwargs: dict[str, Any]) -> None:
+         """Setup unique state for lock subclasses."""
+         super().__init_subclass__(**kwargs)
+         cls._instances = WeakValueDictionary()
+
+     def __init__(  # noqa: PLR0913
+         self,
+         lock_file: str | os.PathLike[str],
+         timeout: float = -1,
+         mode: int = 0o644,
+         thread_local: bool = True,  # noqa: FBT001, FBT002
+         *,
+         is_singleton: bool = False,
+     ) -> None:
+         """
+         Create a new lock object.
+
+         :param lock_file: path to the file
+         :param timeout: default timeout when acquiring the lock, in seconds. It will be used as fallback value in \
+             the acquire method, if no timeout value (``None``) is given. If you want to disable the timeout, set it \
+             to a negative value. A timeout of 0 means that there is exactly one attempt to acquire the file lock.
+         :param mode: file permissions for the lockfile
+         :param thread_local: Whether this object's internal context should be thread local or not. If this is set to \
+             ``False`` then the lock will be reentrant across threads.
+         :param is_singleton: If this is set to ``True`` then only one instance of this class will be created \
+             per lock file. This is useful if you want to use the lock object for reentrant locking without needing \
+             to pass the same object around.
+
+         """
+         self._is_thread_local = thread_local
+         self._is_singleton = is_singleton
+
+         # Create the context. Note that external code should not work with the context directly and should instead use
+         # properties of this class.
+         kwargs: dict[str, Any] = {
+             "lock_file": os.fspath(lock_file),
+             "timeout": timeout,
+             "mode": mode,
+         }
+         self._context: FileLockContext = (ThreadLocalFileContext if thread_local else FileLockContext)(**kwargs)
+
+     def is_thread_local(self) -> bool:
+         """:return: a flag indicating if this lock is thread local or not"""
+         return self._is_thread_local
+
+     @property
+     def is_singleton(self) -> bool:
+         """:return: a flag indicating if this lock is singleton or not"""
+         return self._is_singleton
+
+     @property
+     def lock_file(self) -> str:
+         """:return: path to the lock file"""
+         return self._context.lock_file
+
+     @property
+     def timeout(self) -> float:
+         """
+         :return: the default timeout value, in seconds
+
+         .. versionadded:: 2.0.0
+         """
+         return self._context.timeout
+
+     @timeout.setter
+     def timeout(self, value: float | str) -> None:
+         """
+         Change the default timeout value.
+
+         :param value: the new value, in seconds
+
+         """
+         self._context.timeout = float(value)
+
+     @property
+     def mode(self) -> int:
+         """:return: the file permissions for the lockfile"""
+         return self._context.mode
+
+     @abstractmethod
+     def _acquire(self) -> None:
+         """If the file lock could be acquired, self._context.lock_file_fd holds the file descriptor of the lock file."""
+         raise NotImplementedError
+
+     @abstractmethod
+     def _release(self) -> None:
+         """Releases the lock and sets self._context.lock_file_fd to None."""
+         raise NotImplementedError
+
+     @property
+     def is_locked(self) -> bool:
+         """
+
+         :return: A boolean indicating if the lock file is holding the lock currently.
+
+         .. versionchanged:: 2.0.0
+
+             This was previously a method and is now a property.
+         """
+         return self._context.lock_file_fd is not None
+
+     @property
+     def lock_counter(self) -> int:
+         """:return: The number of times this lock has been acquired (but not yet released)."""
+         return self._context.lock_counter
+
+     def acquire(
+         self,
+         timeout: float | None = None,
+         poll_interval: float = 0.05,
+         *,
+         poll_intervall: float | None = None,
+         blocking: bool = True,
+     ) -> AcquireReturnProxy:
+         """
+         Try to acquire the file lock.
+
+         :param timeout: maximum wait time for acquiring the lock, ``None`` means use the default :attr:`~timeout` is and
+             if ``timeout < 0``, there is no timeout and this method will block until the lock could be acquired
+         :param poll_interval: interval of trying to acquire the lock file
+         :param poll_intervall: deprecated, kept for backwards compatibility, use ``poll_interval`` instead
+         :param blocking: defaults to True. If False, function will return immediately if it cannot obtain a lock on the
+             first attempt. Otherwise, this method will block until the timeout expires or the lock is acquired.
+         :raises Timeout: if fails to acquire lock within the timeout period
+         :return: a context object that will unlock the file when the context is exited
+
+         .. code-block:: python
+
+             # You can use this method in the context manager (recommended)
+             with lock.acquire():
+                 pass
+
+             # Or use an equivalent try-finally construct:
+             lock.acquire()
+             try:
+                 pass
+             finally:
+                 lock.release()
+
+         .. versionchanged:: 2.0.0
+
+             This method returns now a *proxy* object instead of *self*,
+             so that it can be used in a with statement without side effects.
+
+         """
+         # Use the default timeout, if no timeout is provided.
+         if timeout is None:
+             timeout = self._context.timeout
+
+         if poll_intervall is not None:
+             msg = "use poll_interval instead of poll_intervall"
+             warnings.warn(msg, DeprecationWarning, stacklevel=2)
+             poll_interval = poll_intervall
+
+         # Increment the number right at the beginning. We can still undo it, if something fails.
+         self._context.lock_counter += 1
+
+         lock_id = id(self)
+         lock_filename = self.lock_file
+         start_time = time.perf_counter()
+         try:
+             while True:
+                 if not self.is_locked:
+                     _LOGGER.debug("Attempting to acquire lock %s on %s", lock_id, lock_filename)
+                     self._acquire()
+                 if self.is_locked:
+                     _LOGGER.debug("Lock %s acquired on %s", lock_id, lock_filename)
+                     break
+                 if blocking is False:
+                     _LOGGER.debug("Failed to immediately acquire lock %s on %s", lock_id, lock_filename)
+                     raise Timeout(lock_filename)  # noqa: TRY301
+                 if 0 <= timeout < time.perf_counter() - start_time:
+                     _LOGGER.debug("Timeout on acquiring lock %s on %s", lock_id, lock_filename)
+                     raise Timeout(lock_filename)  # noqa: TRY301
+                 msg = "Lock %s not acquired on %s, waiting %s seconds ..."
+                 _LOGGER.debug(msg, lock_id, lock_filename, poll_interval)
+                 time.sleep(poll_interval)
+         except BaseException:  # Something did go wrong, so decrement the counter.
+             self._context.lock_counter = max(0, self._context.lock_counter - 1)
+             raise
+         return AcquireReturnProxy(lock=self)
+
+     def release(self, force: bool = False) -> None:  # noqa: FBT001, FBT002
+         """
+         Releases the file lock. Please note, that the lock is only completely released, if the lock counter is 0.
+         Also note, that the lock file itself is not automatically deleted.
+
+         :param force: If true, the lock counter is ignored and the lock is released in every case/
+
+         """
+         if self.is_locked:
+             self._context.lock_counter -= 1
+
+             if self._context.lock_counter == 0 or force:
+                 lock_id, lock_filename = id(self), self.lock_file
+
+                 _LOGGER.debug("Attempting to release lock %s on %s", lock_id, lock_filename)
+                 self._release()
+                 self._context.lock_counter = 0
+                 _LOGGER.debug("Lock %s released on %s", lock_id, lock_filename)
+
+     def __enter__(self) -> Self:
+         """
+         Acquire the lock.
+
+         :return: the lock object
+
+         """
+         self.acquire()
+         return self
+
+     def __exit__(
+         self,
+         exc_type: type[BaseException] | None,
+         exc_value: BaseException | None,
+         traceback: TracebackType | None,
+     ) -> None:
+         """
+         Release the lock.
+
+         :param exc_type: the exception type if raised
+         :param exc_value: the exception value if raised
+         :param traceback: the exception traceback if raised
+
+         """
+         self.release()
+
+     def __del__(self) -> None:
+         """Called when the lock object is deleted."""
+         self.release(force=True)
+
+
+ __all__ = [
+     "AcquireReturnProxy",
+     "BaseFileLock",
+ ]
venv/lib/python3.10/site-packages/filelock/_error.py ADDED
@@ -0,0 +1,30 @@
+ from __future__ import annotations
+
+ from typing import Any
+
+
+ class Timeout(TimeoutError):  # noqa: N818
+     """Raised when the lock could not be acquired in *timeout* seconds."""
+
+     def __init__(self, lock_file: str) -> None:
+         super().__init__()
+         self._lock_file = lock_file
+
+     def __reduce__(self) -> str | tuple[Any, ...]:
+         return self.__class__, (self._lock_file,)  # Properly pickle the exception
+
+     def __str__(self) -> str:
+         return f"The file lock '{self._lock_file}' could not be acquired."
+
+     def __repr__(self) -> str:
+         return f"{self.__class__.__name__}({self.lock_file!r})"
+
+     @property
+     def lock_file(self) -> str:
+         """:return: The path of the file lock."""
+         return self._lock_file
+
+
+ __all__ = [
+     "Timeout",
+ ]
venv/lib/python3.10/site-packages/filelock/_soft.py ADDED
@@ -0,0 +1,47 @@
+ from __future__ import annotations
+
+ import os
+ import sys
+ from contextlib import suppress
+ from errno import EACCES, EEXIST
+ from pathlib import Path
+
+ from ._api import BaseFileLock
+ from ._util import ensure_directory_exists, raise_on_not_writable_file
+
+
+ class SoftFileLock(BaseFileLock):
+     """Simply watches the existence of the lock file."""
+
+     def _acquire(self) -> None:
+         raise_on_not_writable_file(self.lock_file)
+         ensure_directory_exists(self.lock_file)
+         # first check for exists and read-only mode as the open will mask this case as EEXIST
+         flags = (
+             os.O_WRONLY  # open for writing only
+             | os.O_CREAT
+             | os.O_EXCL  # together with above raise EEXIST if the file specified by filename exists
+             | os.O_TRUNC  # truncate the file to zero byte
+         )
+         try:
+             file_handler = os.open(self.lock_file, flags, self._context.mode)
+         except OSError as exception:  # re-raise unless expected exception
+             if not (
+                 exception.errno == EEXIST  # lock already exist
+                 or (exception.errno == EACCES and sys.platform == "win32")  # has no access to this lock
+             ):  # pragma: win32 no cover
+                 raise
+         else:
+             self._context.lock_file_fd = file_handler
+
+     def _release(self) -> None:
+         assert self._context.lock_file_fd is not None  # noqa: S101
+         os.close(self._context.lock_file_fd)  # the lock file is definitely not None
+         self._context.lock_file_fd = None
+         with suppress(OSError):  # the file is already deleted and that's what we want
+             Path(self.lock_file).unlink()
+
+
+ __all__ = [
+     "SoftFileLock",
+ ]
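As a hedged aside, not part of the diff: SoftFileLock above only checks for the existence of the lock file, which is why the package falls back to it when fcntl locking is unavailable (see filelock/__init__.py earlier in this commit); it can also be chosen explicitly, e.g. on some network filesystems. A minimal sketch, assuming a writable path:

# Sketch only; the lock path is a placeholder.
from filelock import SoftFileLock, Timeout

lock = SoftFileLock("/mnt/shared/job.lock", timeout=5)
try:
    with lock:
        pass  # do work while the lock file exists
except Timeout:
    print("lock file already present; another worker is active")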
venv/lib/python3.10/site-packages/filelock/_unix.py ADDED
@@ -0,0 +1,68 @@
+ from __future__ import annotations
+
+ import os
+ import sys
+ from contextlib import suppress
+ from errno import ENOSYS
+ from pathlib import Path
+ from typing import cast
+
+ from ._api import BaseFileLock
+ from ._util import ensure_directory_exists
+
+ #: a flag to indicate if the fcntl API is available
+ has_fcntl = False
+ if sys.platform == "win32":  # pragma: win32 cover
+
+     class UnixFileLock(BaseFileLock):
+         """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""
+
+         def _acquire(self) -> None:
+             raise NotImplementedError
+
+         def _release(self) -> None:
+             raise NotImplementedError
+
+ else:  # pragma: win32 no cover
+     try:
+         import fcntl
+     except ImportError:
+         pass
+     else:
+         has_fcntl = True
+
+     class UnixFileLock(BaseFileLock):
+         """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""
+
+         def _acquire(self) -> None:
+             ensure_directory_exists(self.lock_file)
+             open_flags = os.O_RDWR | os.O_TRUNC
+             if not Path(self.lock_file).exists():
+                 open_flags |= os.O_CREAT
+             fd = os.open(self.lock_file, open_flags, self._context.mode)
+             with suppress(PermissionError):  # This locked is not owned by this UID
+                 os.fchmod(fd, self._context.mode)
+             try:
+                 fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+             except OSError as exception:
+                 os.close(fd)
+                 if exception.errno == ENOSYS:  # NotImplemented error
+                     msg = "FileSystem does not appear to support flock; use SoftFileLock instead"
+                     raise NotImplementedError(msg) from exception
+             else:
+                 self._context.lock_file_fd = fd
+
+         def _release(self) -> None:
+             # Do not remove the lockfile:
+             # https://github.com/tox-dev/py-filelock/issues/31
+             # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
+             fd = cast(int, self._context.lock_file_fd)
+             self._context.lock_file_fd = None
+             fcntl.flock(fd, fcntl.LOCK_UN)
+             os.close(fd)
+
+
+ __all__ = [
+     "UnixFileLock",
+     "has_fcntl",
+ ]
venv/lib/python3.10/site-packages/filelock/_util.py ADDED
@@ -0,0 +1,52 @@
+ from __future__ import annotations
+
+ import os
+ import stat
+ import sys
+ from errno import EACCES, EISDIR
+ from pathlib import Path
+
+
+ def raise_on_not_writable_file(filename: str) -> None:
+     """
+     Raise an exception if attempting to open the file for writing would fail.
+
+     This is done so files that will never be writable can be separated from files that are writable but currently
+     locked.
+
+     :param filename: file to check
+     :raises OSError: as if the file was opened for writing.
+
+     """
+     try:  # use stat to do exists + can write to check without race condition
+         file_stat = os.stat(filename)  # noqa: PTH116
+     except OSError:
+         return  # swallow does not exist or other errors
+
+     if file_stat.st_mtime != 0:  # if os.stat returns but modification is zero that's an invalid os.stat - ignore it
+         if not (file_stat.st_mode & stat.S_IWUSR):
+             raise PermissionError(EACCES, "Permission denied", filename)
+
+         if stat.S_ISDIR(file_stat.st_mode):
+             if sys.platform == "win32":  # pragma: win32 cover
+                 # On Windows, this is PermissionError
+                 raise PermissionError(EACCES, "Permission denied", filename)
+             else:  # pragma: win32 no cover # noqa: RET506
+                 # On linux / macOS, this is IsADirectoryError
+                 raise IsADirectoryError(EISDIR, "Is a directory", filename)
+
+
+ def ensure_directory_exists(filename: Path | str) -> None:
+     """
+     Ensure the directory containing the file exists (create it if necessary).
+
+     :param filename: file.
+
+     """
+     Path(filename).parent.mkdir(parents=True, exist_ok=True)
+
+
+ __all__ = [
+     "ensure_directory_exists",
+     "raise_on_not_writable_file",
+ ]
venv/lib/python3.10/site-packages/filelock/_windows.py ADDED
@@ -0,0 +1,65 @@
+ from __future__ import annotations
+
+ import os
+ import sys
+ from contextlib import suppress
+ from errno import EACCES
+ from pathlib import Path
+ from typing import cast
+
+ from ._api import BaseFileLock
+ from ._util import ensure_directory_exists, raise_on_not_writable_file
+
+ if sys.platform == "win32":  # pragma: win32 cover
+     import msvcrt
+
+     class WindowsFileLock(BaseFileLock):
+         """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""
+
+         def _acquire(self) -> None:
+             raise_on_not_writable_file(self.lock_file)
+             ensure_directory_exists(self.lock_file)
+             flags = (
+                 os.O_RDWR  # open for read and write
+                 | os.O_CREAT  # create file if not exists
+                 | os.O_TRUNC  # truncate file if not empty
+             )
+             try:
+                 fd = os.open(self.lock_file, flags, self._context.mode)
+             except OSError as exception:
+                 if exception.errno != EACCES:  # has no access to this lock
+                     raise
+             else:
+                 try:
+                     msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
+                 except OSError as exception:
+                     os.close(fd)  # close file first
+                     if exception.errno != EACCES:  # file is already locked
+                         raise
+                 else:
+                     self._context.lock_file_fd = fd
+
+         def _release(self) -> None:
+             fd = cast(int, self._context.lock_file_fd)
+             self._context.lock_file_fd = None
+             msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
+             os.close(fd)
+
+             with suppress(OSError):  # Probably another instance of the application hat acquired the file lock.
+                 Path(self.lock_file).unlink()
+
+ else:  # pragma: win32 no cover
+
+     class WindowsFileLock(BaseFileLock):
+         """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""
+
+         def _acquire(self) -> None:
+             raise NotImplementedError
+
+         def _release(self) -> None:
+             raise NotImplementedError
+
+
+ __all__ = [
+     "WindowsFileLock",
+ ]
venv/lib/python3.10/site-packages/filelock/py.typed ADDED
File without changes
venv/lib/python3.10/site-packages/filelock/version.py ADDED
@@ -0,0 +1,16 @@
+ # file generated by setuptools_scm
+ # don't change, don't track in version control
+ TYPE_CHECKING = False
+ if TYPE_CHECKING:
+     from typing import Tuple, Union
+     VERSION_TUPLE = Tuple[Union[int, str], ...]
+ else:
+     VERSION_TUPLE = object
+
+ version: str
+ __version__: str
+ __version_tuple__: VERSION_TUPLE
+ version_tuple: VERSION_TUPLE
+
+ __version__ = version = '3.13.4'
+ __version_tuple__ = version_tuple = (3, 13, 4)
venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (65.2 kB).
 
venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (1.35 kB).
 
venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/base.cpython-310.pyc ADDED
Binary file (7.35 kB).
 
venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/fake_sgml.cpython-310.pyc ADDED
Binary file (4.12 kB).
 
venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/iwslt_xml.cpython-310.pyc ADDED
Binary file (475 Bytes).
 
venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/plain_text.cpython-310.pyc ADDED
Binary file (1.57 kB).
 
venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/tsv.cpython-310.pyc ADDED
Binary file (2.23 kB).
 
venv/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/wmt_xml.cpython-310.pyc ADDED
Binary file (7.41 kB).
 
venv/lib/python3.10/site-packages/sacrebleu/dataset/base.py ADDED
@@ -0,0 +1,195 @@
+ """
+ The base class for all types of datasets.
+ """
+ import os
+ import re
+ from abc import ABCMeta, abstractmethod
+ from typing import Dict, List, Optional
+
+ from ..utils import SACREBLEU_DIR, download_file, smart_open
+
+
+ class Dataset(metaclass=ABCMeta):
+     def __init__(
+         self,
+         name: str,
+         data: Optional[List[str]] = None,
+         description: Optional[str] = None,
+         citation: Optional[str] = None,
+         md5: Optional[List[str]] = None,
+         langpairs=Dict[str, List[str]],
+         **kwargs,
+     ):
+         """
+         Params come from the values in DATASETS.
+
+         :param name: Name of the dataset.
+         :param data: URL of the raw data of the dataset.
+         :param description: Description of the dataset.
+         :param citation: Citation for the dataset.
+         :param md5: MD5 checksum of the dataset.
+         :param langpairs: List of available language pairs.
+         """
+         self.name = name
+         self.data = data
+         self.description = description
+         self.citation = citation
+         self.md5 = md5
+         self.langpairs = langpairs
+         self.kwargs = kwargs
+
+         # Don't do any downloading or further processing now.
+         # Only do that lazily, when asked.
+
+         # where to store the dataset
+         self._outdir = os.path.join(SACREBLEU_DIR, self.name)
+         self._rawdir = os.path.join(self._outdir, "raw")
+
+     def maybe_download(self):
+         """
+         If the dataset isn't downloaded, use utils/download_file()
+         This can be implemented here in the base class. It should write
+         to ~/.sacreleu/DATASET/raw exactly as it does now.
+         """
+         os.makedirs(self._rawdir, exist_ok=True)
+
+         expected_checksums = self.md5 if self.md5 else [None] * len(self.data)
+
+         for url, expected_md5 in zip(self.data, expected_checksums):
+             tarball = os.path.join(self._rawdir, self._get_tarball_filename(url))
+
+             download_file(
+                 url, tarball, extract_to=self._rawdir, expected_md5=expected_md5
+             )
+
+     @staticmethod
+     def _clean(s):
+         """
+         Removes trailing and leading spaces and collapses multiple consecutive internal spaces to a single one.
+
+         :param s: The string.
+         :return: A cleaned-up string.
+         """
+         return re.sub(r"\s+", " ", s.strip())
+
+     def _get_tarball_filename(self, url):
+         """
+         Produces a local filename for tarball.
+         :param url: The url to download.
+         :return: A name produced from the dataset identifier and the URL basename.
+         """
+         return self.name.replace("/", "_") + "." + os.path.basename(url)
+
+     def _get_txt_file_path(self, langpair, fieldname):
+         """
+         Given the language pair and fieldname, return the path to the text file.
+         The format is: ~/.sacrebleu/DATASET/DATASET.LANGPAIR.FIELDNAME
+
+         :param langpair: The language pair.
+         :param fieldname: The fieldname.
+         :return: The path to the text file.
+         """
+         # handle the special case of subsets. e.g. "wmt21/dev" > "wmt21_dev"
+         name = self.name.replace("/", "_")
+         # Colons are used to distinguish multiple references, but are not supported in Windows filenames
+         fieldname = fieldname.replace(":", "-")
+         return os.path.join(self._outdir, f"{name}.{langpair}.{fieldname}")
+
+     def _get_langpair_metadata(self, langpair):
+         """
+         Given a language pair, return the metadata for that language pair.
+         Deal with errors if the language pair is not available.
+
+         :param langpair: The language pair. e.g. "en-de"
+         :return: Dict format which is same as self.langpairs.
+         """
+         if langpair is None:
+             langpairs = self.langpairs
+         elif langpair not in self.langpairs:
+             raise Exception(f"No such language pair {self.name}/{langpair}")
+         else:
+             langpairs = {langpair: self.langpairs[langpair]}
+
+         return langpairs
+
+     @abstractmethod
+     def process_to_text(self, langpair=None) -> None:
+         """Processes raw files to plain text files.
+
+         :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed.
+         """
+         pass
+
+     def fieldnames(self, langpair) -> List[str]:
+         """
+         Return a list of all the field names. For most source, this is just
+         the source and the reference. For others, it might include the document
+         ID for each line, or the original language (origLang).
+
+         get_files() should return the same number of items as this.
+
+         :param langpair: The language pair (e.g., "de-en")
+         :return: a list of field names
+         """
+         return ["src", "ref"]
+
+     def __iter__(self, langpair):
+         """
+         Iterates over all fields (source, references, and other metadata) defined
+         by the dataset.
+         """
+         all_files = self.get_files(langpair)
+         all_fins = [smart_open(f) for f in all_files]
+
+         for item in zip(*all_fins):
+             yield item
+
+     def source(self, langpair):
+         """
+         Return an iterable over the source lines.
+         """
+         source_file = self.get_source_file(langpair)
+         with smart_open(source_file) as fin:
+             for line in fin:
+                 yield line.strip()
+
+     def references(self, langpair):
+         """
+         Return an iterable over the references.
+         """
+         ref_files = self.get_reference_files(langpair)
+         ref_fins = [smart_open(f) for f in ref_files]
+
+         for item in zip(*ref_fins):
+             yield item
+
+     def get_source_file(self, langpair):
+         all_files = self.get_files(langpair)
+         all_fields = self.fieldnames(langpair)
+         index = all_fields.index("src")
+         return all_files[index]
+
+     def get_reference_files(self, langpair):
+         all_files = self.get_files(langpair)
+         all_fields = self.fieldnames(langpair)
+         ref_files = [
+             f for f, field in zip(all_files, all_fields) if field.startswith("ref")
+         ]
+         return ref_files
+
+     def get_files(self, langpair):
+         """
+         Returns the path of the source file and all reference files for
+         the provided test set / language pair.
+         Downloads the references first if they are not already local.
+
+         :param langpair: The language pair (e.g., "de-en")
+         :return: a list of the source file and all reference files
+         """
+         fields = self.fieldnames(langpair)
+         files = [self._get_txt_file_path(langpair, field) for field in fields]
+
+         for file in files:
+             if not os.path.exists(file):
+                 self.process_to_text(langpair)
+         return files
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import re
3
+
4
+ from ..utils import smart_open
5
+ from .base import Dataset
6
+
7
+
8
+ class FakeSGMLDataset(Dataset):
9
+ """
10
+ The fake SGML format used by WMT prior to 2021. Can't be properly parsed.
11
+ Source and reference(s) in separate files.
12
+ """
13
+
14
+ def _convert_format(self, input_file_path, output_filep_path):
15
+ """
16
+ Extract data from raw file and convert to raw txt format.
17
+ """
18
+ with smart_open(input_file_path) as fin, smart_open(
19
+ output_filep_path, "wt"
20
+ ) as fout:
21
+ for line in fin:
22
+ if line.startswith("<seg "):
23
+ line = self._clean(re.sub(r"<seg.*?>(.*)</seg>.*?", "\\1", line))
24
+ print(line, file=fout)
25
+
26
+ def _convert_meta(self, input_file_path, field, output_filep_path):
27
+ """
28
+ Extract metadata from document tags, projects across segments.
29
+ """
30
+ with smart_open(input_file_path) as fin, smart_open(
31
+ output_filep_path, "wt"
32
+ ) as fout:
33
+ value = ""
34
+ for line in fin:
35
+ if line.startswith("<doc "):
36
+ match = re.search(rf'{field}="(.*?)"', line)
37
+ if match is not None:
38
+ value = match.group(1)
39
+
40
+ elif line.startswith("<seg "):
41
+ # print the current value once for each field
42
+ print(value, file=fout)
43
+
44
+ def process_to_text(self, langpair=None):
45
+ """Processes raw files to plain text files.
46
+
47
+ :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed.
48
+ """
49
+ # ensure that the dataset is downloaded
50
+ self.maybe_download()
51
+ langpairs = self._get_langpair_metadata(langpair)
52
+
53
+ for langpair in langpairs:
54
+ fieldnames = self.fieldnames(langpair)
55
+ origin_files = [
56
+ os.path.join(self._rawdir, path) for path in langpairs[langpair]
57
+ ]
58
+
59
+ # Add the source file three more times for docid, genre, origlang
60
+ origin_files += [
61
+ os.path.join(self._rawdir, langpairs[langpair][0]) for _ in range(3)
62
+ ]
63
+
64
+ for field, origin_file in zip(fieldnames, origin_files):
65
+
66
+ origin_file = os.path.join(self._rawdir, origin_file)
67
+ output_file = self._get_txt_file_path(langpair, field)
68
+
69
+ if field.startswith("src") or field.startswith("ref"):
70
+ self._convert_format(origin_file, output_file)
71
+ else:
72
+ # document metadata keys
73
+ self._convert_meta(origin_file, field, output_file)
74
+
75
+ def fieldnames(self, langpair):
76
+ """
77
+ Return a list of all the field names. For most source, this is just
78
+ the source and the reference. For others, it might include the document
79
+ ID for each line, or the original language (origLang).
80
+
81
+ get_files() should return the same number of items as this.
82
+ """
83
+ meta = self._get_langpair_metadata(langpair)
84
+ length = len(meta[langpair])
85
+
86
+ assert (
87
+ length >= 2
88
+ ), f"Each language pair in {self.name} must have at least 2 fields."
89
+
90
+ fields = ["src"]
91
+
92
+ if length == 2:
93
+ fields.append("ref")
94
+ else:
95
+ for i, _ in enumerate(meta[langpair][1:]):
96
+ fields.append(f"ref:{i}")
97
+
98
+ if not self.name.startswith("wmt08"):
99
+ fields += ["docid", "genre", "origlang"]
100
+
101
+ return fields
102
+
103
+
104
+ class WMTAdditionDataset(FakeSGMLDataset):
105
+ """
106
+ Handle special case of WMT Google addition dataset.
107
+ """
108
+
109
+ def _convert_format(self, input_file_path, output_filep_path):
110
+ if input_file_path.endswith(".sgm"):
111
+ return super()._convert_format(input_file_path, output_filep_path)
112
+ else:
113
+ with smart_open(input_file_path) as fin:
114
+ with smart_open(output_filep_path, "wt") as fout:
115
+ for line in fin:
116
+ print(line.rstrip(), file=fout)
venv/lib/python3.10/site-packages/sacrebleu/dataset/wmt_xml.py ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ import lxml.etree as ET
4
+
5
+ from ..utils import smart_open
6
+ from .base import Dataset
7
+
8
+ from collections import defaultdict
9
+
10
+
11
+ def _get_field_by_translator(translator):
12
+ if not translator:
13
+ return "ref"
14
+ else:
15
+ return f"ref:{translator}"
16
+
17
+ class WMTXMLDataset(Dataset):
18
+ """
19
+ The 2021+ WMT dataset format. Everything is contained in a single file.
20
+ Can be parsed with the lxml parser.
21
+ """
22
+ @staticmethod
23
+ def _unwrap_wmt21_or_later(raw_file):
24
+ """
25
+ Unwraps the XML file from wmt21 or later.
26
+ This script is adapted from https://github.com/wmt-conference/wmt-format-tools
27
+
28
+ :param raw_file: The raw xml file to unwrap.
29
+ :return: Dictionary which contains the following fields:
30
+ - `src`: The source sentences.
31
+ - `docid`: ID indicating which document the sentences belong to.
32
+ - `origlang`: The original language of the document.
33
+ - `ref:{translator}`: The references produced by each translator.
34
+ - `ref`: An alias for the references from the first translator.
35
+ """
36
+ tree = ET.parse(raw_file)
37
+ # Find and check the documents (src, ref, hyp)
38
+ src_langs, ref_langs, translators = set(), set(), set()
39
+ for src_doc in tree.getroot().findall(".//src"):
40
+ src_langs.add(src_doc.get("lang"))
41
+
42
+ for ref_doc in tree.getroot().findall(".//ref"):
43
+ ref_langs.add(ref_doc.get("lang"))
44
+ translator = ref_doc.get("translator")
45
+ translators.add(translator)
46
+
47
+ assert (
48
+ len(src_langs) == 1
49
+ ), f"Multiple source languages found in the file: {raw_file}"
50
+ assert (
51
+ len(ref_langs) == 1
52
+ ), f"Found {len(ref_langs)} reference languages found in the file: {raw_file}"
53
+
54
+ src = []
55
+ docids = []
56
+ orig_langs = []
57
+ domains = []
58
+
59
+ refs = { _get_field_by_translator(translator): [] for translator in translators }
60
+
61
+ systems = defaultdict(list)
62
+
63
+ src_sent_count, doc_count = 0, 0
64
+ for doc in tree.getroot().findall(".//doc"):
65
+ docid = doc.attrib["id"]
66
+ origlang = doc.attrib["origlang"]
67
+ # present wmt22++
68
+ domain = doc.attrib.get("domain", None)
69
+
70
+ # Skip the testsuite
71
+ if "testsuite" in doc.attrib:
72
+ continue
73
+
74
+ doc_count += 1
75
+ src_sents = {
76
+ int(seg.get("id")): seg.text for seg in doc.findall(".//src//seg")
77
+ }
78
+
79
+ def get_sents(doc):
80
+ return {
81
+ int(seg.get("id")): seg.text if seg.text else ""
82
+ for seg in doc.findall(".//seg")
83
+ }
84
+
85
+ ref_docs = doc.findall(".//ref")
86
+
87
+ trans_to_ref = {
88
+ ref_doc.get("translator"): get_sents(ref_doc) for ref_doc in ref_docs
89
+ }
90
+
91
+ hyp_docs = doc.findall(".//hyp")
92
+ hyps = {
93
+ hyp_doc.get("system"): get_sents(hyp_doc) for hyp_doc in hyp_docs
94
+ }
95
+
96
+ for seg_id in sorted(src_sents.keys()):
97
+ # no ref translation is available for this segment
98
+ if not any([value.get(seg_id, "") for value in trans_to_ref.values()]):
99
+ continue
100
+ for translator in translators:
101
+ refs[_get_field_by_translator(translator)].append(
102
+ trans_to_ref.get(translator, {translator: {}}).get(seg_id, "")
103
+ )
104
+ src.append(src_sents[seg_id])
105
+ for system_name in hyps.keys():
106
+ systems[system_name].append(hyps[system_name][seg_id])
107
+ docids.append(docid)
108
+ orig_langs.append(origlang)
109
+ if domain is not None:
110
+ domains.append(domain)
111
+ src_sent_count += 1
112
+
113
+ data = {"src": src, **refs, "docid": docids, "origlang": orig_langs, **systems}
114
+ if len(domains):
115
+ data["domain"] = domains
116
+
117
+ return data
118
+
119
+ def _get_langpair_path(self, langpair):
120
+ """
121
+ Returns the path for this language pair.
122
+ This is useful because in WMT22, the language-pair data structure can be a dict,
123
+ in order to allow for overriding which test set to use.
124
+ """
125
+ langpair_data = self._get_langpair_metadata(langpair)[langpair]
126
+ rel_path = langpair_data["path"] if isinstance(langpair_data, dict) else langpair_data[0]
127
+ return os.path.join(self._rawdir, rel_path)
128
+
129
+ def process_to_text(self, langpair=None):
130
+ """Processes raw files to plain text files.
131
+
132
+ :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed.
133
+ """
134
+ # ensure that the dataset is downloaded
135
+ self.maybe_download()
136
+
137
+ for langpair in sorted(self._get_langpair_metadata(langpair).keys()):
138
+ # The data for a language pair can be a list of paths, or a dict containing the "path"
139
+ # and an override of which labeled reference to use (key "refs")
140
+ rawfile = self._get_langpair_path(langpair)
141
+
142
+ with smart_open(rawfile) as fin:
143
+ fields = self._unwrap_wmt21_or_later(fin)
144
+
145
+ for fieldname in fields:
146
+ textfile = self._get_txt_file_path(langpair, fieldname)
147
+
148
+ # skip if the file already exists
149
+ if os.path.exists(textfile) and os.path.getsize(textfile) > 0:
150
+ continue
151
+
152
+ with smart_open(textfile, "w") as fout:
153
+ for line in fields[fieldname]:
154
+ print(self._clean(line), file=fout)
155
+
156
+ def _get_langpair_allowed_refs(self, langpair):
157
+ """
158
+ Returns the preferred references for this language pair.
159
+ This can be set in the language pair block (as in WMT22), and backs off to the
160
+ test-set-level default, or nothing.
161
+
162
+ There is one exception. In the metadata, sometimes there is no translator field
163
+ listed (e.g., wmt22:liv-en). In this case, the reference is set to "", and the
164
+ field "ref" is returned.
165
+ """
166
+ defaults = self.kwargs.get("refs", [])
167
+ langpair_data = self._get_langpair_metadata(langpair)[langpair]
168
+ if isinstance(langpair_data, dict):
169
+ allowed_refs = langpair_data.get("refs", defaults)
170
+ else:
171
+ allowed_refs = defaults
172
+ allowed_refs = [_get_field_by_translator(ref) for ref in allowed_refs]
173
+
174
+ return allowed_refs
175
+
176
+ def get_reference_files(self, langpair):
177
+ """
178
+ Returns the requested reference files.
179
+ This is defined as a default at the test-set level, and can be overridden per language.
180
+ """
181
+ # Iterate through the (label, file path) pairs, looking for permitted labels
182
+ allowed_refs = self._get_langpair_allowed_refs(langpair)
183
+ all_files = self.get_files(langpair)
184
+ all_fields = self.fieldnames(langpair)
185
+ ref_files = [
186
+ f for f, field in zip(all_files, all_fields) if field in allowed_refs
187
+ ]
188
+ return ref_files
189
+
190
+ def fieldnames(self, langpair):
191
+ """
192
+ Return a list of all the field names. For most sources, this is just
193
+ the source and the reference. For others, it might include the document
194
+ ID for each line, or the original language (origLang).
195
+
196
+ get_files() should return the same number of items as this.
197
+
198
+ :param langpair: The language pair (e.g., "de-en")
199
+ :return: a list of field names
200
+ """
201
+ self.maybe_download()
202
+ rawfile = self._get_langpair_path(langpair)
203
+
204
+ with smart_open(rawfile) as fin:
205
+ fields = self._unwrap_wmt21_or_later(fin)
206
+
207
+ return list(fields.keys())
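For orientation, here is a small, self-contained sketch of the 2021+ XML layout that `_unwrap_wmt21_or_later()` walks over. The element and attribute names (`doc`, `src`, `ref`, `seg`, `id`, `origlang`, `translator`) come from the queries above; the sample sentences themselves are invented.

import xml.etree.ElementTree as ET
from io import StringIO

# Hand-written toy document in the WMT21+ layout (content is made up).
sample = """<dataset>
  <doc id="doc1" origlang="en">
    <src lang="en"><p><seg id="1">Hello world .</seg></p></src>
    <ref lang="de" translator="A"><p><seg id="1">Hallo Welt .</seg></p></ref>
  </doc>
</dataset>"""

tree = ET.parse(StringIO(sample))
# The same XPath queries used by _unwrap_wmt21_or_later() above:
print([r.get("translator") for r in tree.getroot().findall(".//ref")])   # ['A']
print({int(s.get("id")): s.text for s in tree.getroot().findall(".//doc//src//seg")})
# {1: 'Hello world .'}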
venv/lib/python3.10/site-packages/sacrebleu/metrics/__init__.py ADDED
@@ -0,0 +1,11 @@
1
+ """The implementation of various metrics."""
2
+
3
+ from .bleu import BLEU, BLEUScore # noqa: F401
4
+ from .chrf import CHRF, CHRFScore # noqa: F401
5
+ from .ter import TER, TERScore # noqa: F401
6
+
7
+ METRICS = {
8
+ 'BLEU': BLEU,
9
+ 'CHRF': CHRF,
10
+ 'TER': TER,
11
+ }
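A brief usage sketch of this registry (the hypothesis and reference strings below are invented; `corpus_score()` is defined on the `Metric` base class in `base.py` further down):

from sacrebleu.metrics import METRICS

hyps = ["the cat sat on the mat"]
refs = [["the cat is on the mat"]]   # one list per reference set

# Instantiate each registered metric with its defaults and score the tiny corpus.
for name, metric_cls in METRICS.items():
    metric = metric_cls()
    print(name, metric.corpus_score(hyps, refs))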
venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (442 Bytes).
 
venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/base.cpython-310.pyc ADDED
Binary file (14.8 kB).
 
venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/bleu.cpython-310.pyc ADDED
Binary file (13.5 kB).
 
venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/chrf.cpython-310.pyc ADDED
Binary file (9.17 kB).
 
venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/helpers.cpython-310.pyc ADDED
Binary file (3.07 kB).
 
venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/lib_ter.cpython-310.pyc ADDED
Binary file (11 kB).
 
venv/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/ter.cpython-310.pyc ADDED
Binary file (7.29 kB).
 
venv/lib/python3.10/site-packages/sacrebleu/metrics/base.py ADDED
@@ -0,0 +1,438 @@
1
+ """The base `Score`, `Metric` and `Signature` classes to derive from.
2
+
3
+ `Metric` is an abstract class that enforces the implementation of a set
4
+ of abstract methods. This way, a correctly implemented metric will work
5
+ seamlessly with the rest of the codebase.
6
+ """
7
+
8
+ import json
9
+ import logging
10
+ import statistics
11
+ from typing import List, Sequence, Any, Optional, Dict
12
+ from abc import ABCMeta, abstractmethod
13
+
14
+ from .. import __version__
15
+
16
+ sacrelogger = logging.getLogger('sacrebleu')
17
+
18
+
19
+ class Score:
20
+ """A base score class to derive from.
21
+
22
+ :param name: The name of the underlying metric.
23
+ :param score: A floating point number for the final metric.
24
+ """
25
+ def __init__(self, name: str, score: float):
26
+ """`Score` initializer."""
27
+ self.name = name
28
+ self.score = score
29
+
30
+ # Statistical test related fields
31
+ self._mean = -1.0
32
+ self._ci = -1.0
33
+
34
+ # More info can be added right after the score
35
+ self._verbose = ''
36
+
37
+ def format(self, width: int = 2, score_only: bool = False,
38
+ signature: str = '', is_json: bool = False) -> str:
39
+ """Returns a pretty representation of the score.
40
+ :param width: Floating point decimal precision width.
41
+ :param score_only: If `True`, and the format is not `json`,
42
+ returns a single score string.
43
+ :param signature: A string representation of the given `Signature`
44
+ instance.
45
+ :param is_json: If `True`, will output the score in JSON string.
46
+ :return: A plain or JSON-formatted string representation.
47
+ """
48
+ d = {
49
+ 'name': self.name,
50
+ 'score': float(f'{self.score:.{width}f}'),
51
+ 'signature': signature,
52
+ }
53
+
54
+ sc = f'{self.score:.{width}f}'
55
+
56
+ if self._mean > 0:
57
+ confidence_mean = f'{self._mean:.{width}f}'
58
+ confidence_var = f'{self._ci:.{width}f}'
59
+ confidence_str = f'μ = {confidence_mean} ± {confidence_var}'
60
+
61
+ sc += f' ({confidence_str})'
62
+ if is_json:
63
+ d['confidence_mean'] = float(confidence_mean)
64
+ d['confidence_var'] = float(confidence_var)
65
+ d['confidence'] = confidence_str
66
+
67
+ # Construct full score line
68
+ full_score = f"{self.name}|{signature}" if signature else self.name
69
+ full_score = f"{full_score} = {sc}"
70
+ if self._verbose:
71
+ full_score += f' {self._verbose}'
72
+ d['verbose_score'] = self._verbose
73
+
74
+ if score_only:
75
+ return sc
76
+
77
+ if is_json:
78
+ for param in signature.split('|'):
79
+ key, value = param.split(':')
80
+ d[key] = value
81
+ return json.dumps(d, indent=1, ensure_ascii=False)
82
+
83
+ return full_score
84
+
85
+ def estimate_ci(self, scores: List['Score']):
86
+ """Takes a list of scores and stores mean, stdev and 95% confidence
87
+ interval around the mean.
88
+
89
+ :param scores: A list of `Score` objects obtained from bootstrap
90
+ resampling for example.
91
+ """
92
+ # Sort the scores
93
+ raw_scores = sorted([x.score for x in scores])
94
+ n = len(raw_scores)
95
+
96
+ # Get CI bounds (95%, i.e. 1/40 from left)
97
+ lower_idx = n // 40
98
+ upper_idx = n - lower_idx - 1
99
+ lower, upper = raw_scores[lower_idx], raw_scores[upper_idx]
100
+ self._ci = 0.5 * (upper - lower)
101
+ self._mean = statistics.mean(raw_scores)
102
+
103
+ def __repr__(self):
104
+ """Returns a human readable score string."""
105
+ return self.format()
106
+
107
+
108
+ class Signature:
109
+ """A convenience class to represent sacreBLEU reproducibility signatures.
110
+
111
+ :param args: key-value dictionary passed from the actual metric instance.
112
+ """
113
+ def __init__(self, args: dict):
114
+ """`Signature` initializer."""
115
+ # Global items that are shared across all metrics
116
+ self._abbr = {
117
+ 'version': 'v',
118
+ 'nrefs': '#',
119
+ 'test': 't',
120
+ 'lang': 'l',
121
+ 'subset': 'S',
122
+ 'origlang': 'o',
123
+ 'bs': 'bs', # Bootstrap resampling trials
124
+ 'ar': 'ar', # Approximate randomization trials
125
+ 'seed': 'rs', # RNG's seed
126
+ }
127
+
128
+ if 'num_refs' not in args:
129
+ raise ValueError(
130
+ 'Number of references unknown, please evaluate the metric first.')
131
+
132
+ num_refs = args['num_refs']
133
+ if num_refs == -1:
134
+ # Detect variable number of refs
135
+ num_refs = 'var'
136
+
137
+ # Global items that are shared across all metrics
138
+ # None's will be ignored
139
+ self.info = {
140
+ 'version': __version__,
141
+ 'nrefs': num_refs,
142
+ 'bs': args.get('n_bootstrap', None),
143
+ 'ar': None,
144
+ 'seed': args.get('seed', None),
145
+ 'test': args.get('test_set', None),
146
+ 'lang': args.get('langpair', None),
147
+ 'origlang': args.get('origlang', None),
148
+ 'subset': args.get('subset', None),
149
+ }
150
+
151
+ def format(self, short: bool = False) -> str:
152
+ """Returns a string representation of the signature.
153
+
154
+ :param short: If True, shortened signature is produced.
155
+ :return: A string representation of the signature.
156
+ """
157
+ pairs = []
158
+ keys = list(self.info.keys())
159
+ # keep version always at end
160
+ keys.remove('version')
161
+ for name in keys + ['version']:
162
+ value = self.info[name]
163
+ if value is not None:
164
+ if isinstance(value, bool):
165
+ # Replace True/False with yes/no
166
+ value = 'yes' if value else 'no'
167
+ final_name = self._abbr[name] if short else name
168
+ pairs.append(f'{final_name}:{value}')
169
+
170
+ return '|'.join(pairs)
171
+
172
+ def update(self, key: str, value: Any):
173
+ """Add a new item or update an existing one.
174
+
175
+ :param key: The key to use in the dictionary.
176
+ :param value: The associated value for the `key`.
177
+ """
178
+ self.info[key] = value
179
+
180
+ def __str__(self):
181
+ """Returns a human-readable signature string."""
182
+ return self.format()
183
+
184
+ def __repr__(self):
185
+ """Returns a human-readable signature string."""
186
+ return self.format()
187
+
188
+
189
+ class Metric(metaclass=ABCMeta):
190
+ """A base class for all metrics that ensures the implementation of some
191
+ methods. Much of the common functionality is moved to this base class
192
+ from other metrics."""
193
+
194
+ # Each metric should define its Signature class' name here
195
+ _SIGNATURE_TYPE = Signature
196
+
197
+ def __init__(self):
198
+ """`Metric` initializer."""
199
+ # The pre-computed reference cache
200
+ self._ref_cache = None
201
+
202
+ # only useful for BLEU tokenized warnings. Set to True so that
203
+ # warnings are not issued for other metrics.
204
+ self._force = True
205
+
206
+ # Will be used by the signature when bootstrap resampling
207
+ self.n_bootstrap = None
208
+ self.seed = None
209
+
210
+ def _check_sentence_score_args(self, hyp: str, refs: Sequence[str]):
211
+ """Performs sanity checks on `sentence_score` method's arguments.
212
+
213
+ :param hyp: A single hypothesis string.
214
+ :param refs: A sequence of reference strings.
215
+ """
216
+ prefix = self.__class__.__name__
217
+ err_msg = None
218
+
219
+ if not isinstance(hyp, str):
220
+ err_msg = 'The argument `hyp` should be a string.'
221
+ elif isinstance(refs, str) or not isinstance(refs, Sequence):
222
+ err_msg = 'The argument `refs` should be a sequence of strings.'
223
+ elif not isinstance(refs[0], str) and refs[0] is not None:
224
+ err_msg = 'Each element of `refs` should be a string.'
225
+
226
+ if err_msg:
227
+ raise TypeError(f'{prefix}: {err_msg}')
228
+
229
+ def _check_corpus_score_args(self, hyps: Sequence[str],
230
+ refs: Optional[Sequence[Sequence[str]]]):
231
+ """Performs sanity checks on `corpus_score` method's arguments.
232
+
233
+ :param hyps: A sequence of hypothesis strings.
234
+ :param refs: A sequence of reference documents with document being
235
+ defined as a sequence of reference strings. If `None`, cached references
236
+ will be used.
237
+ """
238
+
239
+ prefix = self.__class__.__name__
240
+ err_msg = None
241
+
242
+ if not isinstance(hyps, Sequence):
243
+ err_msg = "`hyps` should be a sequence of strings."
244
+ elif not isinstance(hyps[0], str):
245
+ err_msg = 'Each element of `hyps` should be a string.'
246
+ elif any(line is None for line in hyps):
247
+ err_msg = "Undefined line in hypotheses stream!"
248
+
249
+ if refs is not None:
250
+ if not isinstance(refs, Sequence):
251
+ err_msg = "`refs` should be a sequence of sequence of strings."
252
+ elif not isinstance(refs[0], Sequence):
253
+ err_msg = "Each element of `refs` should be a sequence of strings."
254
+ elif not isinstance(refs[0][0], str) and refs[0][0] is not None:
255
+ err_msg = "`refs` should be a sequence of sequence of strings."
256
+
257
+ if err_msg:
258
+ raise TypeError(f'{prefix}: {err_msg}')
259
+
260
+ @abstractmethod
261
+ def _aggregate_and_compute(self, stats: List[List[Any]]) -> Any:
262
+ """Computes the final score given the pre-computed match statistics.
263
+
264
+ :param stats: A list of segment-level statistics.
265
+ :return: A `Score` instance.
266
+ """
267
+ pass
268
+
269
+ @abstractmethod
270
+ def _compute_score_from_stats(self, stats: List[Any]) -> Any:
271
+ """Computes the final score from already aggregated statistics.
272
+
273
+ :param stats: A list or numpy array of segment-level statistics.
274
+ :return: A `Score` object.
275
+ """
276
+ pass
277
+
278
+ @abstractmethod
279
+ def _preprocess_segment(self, sent: str) -> str:
280
+ """A wrapper around the metric's tokenization and pre-processing logic.
281
+ This should be implemented for reference caching to work correctly.
282
+
283
+ :param sent: The input sentence.
284
+ :return: The pre-processed output sentence.
285
+ """
286
+ pass
287
+
288
+ @abstractmethod
289
+ def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, Any]:
290
+ """Given a list of reference segments, extract the required
291
+ information (such as n-grams for BLEU and chrF). This should be implemented
292
+ for the generic `_cache_references()` to work across all metrics.
293
+
294
+ :param refs: A sequence of strings.
295
+ """
296
+ pass
297
+
298
+ @abstractmethod
299
+ def _compute_segment_statistics(self, hypothesis: str, ref_kwargs: Dict) -> List[Any]:
300
+ """Given a (pre-processed) hypothesis sentence and already computed
301
+ reference info, returns the best match statistics across the
302
+ references. The return type is usually a List of ints or floats.
303
+
304
+ :param hypothesis: A pre-processed hypothesis sentence.
305
+ :param ref_kwargs: A dictionary with reference-related information
306
+ within. This is formulated as a dictionary as different metrics may
307
+ require different information regarding a reference segment.
308
+ """
309
+ pass
310
+
311
+ def _cache_references(self, references: Sequence[Sequence[str]]) -> List[Any]:
312
+ """Given the full set of document references, extract segment n-grams
313
+ (or other necessary information) for caching purposes.
314
+
315
+ :param references: A sequence of reference documents with document being
316
+ defined as a sequence of reference strings. A particular reference
317
+ segment can be '' or `None` to allow the use of variable number
318
+ of references per segment.
319
+ :return: A list where each element holds the pre-computed reference
320
+ information (n-grams and lengths) returned by `_extract_reference_info()`.
321
+ """
322
+ ref_cache = []
323
+
324
+ # Decide on final number of refs here as well
325
+ num_refs = set()
326
+
327
+ for refs in zip(*references):
328
+ # Remove undefined references
329
+ lines = [x for x in refs if x is not None]
330
+
331
+ # Keep track of reference counts to allow variable reference
332
+ # info in the signature
333
+ num_refs.add(len(lines))
334
+
335
+ lines = [self._preprocess_segment(x) for x in lines]
336
+
337
+ # Get n-grams
338
+ ref_cache.append(self._extract_reference_info(lines))
339
+
340
+ if len(num_refs) == 1:
341
+ self.num_refs = list(num_refs)[0]
342
+ else:
343
+ # A variable number of refs exist
344
+ self.num_refs = -1
345
+
346
+ return ref_cache
347
+
348
+ def _extract_corpus_statistics(self, hypotheses: Sequence[str],
349
+ references: Optional[Sequence[Sequence[str]]]) -> Any:
350
+ """Reads the corpus and returns sentence-level match statistics for
351
+ faster re-computations esp. during statistical tests.
352
+
353
+ :param hypotheses: A sequence of hypothesis strings.
354
+ :param references: A sequence of reference documents with document being
355
+ defined as a sequence of reference strings. If `None`, cached references
356
+ will be used.
357
+ :return: A list where each sublist corresponds to segment statistics.
358
+ """
359
+ # Pre-compute references
360
+ # Don't store the cache as the user is explicitly passing refs
361
+ if references:
362
+ ref_cache = self._cache_references(references)
363
+ elif self._ref_cache:
364
+ ref_cache = self._ref_cache
365
+ else:
366
+ raise RuntimeError('No references provided and the cache is empty.')
367
+
368
+ stats = []
369
+ tok_count = 0
370
+
371
+ for hyp, ref_kwargs in zip(hypotheses, ref_cache):
372
+ # Check for already-tokenized input problem (only for BLEU)
373
+ if not self._force and hyp.endswith(' .'):
374
+ tok_count += 1
375
+
376
+ hyp = self._preprocess_segment(hyp)
377
+
378
+ # Collect stats
379
+ stats.append(self._compute_segment_statistics(hyp, ref_kwargs))
380
+
381
+ if tok_count >= 100:
382
+ sacrelogger.warning("That's 100 lines that end in a tokenized period ('.')")
383
+ sacrelogger.warning("It looks like you forgot to detokenize your test data, which may hurt your score.")
384
+ sacrelogger.warning("If you insist your data is detokenized, or don't care, you can suppress this message with the `force` parameter.")
385
+
386
+ return stats
387
+
388
+ def sentence_score(self, hypothesis: str, references: Sequence[str]) -> Any:
389
+ """Compute the metric for a single sentence against a single (or multiple) reference(s).
390
+
391
+ :param hypothesis: A single hypothesis string.
392
+ :param references: A sequence of reference strings.
393
+ :return: A `Score` object.
394
+ """
395
+ self._check_sentence_score_args(hypothesis, references)
396
+
397
+ stats = self._extract_corpus_statistics(
398
+ [hypothesis], [[refs] for refs in references])
399
+ return self._aggregate_and_compute(stats)
400
+
401
+ def corpus_score(self, hypotheses: Sequence[str],
402
+ references: Optional[Sequence[Sequence[str]]],
403
+ n_bootstrap: int = 1) -> Any:
404
+ """Compute the metric for a corpus against a single (or multiple) reference(s).
405
+
406
+ :param hypotheses: A sequence of hypothesis strings.
407
+ :param references: A sequence of reference documents with document being
408
+ defined as a sequence of reference strings. If `None`, cached references
409
+ will be used.
410
+ :param n_bootstrap: If > 1, provides 95% confidence interval around true mean
411
+ using bootstrap resampling with `n_bootstrap` samples.
412
+ :return: A `Score` object.
413
+ """
414
+ self._check_corpus_score_args(hypotheses, references)
415
+
416
+ # Collect corpus stats
417
+ stats = self._extract_corpus_statistics(hypotheses, references)
418
+
419
+ # Compute the actual system score
420
+ actual_score = self._aggregate_and_compute(stats)
421
+
422
+ if n_bootstrap > 1:
423
+ # Compute bootstrap estimate as well
424
+ # Delayed import is to escape from numpy import if bootstrap
425
+ # is not requested.
426
+ from ..significance import _bootstrap_resample
427
+
428
+ self.n_bootstrap = n_bootstrap
429
+ self.seed, bs_scores = _bootstrap_resample(stats, self, n_bootstrap)
430
+ actual_score.estimate_ci(bs_scores)
431
+
432
+ return actual_score
433
+
434
+ def get_signature(self) -> Signature:
435
+ """Creates and returns the signature for the metric. The creation
436
+ of signatures is delayed as the number of references is resolved
437
+ only at the point of reference caching."""
438
+ return self._SIGNATURE_TYPE(self.__dict__)
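A minimal sketch of how these pieces fit together at the call site, using the `BLEU` subclass from `bleu.py` below; the example strings are invented and `n_bootstrap=100` is an arbitrary choice:

from sacrebleu.metrics import BLEU

bleu = BLEU()
hyps = ["the cat sat on the mat", "he read the book"]
refs = [["the cat is on the mat", "he reads the book"]]

# corpus_score() with n_bootstrap > 1 also fills the mean/CI fields used by Score.format().
score = bleu.corpus_score(hyps, refs, n_bootstrap=100)
sig = str(bleu.get_signature())

print(score.format(width=2, signature=sig))       # plain score line with the signature
print(score.format(is_json=True, signature=sig))  # same information as a JSON string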
venv/lib/python3.10/site-packages/sacrebleu/metrics/bleu.py ADDED
@@ -0,0 +1,420 @@
1
+ """The implementation of the BLEU metric (Papineni et al., 2002)."""
2
+
3
+ import math
4
+ import logging
5
+ from importlib import import_module
6
+ from typing import List, Sequence, Optional, Dict, Any
7
+
8
+ from ..utils import my_log, sum_of_lists
9
+
10
+ from .base import Score, Signature, Metric
11
+ from .helpers import extract_all_word_ngrams
12
+
13
+ sacrelogger = logging.getLogger('sacrebleu')
14
+
15
+ # The default for the maximum n-gram order when computing precisions
16
+ MAX_NGRAM_ORDER = 4
17
+
18
+ _TOKENIZERS = {
19
+ 'none': 'tokenizer_none.NoneTokenizer',
20
+ 'zh': 'tokenizer_zh.TokenizerZh',
21
+ '13a': 'tokenizer_13a.Tokenizer13a',
22
+ 'intl': 'tokenizer_intl.TokenizerV14International',
23
+ 'char': 'tokenizer_char.TokenizerChar',
24
+ 'ja-mecab': 'tokenizer_ja_mecab.TokenizerJaMecab',
25
+ 'ko-mecab': 'tokenizer_ko_mecab.TokenizerKoMecab',
26
+ 'spm': 'tokenizer_spm.TokenizerSPM',
27
+ 'flores101': 'tokenizer_spm.Flores101Tokenizer',
28
+ 'flores200': 'tokenizer_spm.Flores200Tokenizer',
29
+ }
30
+
31
+
32
+ def _get_tokenizer(name: str):
33
+ """Dynamically import tokenizer as importing all is slow."""
34
+ module_name, class_name = _TOKENIZERS[name].rsplit('.', 1)
35
+ return getattr(
36
+ import_module(f'.tokenizers.{module_name}', 'sacrebleu'),
37
+ class_name)
38
+
39
+
40
+ class BLEUSignature(Signature):
41
+ """A convenience class to represent the reproducibility signature for BLEU.
42
+
43
+ :param args: key-value dictionary passed from the actual metric instance.
44
+ """
45
+ def __init__(self, args: dict):
46
+ """`BLEUSignature` initializer."""
47
+ super().__init__(args)
48
+
49
+ self._abbr.update({
50
+ 'case': 'c',
51
+ 'eff': 'e',
52
+ 'tok': 'tok',
53
+ 'smooth': 's',
54
+ })
55
+
56
+ # Construct a combined string for smoothing method and value
57
+ smooth_str = args['smooth_method']
58
+ smooth_def = BLEU.SMOOTH_DEFAULTS[smooth_str]
59
+
60
+ # If the method requires a parameter, add it within brackets
61
+ if smooth_def is not None:
62
+ # the following can be None if the user wants to use the default
63
+ smooth_val = args['smooth_value']
64
+
65
+ if smooth_val is None:
66
+ smooth_val = smooth_def
67
+
68
+ smooth_str += f'[{smooth_val:.2f}]'
69
+
70
+ self.info.update({
71
+ 'case': 'lc' if args['lowercase'] else 'mixed',
72
+ 'eff': 'yes' if args['effective_order'] else 'no',
73
+ 'tok': args['tokenizer_signature'],
74
+ 'smooth': smooth_str,
75
+ })
76
+
77
+
78
+ class BLEUScore(Score):
79
+ """A convenience class to represent BLEU scores.
80
+
81
+ :param score: The BLEU score.
82
+ :param counts: List of counts of correct ngrams, 1 <= n <= max_ngram_order
83
+ :param totals: List of counts of total ngrams, 1 <= n <= max_ngram_order
84
+ :param precisions: List of precisions, 1 <= n <= max_ngram_order
85
+ :param bp: The brevity penalty.
86
+ :param sys_len: The cumulative system length.
87
+ :param ref_len: The cumulative reference length.
88
+ """
89
+ def __init__(self, score: float, counts: List[int], totals: List[int],
90
+ precisions: List[float], bp: float,
91
+ sys_len: int, ref_len: int):
92
+ """`BLEUScore` initializer."""
93
+ super().__init__('BLEU', score)
94
+ self.bp = bp
95
+ self.counts = counts
96
+ self.totals = totals
97
+ self.sys_len = sys_len
98
+ self.ref_len = ref_len
99
+ self.precisions = precisions
100
+
101
+ self.prec_str = "/".join([f"{p:.1f}" for p in self.precisions])
102
+ self.ratio = self.sys_len / self.ref_len if self.ref_len else 0
103
+
104
+ # The verbose part of BLEU
105
+ self._verbose = f"{self.prec_str} (BP = {self.bp:.3f} "
106
+ self._verbose += f"ratio = {self.ratio:.3f} hyp_len = {self.sys_len:d} "
107
+ self._verbose += f"ref_len = {self.ref_len:d})"
108
+
109
+
110
+ class BLEU(Metric):
111
+ """Computes the BLEU metric given hypotheses and references.
112
+
113
+ :param lowercase: If True, lowercased BLEU is computed.
114
+ :param force: Ignore data that looks already tokenized.
115
+ :param tokenize: The tokenizer to use. If None, defaults to language-specific tokenizers with '13a' as the fallback default.
116
+ :param smooth_method: The smoothing method to use ('floor', 'add-k', 'exp' or 'none').
117
+ :param smooth_value: The smoothing value for `floor` and `add-k` methods. `None` falls back to default value.
118
+ :param max_ngram_order: If given, it overrides the maximum n-gram order (default: 4) when computing precisions.
119
+ :param effective_order: If `True`, stop including n-gram orders for which precision is 0. This should be
120
+ `True`, if sentence-level BLEU will be computed.
121
+ :param trg_lang: An optional language code to raise potential tokenizer warnings.
122
+ :param references: A sequence of reference documents with document being
123
+ defined as a sequence of reference strings. If given, the reference n-grams
124
+ and lengths will be pre-computed and cached for faster BLEU computation
125
+ across many systems.
126
+ """
127
+
128
+ SMOOTH_DEFAULTS: Dict[str, Optional[float]] = {
129
+ # The defaults for `floor` and `add-k` are obtained from the following paper
130
+ # A Systematic Comparison of Smoothing Techniques for Sentence-Level BLEU
131
+ # Boxing Chen and Colin Cherry
132
+ # http://aclweb.org/anthology/W14-3346
133
+ 'none': None, # No value is required
134
+ 'floor': 0.1,
135
+ 'add-k': 1,
136
+ 'exp': None, # No value is required
137
+ }
138
+
139
+ TOKENIZERS = _TOKENIZERS.keys()
140
+
141
+ # mteval-v13a.pl tokenizer unless Chinese or Japanese is provided
142
+ TOKENIZER_DEFAULT = '13a'
143
+
144
+ # Some language specific mappings to use if `trg_lang` is given
145
+ # and the tokenizer is not explicitly specified
146
+ _TOKENIZER_MAP = {
147
+ 'zh': 'zh',
148
+ 'ja': 'ja-mecab',
149
+ 'ko': 'ko-mecab',
150
+ }
151
+
152
+ _SIGNATURE_TYPE = BLEUSignature
153
+
154
+ def __init__(self, lowercase: bool = False,
155
+ force: bool = False,
156
+ tokenize: Optional[str] = None,
157
+ smooth_method: str = 'exp',
158
+ smooth_value: Optional[float] = None,
159
+ max_ngram_order: int = MAX_NGRAM_ORDER,
160
+ effective_order: bool = False,
161
+ trg_lang: str = '',
162
+ references: Optional[Sequence[Sequence[str]]] = None):
163
+ """`BLEU` initializer."""
164
+ super().__init__()
165
+
166
+ self._force = force
167
+ self.trg_lang = trg_lang
168
+ self.lowercase = lowercase
169
+ self.smooth_value = smooth_value
170
+ self.smooth_method = smooth_method
171
+ self.max_ngram_order = max_ngram_order
172
+ self.effective_order = effective_order
173
+
174
+ # Sanity check
175
+ assert self.smooth_method in self.SMOOTH_DEFAULTS.keys(), \
176
+ f"Unknown smooth_method {self.smooth_method!r}"
177
+
178
+ # If the tokenizer wasn't specified, choose it according to the
179
+ # following logic. We use '13a' except for ZH, JA and KO. Note that
180
+ # this logic can only be applied when sacrebleu knows the target
181
+ # language, which is only the case for builtin datasets.
182
+ if tokenize is None:
183
+ best_tokenizer = self.TOKENIZER_DEFAULT
184
+
185
+ # Set `zh` or `ja-mecab` or `ko-mecab` if target language is provided
186
+ if self.trg_lang in self._TOKENIZER_MAP:
187
+ best_tokenizer = self._TOKENIZER_MAP[self.trg_lang]
188
+ else:
189
+ best_tokenizer = tokenize
190
+ if self.trg_lang == 'zh' and best_tokenizer != 'zh':
191
+ sacrelogger.warning(
192
+ "Consider using the 'zh' or 'spm' tokenizer for Chinese.")
193
+ if self.trg_lang == 'ja' and best_tokenizer != 'ja-mecab':
194
+ sacrelogger.warning(
195
+ "Consider using the 'ja-mecab' or 'spm' tokenizer for Japanese.")
196
+ if self.trg_lang == 'ko' and best_tokenizer != 'ko-mecab':
197
+ sacrelogger.warning(
198
+ "Consider using the 'ko-mecab' or 'spm' tokenizer for Korean.")
199
+
200
+ # Create the tokenizer
201
+ self.tokenizer = _get_tokenizer(best_tokenizer)()
202
+
203
+ # Build the signature
204
+ self.tokenizer_signature = self.tokenizer.signature()
205
+
206
+ if references is not None:
207
+ # Pre-compute reference ngrams and lengths
208
+ self._ref_cache = self._cache_references(references)
209
+
210
+ @staticmethod
211
+ def compute_bleu(correct: List[int],
212
+ total: List[int],
213
+ sys_len: int,
214
+ ref_len: int,
215
+ smooth_method: str = 'none',
216
+ smooth_value=None,
217
+ effective_order: bool = False,
218
+ max_ngram_order: int = MAX_NGRAM_ORDER) -> BLEUScore:
219
+ """Computes BLEU score from its sufficient statistics with smoothing.
220
+
221
+ Smoothing methods (citing "A Systematic Comparison of Smoothing Techniques for Sentence-Level BLEU",
222
+ Boxing Chen and Colin Cherry, WMT 2014: http://aclweb.org/anthology/W14-3346)
223
+
224
+ - none: No smoothing.
225
+ - floor: Method 1 (requires small positive value (0.1 in the paper) to be set)
226
+ - add-k: Method 2 (Generalizing Lin and Och, 2004)
227
+ - exp: Method 3 (NIST smoothing method i.e. in use with mteval-v13a.pl)
228
+
229
+ :param correct: List of counts of correct ngrams, 1 <= n <= max_ngram_order
230
+ :param total: List of counts of total ngrams, 1 <= n <= max_ngram_order
231
+ :param sys_len: The cumulative system length
232
+ :param ref_len: The cumulative reference length
233
+ :param smooth_method: The smoothing method to use ('floor', 'add-k', 'exp' or 'none')
234
+ :param smooth_value: The smoothing value for `floor` and `add-k` methods. `None` falls back to default value.
235
+ :param effective_order: If `True`, stop including n-gram orders for which precision is 0. This should be
236
+ `True`, if sentence-level BLEU will be computed.
237
+ :param max_ngram_order: If given, it overrides the maximum n-gram order (default: 4) when computing precisions.
238
+ :return: A `BLEUScore` instance.
239
+ """
240
+ assert smooth_method in BLEU.SMOOTH_DEFAULTS.keys(), \
241
+ f"Unknown smooth_method {smooth_method!r}"
242
+
243
+ # Fetch the default value for floor and add-k
244
+ if smooth_value is None:
245
+ smooth_value = BLEU.SMOOTH_DEFAULTS[smooth_method]
246
+
247
+ # Compute brevity penalty
248
+ if sys_len < ref_len:
249
+ bp = math.exp(1 - ref_len / sys_len) if sys_len > 0 else 0.0
250
+ else:
251
+ bp = 1.0
252
+
253
+ # n-gram precisions
254
+ precisions = [0.0 for x in range(max_ngram_order)]
255
+
256
+ # Early stop if there are no matches (#141)
257
+ if not any(correct):
258
+ return BLEUScore(0.0, correct, total, precisions, bp, sys_len, ref_len)
259
+
260
+ smooth_mteval = 1.
261
+ eff_order = max_ngram_order
262
+ for n in range(1, len(precisions) + 1):
263
+ if smooth_method == 'add-k' and n > 1:
264
+ correct[n - 1] += smooth_value
265
+ total[n - 1] += smooth_value
266
+
267
+ if total[n - 1] == 0:
268
+ break
269
+
270
+ # If the system guesses no i-grams, 1 <= i <= max_ngram_order,
271
+ # the BLEU score is 0 (technically undefined). This is a problem for sentence
272
+ # level BLEU or a corpus of short sentences, where systems will get
273
+ # no credit if sentence lengths fall under the max_ngram_order threshold.
274
+ # This fix scales max_ngram_order to the observed maximum order.
275
+ # It is only available through the API and off by default
276
+ if effective_order:
277
+ eff_order = n
278
+
279
+ if correct[n - 1] == 0:
280
+ if smooth_method == 'exp':
281
+ smooth_mteval *= 2
282
+ precisions[n - 1] = 100. / (smooth_mteval * total[n - 1])
283
+ elif smooth_method == 'floor':
284
+ precisions[n - 1] = 100. * smooth_value / total[n - 1]
285
+ else:
286
+ precisions[n - 1] = 100. * correct[n - 1] / total[n - 1]
287
+
288
+ # Compute BLEU score
289
+ score = bp * math.exp(
290
+ sum([my_log(p) for p in precisions[:eff_order]]) / eff_order)
291
+
292
+ return BLEUScore(score, correct, total, precisions, bp, sys_len, ref_len)
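As a quick illustration of the static helper above, BLEU can be computed directly from sufficient statistics; the counts below are hypothetical, hand-picked numbers:

from sacrebleu.metrics import BLEU

# Hypothetical correct/total n-gram counts for orders 1..4, for a 10-token
# hypothesis scored against an 11-token reference (numbers are made up).
score = BLEU.compute_bleu(correct=[9, 7, 5, 3], total=[10, 9, 8, 7],
                          sys_len=10, ref_len=11, smooth_method='exp')
print(score.score, score.precisions, score.bp)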
293
+
294
+ def _preprocess_segment(self, sent: str) -> str:
295
+ """Given a sentence, lowercases (optionally) and tokenizes it
296
+ :param sent: The input sentence string.
297
+ :return: The pre-processed output string.
298
+ """
299
+ if self.lowercase:
300
+ sent = sent.lower()
301
+ return self.tokenizer(sent.rstrip())
302
+
303
+ def _compute_score_from_stats(self, stats: List[int]) -> BLEUScore:
304
+ """Computes the final score from already aggregated statistics.
305
+
306
+ :param stats: A list or numpy array of segment-level statistics.
307
+ :return: A `BLEUScore` object.
308
+ """
309
+ return self.compute_bleu(
310
+ correct=stats[2: 2 + self.max_ngram_order],
311
+ total=stats[2 + self.max_ngram_order:],
312
+ sys_len=int(stats[0]), ref_len=int(stats[1]),
313
+ smooth_method=self.smooth_method, smooth_value=self.smooth_value,
314
+ effective_order=self.effective_order,
315
+ max_ngram_order=self.max_ngram_order
316
+ )
317
+
318
+ def _aggregate_and_compute(self, stats: List[List[int]]) -> BLEUScore:
319
+ """Computes the final BLEU score given the pre-computed corpus statistics.
320
+
321
+ :param stats: A list of segment-level statistics
322
+ :return: A `BLEUScore` instance.
323
+ """
324
+ return self._compute_score_from_stats(sum_of_lists(stats))
325
+
326
+ def _get_closest_ref_len(self, hyp_len: int, ref_lens: List[int]) -> int:
327
+ """Given a hypothesis length and a list of reference lengths, returns
328
+ the closest reference length to be used by BLEU.
329
+
330
+ :param hyp_len: The hypothesis length.
331
+ :param ref_lens: A list of reference lengths.
332
+ :return: The closest reference length.
333
+ """
334
+ closest_diff, closest_len = -1, -1
335
+
336
+ for ref_len in ref_lens:
337
+ diff = abs(hyp_len - ref_len)
338
+ if closest_diff == -1 or diff < closest_diff:
339
+ closest_diff = diff
340
+ closest_len = ref_len
341
+ elif diff == closest_diff and ref_len < closest_len:
342
+ closest_len = ref_len
343
+
344
+ return closest_len
345
+
346
+ def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, Any]:
347
+ """Given a list of reference segments, extract the n-grams and reference lengths.
348
+ The latter will be useful when comparing hypothesis and reference lengths for BLEU.
349
+
350
+ :param refs: A sequence of strings.
351
+ :return: A dictionary that will be passed to `_compute_segment_statistics()`
352
+ through keyword arguments.
353
+ """
354
+ ngrams = None
355
+ ref_lens = []
356
+
357
+ for ref in refs:
358
+ # extract n-grams for this ref
359
+ this_ngrams, ref_len = extract_all_word_ngrams(ref, 1, self.max_ngram_order)
360
+ ref_lens.append(ref_len)
361
+
362
+ if ngrams is None:
363
+ # Set it directly for first set of refs
364
+ ngrams = this_ngrams
365
+ else:
366
+ # Merge counts across multiple references
367
+ # The below loop is faster than `ngrams |= this_ngrams`
368
+ for ngram, count in this_ngrams.items():
369
+ ngrams[ngram] = max(ngrams[ngram], count)
370
+
371
+ return {'ref_ngrams': ngrams, 'ref_lens': ref_lens}
372
+
373
+ def _compute_segment_statistics(self, hypothesis: str,
374
+ ref_kwargs: Dict) -> List[int]:
375
+ """Given a (pre-processed) hypothesis sentence and already computed
376
+ reference n-grams & lengths, returns the best match statistics across the
377
+ references.
378
+
379
+ :param hypothesis: Hypothesis sentence.
380
+ :param ref_kwargs: A dictionary with `ref_ngrams` and `ref_lens` keys
381
+ that denote the counter containing all n-gram counts and reference lengths,
382
+ respectively.
383
+ :return: A list of integers with match statistics.
384
+ """
385
+
386
+ ref_ngrams, ref_lens = ref_kwargs['ref_ngrams'], ref_kwargs['ref_lens']
387
+
388
+ # Extract n-grams for the hypothesis
389
+ hyp_ngrams, hyp_len = extract_all_word_ngrams(
390
+ hypothesis, 1, self.max_ngram_order)
391
+
392
+ ref_len = self._get_closest_ref_len(hyp_len, ref_lens)
393
+
394
+ # Count the stats
395
+ # Although counter has its internal & and | operators, this is faster
396
+ correct = [0 for i in range(self.max_ngram_order)]
397
+ total = correct[:]
398
+ for hyp_ngram, hyp_count in hyp_ngrams.items():
399
+ # n-gram order
400
+ n = len(hyp_ngram) - 1
401
+ # count hypothesis n-grams
402
+ total[n] += hyp_count
403
+ # count matched n-grams
404
+ if hyp_ngram in ref_ngrams:
405
+ correct[n] += min(hyp_count, ref_ngrams[hyp_ngram])
406
+
407
+ # Return a flattened list for efficient computation
408
+ return [hyp_len, ref_len] + correct + total
409
+
410
+ def sentence_score(self, hypothesis: str, references: Sequence[str]) -> BLEUScore:
411
+ """Compute the metric for a single sentence against a single (or multiple) reference(s).
412
+
413
+ :param hypothesis: A single hypothesis string.
414
+ :param references: A sequence of reference strings.
415
+ :return: a `BLEUScore` object.
416
+ """
417
+ if not self.effective_order:
418
+ sacrelogger.warning(
419
+ 'It is recommended to enable `effective_order` for sentence-level BLEU.')
420
+ return super().sentence_score(hypothesis, references)
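Following the warning in `sentence_score()` above, a short sentence-level usage sketch (the example strings are invented):

from sacrebleu.metrics import BLEU

# effective_order=True is the recommended setting for sentence-level BLEU.
bleu = BLEU(effective_order=True)
print(bleu.sentence_score("the cat sat on the mat",
                          ["the cat is on the mat", "there is a cat on the mat"]))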
venv/lib/python3.10/site-packages/sacrebleu/metrics/chrf.py ADDED
@@ -0,0 +1,284 @@
1
+ """The implementation of chrF (Popović 2015) and chrF++ (Popović 2017) metrics."""
2
+
3
+ from typing import List, Sequence, Optional, Dict
4
+ from collections import Counter
5
+
6
+ from ..utils import sum_of_lists
7
+ from .base import Score, Signature, Metric
8
+ from .helpers import extract_all_char_ngrams, extract_word_ngrams
9
+
10
+
11
+ class CHRFSignature(Signature):
12
+ """A convenience class to represent the reproducibility signature for chrF.
13
+
14
+ :param args: key-value dictionary passed from the actual metric instance.
15
+ """
16
+ def __init__(self, args: dict):
17
+ """`CHRFSignature` initializer."""
18
+ super().__init__(args)
19
+ self._abbr.update({
20
+ 'case': 'c',
21
+ 'eff': 'e',
22
+ 'nc': 'nc',
23
+ 'nw': 'nw',
24
+ 'space': 's',
25
+ })
26
+
27
+ self.info.update({
28
+ 'case': 'lc' if args['lowercase'] else 'mixed',
29
+ 'eff': 'yes' if not args['eps_smoothing'] else 'no',
30
+ 'nc': args['char_order'],
31
+ 'nw': args['word_order'],
32
+ 'space': 'yes' if args['whitespace'] else 'no',
33
+ })
34
+
35
+
36
+ class CHRFScore(Score):
37
+ """A convenience class to represent chrF scores.
38
+
39
+ :param score: The chrF (chrF++) score.
40
+ :param char_order: The character n-gram order.
41
+ :param word_order: The word n-gram order. If equal to 2, the metric is referred to as chrF++.
42
+ :param beta: Determines the importance of recall w.r.t. precision.
43
+ """
44
+ def __init__(self, score: float, char_order: int, word_order: int, beta: int):
45
+ """`CHRFScore` initializer."""
46
+ self.beta = beta
47
+ self.char_order = char_order
48
+ self.word_order = word_order
49
+
50
+ # Add + signs to denote chrF+ variant
51
+ name = f'chrF{self.beta}' + '+' * self.word_order
52
+
53
+ super().__init__(name, score)
54
+
55
+
56
+ class CHRF(Metric):
57
+ """Computes the chrF(++) metric given hypotheses and references.
58
+
59
+ :param char_order: Character n-gram order.
60
+ :param word_order: Word n-gram order. If equal to 2, the metric is referred to as chrF++.
61
+ :param beta: Determines the importance of recall w.r.t. precision.
62
+ :param lowercase: Enable case-insensitivity.
63
+ :param whitespace: If `True`, include whitespaces when extracting character n-grams.
64
+ :param eps_smoothing: If `True`, applies epsilon smoothing similar
65
+ to reference chrF++.py, NLTK and Moses implementations. Otherwise,
66
+ it takes into account effective match order similar to sacreBLEU < 2.0.0.
67
+ :param references: A sequence of reference documents with document being
68
+ defined as a sequence of reference strings. If given, the reference n-grams
69
+ will be pre-computed and cached for faster re-computation across many systems.
70
+ """
71
+
72
+ # Maximum character n-gram order to take into account
73
+ CHAR_ORDER = 6
74
+
75
+ # chrF+ additionally takes into account some of the word n-grams
76
+ WORD_ORDER = 0
77
+
78
+ # Defaults to 2 (per http://www.aclweb.org/anthology/W16-2341)
79
+ BETA = 2
80
+
81
+ # Cache string.punctuation for chrF+' punctuation stripper
82
+ _PUNCTS = set('!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~')
83
+
84
+ _SIGNATURE_TYPE = CHRFSignature
85
+
86
+ def __init__(self, char_order: int = CHAR_ORDER,
87
+ word_order: int = WORD_ORDER,
88
+ beta: int = BETA,
89
+ lowercase: bool = False,
90
+ whitespace: bool = False,
91
+ eps_smoothing: bool = False,
92
+ references: Optional[Sequence[Sequence[str]]] = None):
93
+ """`CHRF` initializer."""
94
+ super().__init__()
95
+
96
+ self.beta = beta
97
+ self.char_order = char_order
98
+ self.word_order = word_order
99
+ self.order = self.char_order + self.word_order
100
+ self.lowercase = lowercase
101
+ self.whitespace = whitespace
102
+ self.eps_smoothing = eps_smoothing
103
+
104
+ if references is not None:
105
+ # Pre-compute reference ngrams
106
+ self._ref_cache = self._cache_references(references)
107
+
108
+ @staticmethod
109
+ def _get_match_statistics(hyp_ngrams: Counter, ref_ngrams: Counter) -> List[int]:
110
+ """Computes the match statistics between hypothesis and reference n-grams.
111
+
112
+ :param hyp_ngrams: A `Counter` holding hypothesis n-grams.
113
+ :param ref_ngrams: A `Counter` holding reference n-grams.
114
+ :return: A list of three numbers denoting hypothesis n-gram count,
115
+ reference n-gram count and the intersection count.
116
+ """
117
+ # Counter's internal intersection is not that fast, count manually
118
+ match_count, hyp_count = 0, 0
119
+ for ng, count in hyp_ngrams.items():
120
+ hyp_count += count
121
+ if ng in ref_ngrams:
122
+ match_count += min(count, ref_ngrams[ng])
123
+
124
+ return [
125
+ # Don't count hits if no reference exists for that n-gram
126
+ hyp_count if ref_ngrams else 0,
127
+ sum(ref_ngrams.values()),
128
+ match_count,
129
+ ]
130
+
131
+ def _remove_punctuation(self, sent: str) -> List[str]:
132
+ """Separates out punctuations from beginning and end of words for chrF.
133
+ Adapted from https://github.com/m-popovic/chrF
134
+
135
+ :param sent: A string.
136
+ :return: A list of words.
137
+ """
138
+ tokenized = []
139
+ for w in sent.split():
140
+ if len(w) == 1:
141
+ tokenized.append(w)
142
+ else:
143
+ # NOTE: This splits '(hi)' to '(hi' and ')' (issue #124)
144
+ if w[-1] in self._PUNCTS:
145
+ tokenized += [w[:-1], w[-1]]
146
+ elif w[0] in self._PUNCTS:
147
+ tokenized += [w[0], w[1:]]
148
+ else:
149
+ tokenized.append(w)
150
+ return tokenized
151
+
152
+ def _preprocess_segment(self, sent: str) -> str:
153
+ """Given a sentence, apply optional lowercasing.
154
+
155
+ :param sent: The input sentence string.
156
+ :return: The pre-processed output string.
157
+ """
158
+ return sent.lower() if self.lowercase else sent
159
+
160
+ def _compute_f_score(self, statistics: List[int]) -> float:
161
+ """Compute the chrF score given the n-gram match statistics.
162
+
163
+ :param statistics: A flattened list of 3 * (`char_order` + `word_order`)
164
+ elements giving the [hyp, ref, match] counts for each order.
165
+ :return: The final f_beta score between [0, 100].
166
+ """
167
+ eps = 1e-16
168
+ score = 0.0
169
+ effective_order = 0
170
+ factor = self.beta ** 2
171
+ avg_prec, avg_rec = 0.0, 0.0
172
+
173
+ for i in range(self.order):
174
+ n_hyp, n_ref, n_match = statistics[3 * i: 3 * i + 3]
175
+
176
+ # chrF++.py style EPS smoothing (also used by Moses and NLTK)
177
+ prec = n_match / n_hyp if n_hyp > 0 else eps
178
+ rec = n_match / n_ref if n_ref > 0 else eps
179
+
180
+ denom = factor * prec + rec
181
+ score += ((1 + factor) * prec * rec / denom) if denom > 0 else eps
182
+
183
+ # sacreBLEU <2.0.0 style effective order smoothing
184
+ if n_hyp > 0 and n_ref > 0:
185
+ avg_prec += prec
186
+ avg_rec += rec
187
+ effective_order += 1
188
+
189
+ if self.eps_smoothing:
190
+ return 100 * score / self.order
191
+
192
+ if effective_order == 0:
193
+ avg_prec = avg_rec = 0.0
194
+ else:
195
+ avg_prec /= effective_order
196
+ avg_rec /= effective_order
197
+
198
+ if avg_prec + avg_rec:
199
+ score = (1 + factor) * avg_prec * avg_rec
200
+ score /= ((factor * avg_prec) + avg_rec)
201
+ return 100 * score
202
+ else:
203
+ return 0.0
204
+
205
+ def _compute_score_from_stats(self, stats: List[int]) -> CHRFScore:
206
+ """Computes the final score from already aggregated statistics.
207
+
208
+ :param stats: A list or numpy array of segment-level statistics.
209
+ :return: A `CHRFScore` object.
210
+ """
211
+ return CHRFScore(
212
+ self._compute_f_score(stats), self.char_order,
213
+ self.word_order, self.beta)
214
+
215
+ def _aggregate_and_compute(self, stats: List[List[int]]) -> CHRFScore:
216
+ """Computes the final score given the pre-computed corpus statistics.
217
+
218
+ :param stats: A list of segment-level statistics
219
+ :return: A `CHRFScore` object.
220
+ """
221
+ return self._compute_score_from_stats(sum_of_lists(stats))
222
+
223
+ def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, List[List[Counter]]]:
224
+ """Given a list of reference segments, extract the character and word n-grams.
225
+
226
+ :param refs: A sequence of reference segments.
227
+ :return: A list where each element contains n-grams per reference segment.
228
+ """
229
+ ngrams = []
230
+
231
+ for ref in refs:
232
+ # extract character n-grams
233
+ stats = extract_all_char_ngrams(ref, self.char_order, self.whitespace)
234
+
235
+ # Check chrF+ mode
236
+ if self.word_order > 0:
237
+ ref_words = self._remove_punctuation(ref)
238
+
239
+ for n in range(self.word_order):
240
+ stats.append(extract_word_ngrams(ref_words, n + 1))
241
+
242
+ ngrams.append(stats)
243
+
244
+ return {'ref_ngrams': ngrams}
245
+
246
+ def _compute_segment_statistics(
247
+ self, hypothesis: str, ref_kwargs: Dict) -> List[int]:
248
+ """Given a (pre-processed) hypothesis sentence and already computed
249
+ reference n-grams, returns the best match statistics across the
250
+ references.
251
+
252
+ :param hypothesis: Hypothesis sentence.
253
+ :param ref_kwargs: A dictionary with key `ref_ngrams` which is a list
254
+ where each sublist contains n-gram counters for a particular reference sentence.
255
+ :return: A list of integers where each triplet denotes [hyp, ref, match]
256
+ statistics.
257
+ """
258
+ best_stats = []
259
+ best_f_score = -1.0
260
+
261
+ # extract character n-grams
262
+ all_hyp_ngrams = extract_all_char_ngrams(
263
+ hypothesis, self.char_order, self.whitespace)
264
+
265
+ # Check chrF+ mode to see if we'll add word n-grams as well
266
+ if self.word_order > 0:
267
+ # Primitive tokenization: separate out punctuations
268
+ hwords = self._remove_punctuation(hypothesis)
269
+ _range = range(1, self.word_order + 1)
270
+ all_hyp_ngrams.extend([extract_word_ngrams(hwords, n) for n in _range])
271
+
272
+ # Iterate over multiple references, pick the one with best F score
273
+ for _ref_ngrams in ref_kwargs['ref_ngrams']:
274
+ stats = []
275
+ # Traverse all orders
276
+ for h, r in zip(all_hyp_ngrams, _ref_ngrams):
277
+ stats.extend(self._get_match_statistics(h, r))
278
+ f_score = self._compute_f_score(stats)
279
+
280
+ if f_score > best_f_score:
281
+ best_f_score = f_score
282
+ best_stats = stats
283
+
284
+ return best_stats
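A short usage sketch for chrF and chrF++ (`word_order=2`), per the class docstring above; the example strings are invented:

from sacrebleu.metrics import CHRF

chrf = CHRF()                  # default chrF: char_order=6, word_order=0, beta=2
chrf_pp = CHRF(word_order=2)   # chrF++ additionally counts word uni- and bigrams

hyp = "the cat sat on the mat"
refs = ["the cat is on the mat"]
print(chrf.sentence_score(hyp, refs))
print(chrf_pp.sentence_score(hyp, refs))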
venv/lib/python3.10/site-packages/sacrebleu/metrics/helpers.py ADDED
@@ -0,0 +1,69 @@
1
+ """Various utility functions for word and character n-gram extraction."""
2
+
3
+ from collections import Counter
4
+ from typing import List, Tuple
5
+
6
+
7
+ def extract_all_word_ngrams(line: str, min_order: int, max_order: int) -> Tuple[Counter, int]:
8
+ """Extracts all ngrams (min_order <= n <= max_order) from a sentence.
9
+
10
+ :param line: A string sentence.
11
+ :param min_order: Minimum n-gram order.
12
+ :param max_order: Maximum n-gram order.
13
+ :return: a Counter object with n-grams counts and the sequence length.
14
+ """
15
+
16
+ ngrams = []
17
+ tokens = line.split()
18
+
19
+ for n in range(min_order, max_order + 1):
20
+ for i in range(0, len(tokens) - n + 1):
21
+ ngrams.append(tuple(tokens[i: i + n]))
22
+
23
+ return Counter(ngrams), len(tokens)
24
+
25
+
26
+ def extract_word_ngrams(tokens: List[str], n: int) -> Counter:
27
+ """Extracts n-grams with order `n` from a list of tokens.
28
+
29
+ :param tokens: A list of tokens.
30
+ :param n: The order of n-grams.
31
+ :return: a Counter object with n-grams counts.
32
+ """
33
+ return Counter([' '.join(tokens[i:i + n]) for i in range(len(tokens) - n + 1)])
34
+
35
+
36
+ def extract_char_ngrams(line: str, n: int, include_whitespace: bool = False) -> Counter:
37
+ """Returns counts of character n-grams from a sentence.
38
+
39
+ :param line: A segment containing a sequence of words.
40
+ :param n: The order of the n-grams.
41
+ :param include_whitespace: If `True`, whitespace is not stripped from the line.
42
+ :return: a Counter object with n-gram counts.
43
+ """
44
+ if not include_whitespace:
45
+ line = ''.join(line.split())
46
+
47
+ return Counter([line[i:i + n] for i in range(len(line) - n + 1)])
48
+
49
+
50
+ def extract_all_char_ngrams(
51
+ line: str, max_order: int, include_whitespace: bool = False) -> List[Counter]:
52
+ """Extracts all character n-grams at once for convenience.
53
+
54
+ :param line: A segment containing a sequence of words.
55
+ :param max_order: The maximum order of the n-grams.
56
+ :param include_whitespace: If `True`, whitespace is not stripped from the line.
57
+ :return: a list of Counter objects containing ngrams and counts.
58
+ """
59
+
60
+ counters = []
61
+
62
+ if not include_whitespace:
63
+ line = ''.join(line.split())
64
+
65
+ for n in range(1, max_order + 1):
66
+ ngrams = Counter([line[i:i + n] for i in range(len(line) - n + 1)])
67
+ counters.append(ngrams)
68
+
69
+ return counters
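A tiny illustration of the extraction helpers above (the input strings are invented):

from sacrebleu.metrics.helpers import extract_all_word_ngrams, extract_all_char_ngrams

ngrams, length = extract_all_word_ngrams("the cat sat", min_order=1, max_order=2)
print(length)   # 3 tokens
print(ngrams)   # Counter over ('the',), ('cat',), ('sat',), ('the', 'cat'), ('cat', 'sat')

# Character n-grams are returned as one Counter per order (whitespace stripped by default).
print(extract_all_char_ngrams("abc", max_order=2))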
venv/lib/python3.10/site-packages/sacrebleu/metrics/lib_ter.py ADDED
@@ -0,0 +1,478 @@
1
+ """This module implements various utility functions for the TER metric."""
2
+
3
+ # Copyright 2020 Memsource
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+
18
+ import math
19
+ from typing import List, Tuple, Dict
20
+
21
+
22
+ _COST_INS = 1
23
+ _COST_DEL = 1
24
+ _COST_SUB = 1
25
+
26
+ # Tercom-inspired limits
27
+ _MAX_SHIFT_SIZE = 10
28
+ _MAX_SHIFT_DIST = 50
29
+ _BEAM_WIDTH = 25
30
+
31
+ # Our own limits
32
+ _MAX_CACHE_SIZE = 10000
33
+ _MAX_SHIFT_CANDIDATES = 1000
34
+ _INT_INFINITY = int(1e16)
35
+
36
+ _OP_INS = 'i'
37
+ _OP_DEL = 'd'
38
+ _OP_NOP = ' '
39
+ _OP_SUB = 's'
40
+ _OP_UNDEF = 'x'
41
+
42
+ _FLIP_OPS = str.maketrans(_OP_INS + _OP_DEL, _OP_DEL + _OP_INS)
43
+
44
+
45
+ def translation_edit_rate(words_hyp: List[str], words_ref: List[str]) -> Tuple[int, int]:
46
+ """Calculate the translation edit rate.
47
+
48
+ :param words_hyp: Tokenized translation hypothesis.
49
+ :param words_ref: Tokenized reference translation.
50
+ :return: tuple (number of edits, length)
51
+ """
52
+ n_words_ref = len(words_ref)
53
+ n_words_hyp = len(words_hyp)
54
+ if n_words_ref == 0:
55
+ # FIXME: This trace here is not used?
56
+ trace = _OP_DEL * n_words_hyp
57
+ # special treatment of empty refs
58
+ return n_words_hyp, 0
59
+
60
+ cached_ed = BeamEditDistance(words_ref)
61
+ shifts = 0
62
+
63
+ input_words = words_hyp
64
+ checked_candidates = 0
65
+ while True:
66
+ # do shifts until they stop reducing the edit distance
67
+ delta, new_input_words, checked_candidates = _shift(
68
+ input_words, words_ref, cached_ed, checked_candidates)
69
+
70
+ if checked_candidates >= _MAX_SHIFT_CANDIDATES:
71
+ break
72
+
73
+ if delta <= 0:
74
+ break
75
+ shifts += 1
76
+ input_words = new_input_words
77
+
78
+ edit_distance, trace = cached_ed(input_words)
79
+ total_edits = shifts + edit_distance
80
+
81
+ return total_edits, n_words_ref
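A small sanity-check sketch for the primitive above (the token lists are invented):

from sacrebleu.metrics.lib_ter import translation_edit_rate

# The hypothesis is missing one word ("the"), so a single insertion should be
# needed against the 6-token reference (no beneficial shifts here).
edits, ref_len = translation_edit_rate(
    "the cat sat on mat".split(),       # hypothesis tokens
    "the cat sat on the mat".split())   # reference tokens
print(edits, ref_len)                   # expected: 1 6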
82
+
83
+
+ def _shift(words_h: List[str], words_r: List[str], cached_ed,
+            checked_candidates: int) -> Tuple[int, List[str], int]:
+     """Attempt to shift words in hypothesis to match reference.
+
+     Returns the shift that reduces the edit distance the most.
+
+     Note that the filtering of possible shifts and shift selection are heavily
+     based on somewhat arbitrary heuristics. The code here follows as closely
+     as possible the logic in Tercom, not always justifying the particular design
+     choices.
+
+     :param words_h: Hypothesis.
+     :param words_r: Reference.
+     :param cached_ed: Cached edit distance.
+     :param checked_candidates: Number of shift candidates that were already
+         evaluated.
+     :return: (score, shifted_words, checked_candidates). Best shift and updated
+         number of evaluated shift candidates.
+     """
+     pre_score, inv_trace = cached_ed(words_h)
+
+     # to get alignment, we pretend we are rewriting reference into hypothesis,
+     # so we need to flip the trace of edit operations
+     trace = _flip_trace(inv_trace)
+     align, ref_err, hyp_err = trace_to_alignment(trace)
+
+     best = None
+
+     for start_h, start_r, length in _find_shifted_pairs(words_h, words_r):
+         # don't do the shift unless both the hypothesis was wrong and the
+         # reference doesn't match hypothesis at the target position
+         if sum(hyp_err[start_h: start_h + length]) == 0:
+             continue
+
+         if sum(ref_err[start_r: start_r + length]) == 0:
+             continue
+
+         # don't try to shift within the subsequence
+         if start_h <= align[start_r] < start_h + length:
+             continue
+
+         prev_idx = -1
+         for offset in range(-1, length):
+             if start_r + offset == -1:
+                 idx = 0  # insert before the beginning
+             elif start_r + offset in align:
+                 # Unlike Tercom which inserts *after* the index, we insert
+                 # *before* the index.
+                 idx = align[start_r + offset] + 1
+             else:
+                 break  # offset is out of bounds => aims past reference
+
+             if idx == prev_idx:
+                 continue  # skip idx if already tried
+
+             prev_idx = idx
+
+             shifted_words = _perform_shift(words_h, start_h, length, idx)
+             assert(len(shifted_words) == len(words_h))
+
+             # Elements of the tuple are designed to replicate Tercom ranking
+             # of shifts:
+             candidate = (
+                 pre_score - cached_ed(shifted_words)[0],  # highest score first
+                 length,  # then, longest match first
+                 -start_h,  # then, earliest match first
+                 -idx,  # then, earliest target position first
+                 shifted_words,
+             )
+
+             checked_candidates += 1
+
+             if not best or candidate > best:
+                 best = candidate
+
+         if checked_candidates >= _MAX_SHIFT_CANDIDATES:
+             break
+
+     if not best:
+         return 0, words_h, checked_candidates
+     else:
+         best_score, _, _, _, shifted_words = best
+         return best_score, shifted_words, checked_candidates
+
+
+ def _perform_shift(words: List[str], start: int, length: int, target: int) -> List[str]:
+     """Perform a shift in `words` from `start` to `target`.
+
+     :param words: Words to shift.
+     :param start: Where from.
+     :param length: How many words.
+     :param target: Where to.
+     :return: Shifted words.
+     """
+     if target < start:
+         # shift before previous position
+         return words[:target] + words[start: start + length] \
+             + words[target: start] + words[start + length:]
+     elif target > start + length:
+         # shift after previous position
+         return words[:start] + words[start + length: target] \
+             + words[start: start + length] + words[target:]
+     else:
+         # shift within the shifted string
+         return words[:start] + words[start + length: length + target] \
+             + words[start: start + length] + words[length + target:]
+
+
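A quick hand-worked illustration of the first two branches of `_perform_shift` above (not part of the diff; results follow directly from the slicing shown):

    # Illustrative only: shifting a block of words left and right.
    words = ['a', 'b', 'c', 'd', 'e']
    # move the block ['d', 'e'] (start=3, length=2) before index 1
    assert _perform_shift(words, 3, 2, 1) == ['a', 'd', 'e', 'b', 'c']
    # move the block ['a', 'b'] (start=0, length=2) before index 4
    assert _perform_shift(words, 0, 2, 4) == ['c', 'd', 'a', 'b', 'e']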
+ def _find_shifted_pairs(words_h: List[str], words_r: List[str]):
+     """Find matching word sub-sequences in two lists of words.
+
+     Ignores sub-sequences starting at the same position.
+
+     :param words_h: First word list.
+     :param words_r: Second word list.
+     :return: Yields tuples of (h_start, r_start, length) such that:
+         words_h[h_start:h_start+length] = words_r[r_start:r_start+length]
+     """
+     n_words_h = len(words_h)
+     n_words_r = len(words_r)
+     for start_h in range(n_words_h):
+         for start_r in range(n_words_r):
+             # this is slightly different from what tercom does but this should
+             # really only kick in in degenerate cases
+             if abs(start_r - start_h) > _MAX_SHIFT_DIST:
+                 continue
+
+             length = 0
+             while words_h[start_h + length] == words_r[start_r + length] and length < _MAX_SHIFT_SIZE:
+                 length += 1
+
+                 yield start_h, start_r, length
+
+                 # If one sequence is consumed, stop processing
+                 if n_words_h == start_h + length or n_words_r == start_r + length:
+                     break
+
+
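For example, assuming the loop nesting reconstructed above, each matching sub-sequence is reported once per length as it grows (a hand-worked illustration, not part of the diff):

    # Illustrative only: matches of ['b', 'c'] reported at lengths 1 and 2,
    # plus the single-word match of 'c'.
    list(_find_shifted_pairs(['a', 'b', 'c'], ['b', 'c', 'd']))
    # -> [(1, 0, 1), (1, 0, 2), (2, 1, 1)]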
+ def _flip_trace(trace):
+     """Flip the trace of edit operations.
+
+     Instead of rewriting a->b, get a recipe for rewriting b->a.
+
+     Simply flips insertions and deletions.
+     """
+     return trace.translate(_FLIP_OPS)
+
+
+ def trace_to_alignment(trace: str) -> Tuple[Dict, List, List]:
+     """Transform trace of edit operations into an alignment of the sequences.
+
+     :param trace: Trace of edit operations (' '=no change or 's'/'i'/'d').
+     :return: Alignment, error positions in reference, error positions in hypothesis.
+     """
+     pos_hyp = -1
+     pos_ref = -1
+     hyp_err = []
+     ref_err = []
+     align = {}
+
+     # we are rewriting a into b
+     for op in trace:
+         if op == _OP_NOP:
+             pos_hyp += 1
+             pos_ref += 1
+             align[pos_ref] = pos_hyp
+             hyp_err.append(0)
+             ref_err.append(0)
+         elif op == _OP_SUB:
+             pos_hyp += 1
+             pos_ref += 1
+             align[pos_ref] = pos_hyp
+             hyp_err.append(1)
+             ref_err.append(1)
+         elif op == _OP_INS:
+             pos_hyp += 1
+             hyp_err.append(1)
+         elif op == _OP_DEL:
+             pos_ref += 1
+             align[pos_ref] = pos_hyp
+             ref_err.append(1)
+         else:
+             raise Exception(f"unknown operation {op!r}")
+
+     return align, ref_err, hyp_err
+
+
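To make the trace conventions concrete, a small hand-worked example (not part of the diff; the values follow directly from the two functions above):

    # Illustrative only: flipping a trace and turning one into an alignment.
    assert _flip_trace('i d s') == 'd i s'   # insertions and deletions swap roles
    align, ref_err, hyp_err = trace_to_alignment(' i ')
    assert align == {0: 0, 1: 2}   # reference position -> hypothesis position
    assert hyp_err == [0, 1, 0]    # the inserted hypothesis word counts as an error
    assert ref_err == [0, 0]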
+ class BeamEditDistance:
+     """Edit distance with several features required for TER calculation.
+
+     * internal cache
+     * "beam" search
+     * tracking of edit operations
+
+     The internal self._cache works like this:
+
+     Keys are words of the hypothesis. Values are tuples (next_node, row) where:
+
+     * next_node is the cache for the next word in the sequence
+     * row is the stored row of the edit distance matrix
+
+     Effectively, caching allows skipping several rows of the edit distance
+     matrix calculation and instead initializing the computation with the last
+     matching matrix row.
+
+     Beam search, as implemented here, only explores a fixed-size sub-row of
+     candidates around the matrix diagonal (more precisely, it's a
+     "pseudo"-diagonal since we take the ratio of sequence lengths into account).
+
+     Tracking allows reconstructing the optimal sequence of edit operations.
+
+     :param words_ref: A list of reference tokens.
+     """
+     def __init__(self, words_ref: List[str]):
+         """`BeamEditDistance` initializer."""
+         self._words_ref = words_ref
+         self._n_words_ref = len(self._words_ref)
+
+         # first row corresponds to insertion operations of the reference,
+         # so we do 1 edit operation per reference word
+         self._initial_row = [(i * _COST_INS, _OP_INS)
+                              for i in range(self._n_words_ref + 1)]
+
+         self._cache = {}  # type: Dict[str, Tuple]
+         self._cache_size = 0
+
+         # Precomputed empty matrix row. Contains infinities so that beam search
+         # avoids using the uninitialized cells.
+         self._empty_row = [(_INT_INFINITY, _OP_UNDEF)] * (self._n_words_ref + 1)
+
+     def __call__(self, words_hyp: List[str]) -> Tuple[int, str]:
+         """Calculate edit distance between self._words_ref and the hypothesis.
+
+         Uses cache to skip some of the computation.
+
+         :param words_hyp: Words in translation hypothesis.
+         :return: Tuple (edit distance, trace of edit operations).
+         """
+
+         # skip initial words in the hypothesis for which we already know the
+         # edit distance
+         start_position, dist = self._find_cache(words_hyp)
+
+         # calculate the rest of the edit distance matrix
+         edit_distance, newly_created_matrix, trace = self._edit_distance(
+             words_hyp, start_position, dist)
+
+         # update our cache with the newly calculated rows
+         self._add_cache(words_hyp, newly_created_matrix)
+
+         return edit_distance, trace
+
+     def _edit_distance(self, words_h: List[str], start_h: int,
+                        cache: List[List[Tuple[int, str]]]) -> Tuple[int, List, str]:
+         """Actual edit distance calculation.
+
+         Can be initialized with the last cached row and a start position in
+         the hypothesis that it corresponds to.
+
+         :param words_h: Words in translation hypothesis.
+         :param start_h: Position from which to start the calculation.
+             (This is zero if no cache match was found.)
+         :param cache: Precomputed rows corresponding to edit distance matrix
+             before `start_h`.
+         :return: Edit distance value, newly computed rows to update the
+             cache, trace.
+         """
+
+         n_words_h = len(words_h)
+
+         # initialize the rest of the matrix with infinite edit distances
+         rest_empty = [list(self._empty_row)
+                       for _ in range(n_words_h - start_h)]
+
+         dist = cache + rest_empty
+
+         assert len(dist) == n_words_h + 1
+
+         length_ratio = self._n_words_ref / n_words_h if words_h else 1
+
+         # in some crazy sentences, the difference in length is so large that
+         # we may end up with zero overlap with previous row
+         if _BEAM_WIDTH < length_ratio / 2:
+             beam_width = math.ceil(length_ratio / 2 + _BEAM_WIDTH)
+         else:
+             beam_width = _BEAM_WIDTH
+
+         # calculate the Levenshtein distance
+         for i in range(start_h + 1, n_words_h + 1):
+             pseudo_diag = math.floor(i * length_ratio)
+             min_j = max(0, pseudo_diag - beam_width)
+             max_j = min(self._n_words_ref + 1, pseudo_diag + beam_width)
+
+             if i == n_words_h:
+                 max_j = self._n_words_ref + 1
+
+             for j in range(min_j, max_j):
+                 if j == 0:
+                     dist[i][j] = (dist[i - 1][j][0] + _COST_DEL, _OP_DEL)
+                 else:
+                     if words_h[i - 1] == self._words_ref[j - 1]:
+                         cost_sub = 0
+                         op_sub = _OP_NOP
+                     else:
+                         cost_sub = _COST_SUB
+                         op_sub = _OP_SUB
+
+                     # Tercom prefers no-op/sub, then insertion, then deletion.
+                     # But since we flip the trace and compute the alignment from
+                     # the inverse, we need to swap order of insertion and
+                     # deletion in the preference.
+                     ops = (
+                         (dist[i - 1][j - 1][0] + cost_sub, op_sub),
+                         (dist[i - 1][j][0] + _COST_DEL, _OP_DEL),
+                         (dist[i][j - 1][0] + _COST_INS, _OP_INS),
+                     )
+
+                     for op_cost, op_name in ops:
+                         if dist[i][j][0] > op_cost:
+                             dist[i][j] = op_cost, op_name
+
+         # get the trace
+         trace = ""
+         i = n_words_h
+         j = self._n_words_ref
+
+         while i > 0 or j > 0:
+             op = dist[i][j][1]
+             trace = op + trace
+             if op in (_OP_SUB, _OP_NOP):
+                 i -= 1
+                 j -= 1
+             elif op == _OP_INS:
+                 j -= 1
+             elif op == _OP_DEL:
+                 i -= 1
+             else:
+                 raise Exception(f"unknown operation {op!r}")
+
+         return dist[-1][-1][0], dist[len(cache):], trace
+
+     def _add_cache(self, words_hyp: List[str], mat: List[List[Tuple]]):
+         """Add newly computed rows to cache.
+
+         Since edit distance is only calculated on the hypothesis suffix that
+         was not in cache, the number of rows in `mat` may be shorter than
+         hypothesis length. In that case, we skip over these initial words.
+
+         :param words_hyp: Hypothesis words.
+         :param mat: Edit distance matrix rows for each position.
+         """
+         if self._cache_size >= _MAX_CACHE_SIZE:
+             return
+
+         node = self._cache
+
+         n_mat = len(mat)
+
+         # how many initial words to skip
+         skip_num = len(words_hyp) - n_mat
+
+         # jump through the cache to the current position
+         for i in range(skip_num):
+             node = node[words_hyp[i]][0]
+
+         assert len(words_hyp[skip_num:]) == n_mat
+
+         # update cache with newly computed rows
+         for word, row in zip(words_hyp[skip_num:], mat):
+             if word not in node:
+                 node[word] = ({}, tuple(row))
+                 self._cache_size += 1
+             value = node[word]
+             node = value[0]
+
+     def _find_cache(self, words_hyp: List[str]) -> Tuple[int, List[List]]:
+         """Find the already computed rows of the edit distance matrix in cache.
+
+         Returns a partially computed edit distance matrix.
+
+         :param words_hyp: Translation hypothesis.
+         :return: Tuple (start position, dist).
+         """
+         node = self._cache
+         start_position = 0
+         dist = [self._initial_row]
+         for word in words_hyp:
+             if word in node:
+                 start_position += 1
+                 node, row = node[word]
+                 dist.append(row)
+             else:
+                 break
+
+         return start_position, dist
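Taken together, the pieces above can be exercised roughly as follows (a minimal sketch, not part of the diff; the token lists are hypothetical, and the library's TER metric wraps this module with its own tokenization and corpus-level aggregation):

    # Illustrative sketch of how this module's pieces fit together.
    ref = "the cat sat on the mat".split()
    hyp = "on the mat the cat sat".split()

    cached_ed = BeamEditDistance(ref)      # edit-distance engine keyed by the reference
    distance, trace = cached_ed(hyp)       # beam edit distance plus operation trace
    edits, ref_len = translation_edit_rate(hyp, ref)  # shifts folded into the edit count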