applied-ai-018 committed
Commit e7e4eb8 · verified · 1 parent: 2d79525

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__init__.py +0 -0
  2. llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__main__.py +45 -0
  3. llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__init__.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__main__.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/base.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/fake_sgml.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/iwslt_xml.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/plain_text.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/tsv.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/wmt_xml.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/base.py +195 -0
  12. llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/fake_sgml.py +116 -0
  13. llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/iwslt_xml.py +8 -0
  14. llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/plain_text.py +36 -0
  15. llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/tsv.py +61 -0
  16. llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__init__.py +11 -0
  17. llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/__init__.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/base.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/lib_ter.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/ter.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/base.py +438 -0
  22. llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/bleu.py +420 -0
  23. llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/chrf.py +284 -0
  24. llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/helpers.py +69 -0
  25. llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/lib_ter.py +478 -0
  26. llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/ter.py +195 -0
  27. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__init__.py +2 -0
  28. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/__init__.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_13a.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_base.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_char.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_intl.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ja_mecab.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ko_mecab.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_none.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_re.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_spm.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ter.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_zh.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_13a.py +34 -0
  41. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_base.py +19 -0
  42. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_char.py +19 -0
  43. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_intl.py +50 -0
  44. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_ja_mecab.py +52 -0
  45. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_ko_mecab.py +52 -0
  46. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_none.py +10 -0
  47. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_re.py +38 -0
  48. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_spm.py +70 -0
  49. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_ter.py +171 -0
  50. llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_zh.py +119 -0
llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__init__.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__main__.py ADDED
@@ -0,0 +1,45 @@
+import sys
+
+from . import DATASETS
+
+try:
+    cmd = sys.argv[1]
+except IndexError:
+    print(f"Usage: {sys.argv[0]} --check | --dump")
+    sys.exit(1)
+
+if cmd == "--check":
+    import hashlib
+    import urllib.request
+
+    url_md5 = {}
+
+    for item in DATASETS.values():
+        if item.md5 is not None:
+            assert item.data
+            assert item.md5
+            assert len(item.data) == len(item.md5)
+            pairs = zip(item.data, item.md5)
+            for url, md5_hash in pairs:
+                url_md5[url] = md5_hash
+
+    for url, md5_hash in url_md5.items():
+        try:
+            print("Downloading ", url)
+            with urllib.request.urlopen(url) as f:
+                data = f.read()
+        except Exception as exc:
+            raise (exc)
+
+        if hashlib.md5(data).hexdigest() != md5_hash:
+            print("MD5 check failed for", url)
+elif cmd == "--dump":
+    import re
+
+    # Dumps a table in markdown format
+    print(f'| {"Dataset":<30} | {"Description":<115} |')
+    header = "| " + "-" * 30 + " | " + "-" * 115 + " |"
+    print(header)
+    for name, item in DATASETS.items():
+        desc = re.sub(r"(http[s]?:\/\/\S+)", r"[URL](\1)", str(item.description))
+        print(f"| {name:<30} | {desc:<115} |")
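For context, the `--check` branch above boils down to downloading each URL and comparing MD5 digests. A minimal, self-contained sketch of that logic, using only the standard library (the URL and hash in the comment are placeholders, not real sacrebleu dataset entries):

import hashlib
import urllib.request

def md5_matches(url: str, expected_md5: str) -> bool:
    # Download the file into memory and compare its MD5 digest
    # against the expected checksum, as --check does per URL.
    with urllib.request.urlopen(url) as f:
        data = f.read()
    return hashlib.md5(data).hexdigest() == expected_md5

# Placeholder values for illustration only:
# md5_matches("https://example.com/data.tar.gz", "d41d8cd98f00b204e9800998ecf8427e")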
llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (65.2 kB).
 
llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (1.35 kB).
 
llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/base.cpython-310.pyc ADDED
Binary file (7.36 kB).
 
llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/fake_sgml.cpython-310.pyc ADDED
Binary file (4.13 kB).
 
llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/iwslt_xml.cpython-310.pyc ADDED
Binary file (480 Bytes).
 
llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/plain_text.cpython-310.pyc ADDED
Binary file (1.58 kB).
 
llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/tsv.cpython-310.pyc ADDED
Binary file (2.24 kB).
 
llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/__pycache__/wmt_xml.cpython-310.pyc ADDED
Binary file (7.41 kB).
 
llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/base.py ADDED
@@ -0,0 +1,195 @@
+"""
+The base class for all types of datasets.
+"""
+import os
+import re
+from abc import ABCMeta, abstractmethod
+from typing import Dict, List, Optional
+
+from ..utils import SACREBLEU_DIR, download_file, smart_open
+
+
+class Dataset(metaclass=ABCMeta):
+    def __init__(
+        self,
+        name: str,
+        data: Optional[List[str]] = None,
+        description: Optional[str] = None,
+        citation: Optional[str] = None,
+        md5: Optional[List[str]] = None,
+        langpairs=Dict[str, List[str]],
+        **kwargs,
+    ):
+        """
+        Params come from the values in DATASETS.
+
+        :param name: Name of the dataset.
+        :param data: URLs of the raw data of the dataset.
+        :param description: Description of the dataset.
+        :param citation: Citation for the dataset.
+        :param md5: MD5 checksums of the dataset.
+        :param langpairs: List of available language pairs.
+        """
+        self.name = name
+        self.data = data
+        self.description = description
+        self.citation = citation
+        self.md5 = md5
+        self.langpairs = langpairs
+        self.kwargs = kwargs
+
+        # Don't do any downloading or further processing now.
+        # Only do that lazily, when asked.
+
+        # where to store the dataset
+        self._outdir = os.path.join(SACREBLEU_DIR, self.name)
+        self._rawdir = os.path.join(self._outdir, "raw")
+
+    def maybe_download(self):
+        """
+        If the dataset isn't downloaded, use utils/download_file()
+        This can be implemented here in the base class. It should write
+        to ~/.sacrebleu/DATASET/raw exactly as it does now.
+        """
+        os.makedirs(self._rawdir, exist_ok=True)
+
+        expected_checksums = self.md5 if self.md5 else [None] * len(self.data)
+
+        for url, expected_md5 in zip(self.data, expected_checksums):
+            tarball = os.path.join(self._rawdir, self._get_tarball_filename(url))
+
+            download_file(
+                url, tarball, extract_to=self._rawdir, expected_md5=expected_md5
+            )
+
+    @staticmethod
+    def _clean(s):
+        """
+        Removes trailing and leading spaces and collapses multiple consecutive internal spaces to a single one.
+
+        :param s: The string.
+        :return: A cleaned-up string.
+        """
+        return re.sub(r"\s+", " ", s.strip())
+
+    def _get_tarball_filename(self, url):
+        """
+        Produces a local filename for the tarball.
+        :param url: The url to download.
+        :return: A name produced from the dataset identifier and the URL basename.
+        """
+        return self.name.replace("/", "_") + "." + os.path.basename(url)
+
+    def _get_txt_file_path(self, langpair, fieldname):
+        """
+        Given the language pair and fieldname, return the path to the text file.
+        The format is: ~/.sacrebleu/DATASET/DATASET.LANGPAIR.FIELDNAME
+
+        :param langpair: The language pair.
+        :param fieldname: The fieldname.
+        :return: The path to the text file.
+        """
+        # handle the special case of subsets. e.g. "wmt21/dev" > "wmt21_dev"
+        name = self.name.replace("/", "_")
+        # Colons are used to distinguish multiple references, but are not supported in Windows filenames
+        fieldname = fieldname.replace(":", "-")
+        return os.path.join(self._outdir, f"{name}.{langpair}.{fieldname}")
+
+    def _get_langpair_metadata(self, langpair):
+        """
+        Given a language pair, return the metadata for that language pair.
+        Deal with errors if the language pair is not available.
+
+        :param langpair: The language pair. e.g. "en-de"
+        :return: Dict format which is same as self.langpairs.
+        """
+        if langpair is None:
+            langpairs = self.langpairs
+        elif langpair not in self.langpairs:
+            raise Exception(f"No such language pair {self.name}/{langpair}")
+        else:
+            langpairs = {langpair: self.langpairs[langpair]}
+
+        return langpairs
+
+    @abstractmethod
+    def process_to_text(self, langpair=None) -> None:
+        """Processes raw files to plain text files.
+
+        :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed.
+        """
+        pass
+
+    def fieldnames(self, langpair) -> List[str]:
+        """
+        Return a list of all the field names. For most sources, this is just
+        the source and the reference. For others, it might include the document
+        ID for each line, or the original language (origLang).
+
+        get_files() should return the same number of items as this.
+
+        :param langpair: The language pair (e.g., "de-en")
+        :return: a list of field names
+        """
+        return ["src", "ref"]
+
+    def __iter__(self, langpair):
+        """
+        Iterates over all fields (source, references, and other metadata) defined
+        by the dataset.
+        """
+        all_files = self.get_files(langpair)
+        all_fins = [smart_open(f) for f in all_files]
+
+        for item in zip(*all_fins):
+            yield item
+
+    def source(self, langpair):
+        """
+        Return an iterable over the source lines.
+        """
+        source_file = self.get_source_file(langpair)
+        with smart_open(source_file) as fin:
+            for line in fin:
+                yield line.strip()
+
+    def references(self, langpair):
+        """
+        Return an iterable over the references.
+        """
+        ref_files = self.get_reference_files(langpair)
+        ref_fins = [smart_open(f) for f in ref_files]
+
+        for item in zip(*ref_fins):
+            yield item
+
+    def get_source_file(self, langpair):
+        all_files = self.get_files(langpair)
+        all_fields = self.fieldnames(langpair)
+        index = all_fields.index("src")
+        return all_files[index]
+
+    def get_reference_files(self, langpair):
+        all_files = self.get_files(langpair)
+        all_fields = self.fieldnames(langpair)
+        ref_files = [
+            f for f, field in zip(all_files, all_fields) if field.startswith("ref")
+        ]
+        return ref_files
+
+    def get_files(self, langpair):
+        """
+        Returns the path of the source file and all reference files for
+        the provided test set / language pair.
+        Downloads the references first if they are not already local.
+
+        :param langpair: The language pair (e.g., "de-en")
+        :return: a list of the source file and all reference files
+        """
+        fields = self.fieldnames(langpair)
+        files = [self._get_txt_file_path(langpair, field) for field in fields]
+
+        for file in files:
+            if not os.path.exists(file):
+                self.process_to_text(langpair)
+        return files
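To make the contract of this base class concrete, here is a hedged sketch of a minimal subclass; `ToyDataset` and its arguments are hypothetical, not part of sacrebleu. Only the abstract `process_to_text()` must be supplied, since downloading, path layout, and iteration are inherited:

# Hypothetical subclass for illustration only.
class ToyDataset(Dataset):
    def process_to_text(self, langpair=None):
        # A real subclass would convert the raw download in
        # ~/.sacrebleu/DATASET/raw into the plain-text layout
        # ~/.sacrebleu/DATASET/DATASET.LANGPAIR.FIELD here.
        pass

ds = ToyDataset(
    "toy",
    data=["https://example.com/toy.tar.gz"],  # placeholder URL
    langpairs={"en-de": ["toy-ende.src", "toy-ende.ref"]},
)
# Nothing is downloaded at construction time; files are fetched and
# processed lazily on first access, e.g.:
# for src, ref in zip(ds.source("en-de"), ds.references("en-de")):
#     ...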
llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/fake_sgml.py ADDED
@@ -0,0 +1,116 @@
+import os
+import re
+
+from ..utils import smart_open
+from .base import Dataset
+
+
+class FakeSGMLDataset(Dataset):
+    """
+    The fake SGML format used by WMT prior to 2021. Can't be properly parsed.
+    Source and reference(s) in separate files.
+    """
+
+    def _convert_format(self, input_file_path, output_filep_path):
+        """
+        Extract data from raw file and convert to raw txt format.
+        """
+        with smart_open(input_file_path) as fin, smart_open(
+            output_filep_path, "wt"
+        ) as fout:
+            for line in fin:
+                if line.startswith("<seg "):
+                    line = self._clean(re.sub(r"<seg.*?>(.*)</seg>.*?", "\\1", line))
+                    print(line, file=fout)
+
+    def _convert_meta(self, input_file_path, field, output_filep_path):
+        """
+        Extract metadata from document tags and project it across segments.
+        """
+        with smart_open(input_file_path) as fin, smart_open(
+            output_filep_path, "wt"
+        ) as fout:
+            value = ""
+            for line in fin:
+                if line.startswith("<doc "):
+                    match = re.search(rf'{field}="(.*?)"', line)
+                    if match is not None:
+                        value = match.group(1)
+
+                elif line.startswith("<seg "):
+                    # print the current value once for each field
+                    print(value, file=fout)
+
+    def process_to_text(self, langpair=None):
+        """Processes raw files to plain text files.
+
+        :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed.
+        """
+        # ensure that the dataset is downloaded
+        self.maybe_download()
+        langpairs = self._get_langpair_metadata(langpair)
+
+        for langpair in langpairs:
+            fieldnames = self.fieldnames(langpair)
+            origin_files = [
+                os.path.join(self._rawdir, path) for path in langpairs[langpair]
+            ]
+
+            # Add the source file three more times for docid, genre, origlang
+            origin_files += [
+                os.path.join(self._rawdir, langpairs[langpair][0]) for _ in range(3)
+            ]
+
+            for field, origin_file in zip(fieldnames, origin_files):
+
+                origin_file = os.path.join(self._rawdir, origin_file)
+                output_file = self._get_txt_file_path(langpair, field)
+
+                if field.startswith("src") or field.startswith("ref"):
+                    self._convert_format(origin_file, output_file)
+                else:
+                    # document metadata keys
+                    self._convert_meta(origin_file, field, output_file)
+
+    def fieldnames(self, langpair):
+        """
+        Return a list of all the field names. For most sources, this is just
+        the source and the reference. For others, it might include the document
+        ID for each line, or the original language (origLang).
+
+        get_files() should return the same number of items as this.
+        """
+        meta = self._get_langpair_metadata(langpair)
+        length = len(meta[langpair])
+
+        assert (
+            length >= 2
+        ), f"Each language pair in {self.name} must have at least 2 fields."
+
+        fields = ["src"]
+
+        if length == 2:
+            fields.append("ref")
+        else:
+            for i, _ in enumerate(meta[langpair][1:]):
+                fields.append(f"ref:{i}")
+
+        if not self.name.startswith("wmt08"):
+            fields += ["docid", "genre", "origlang"]
+
+        return fields
+
+
+class WMTAdditionDataset(FakeSGMLDataset):
+    """
+    Handle special case of WMT Google addition dataset.
+    """
+
+    def _convert_format(self, input_file_path, output_filep_path):
+        if input_file_path.endswith(".sgm"):
+            return super()._convert_format(input_file_path, output_filep_path)
+        else:
+            with smart_open(input_file_path) as fin:
+                with smart_open(output_filep_path, "wt") as fout:
+                    for line in fin:
+                        print(line.rstrip(), file=fout)
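The segment-extraction regex in `_convert_format()` above is the heart of this class; a quick standalone illustration of what it does to one `<seg>` line (toy input, stdlib only):

import re

line = '<seg id="1"> Hello   fake  SGML world. </seg>'
text = re.sub(r"<seg.*?>(.*)</seg>.*?", "\\1", line)
# text == ' Hello   fake  SGML world. '
# Dataset._clean() then strips and collapses the whitespace:
print(re.sub(r"\s+", " ", text.strip()))  # -> 'Hello fake SGML world.'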
llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/iwslt_xml.py ADDED
@@ -0,0 +1,8 @@
+from .fake_sgml import FakeSGMLDataset
+
+
+class IWSLTXMLDataset(FakeSGMLDataset):
+    """IWSLT dataset format. Can be parsed with the lxml parser."""
+
+    # Same as FakeSGMLDataset. Nothing to do here.
+    pass
llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/plain_text.py ADDED
@@ -0,0 +1,36 @@
+import os
+
+from ..utils import smart_open
+from .base import Dataset
+
+
+class PlainTextDataset(Dataset):
+    """
+    The plain text format. Data is separated into source and reference files.
+    Each line of the two files is aligned.
+    """
+
+    def process_to_text(self, langpair=None):
+        """Processes raw files to plain text files.
+
+        :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed.
+        """
+        # ensure that the dataset is downloaded
+        self.maybe_download()
+        langpairs = self._get_langpair_metadata(langpair)
+
+        for langpair in langpairs:
+            fieldnames = self.fieldnames(langpair)
+            origin_files = [
+                os.path.join(self._rawdir, path) for path in langpairs[langpair]
+            ]
+
+            for field, origin_file in zip(fieldnames, origin_files):
+
+                origin_file = os.path.join(self._rawdir, origin_file)
+                output_file = self._get_txt_file_path(langpair, field)
+
+                with smart_open(origin_file) as fin:
+                    with smart_open(output_file, "wt") as fout:
+                        for line in fin:
+                            print(line.rstrip(), file=fout)
llmeval-env/lib/python3.10/site-packages/sacrebleu/dataset/tsv.py ADDED
@@ -0,0 +1,61 @@
+import os
+
+from ..utils import smart_open
+from .base import Dataset
+
+
+class TSVDataset(Dataset):
+    """
+    The format used by the MTNT datasets. Data is in a single TSV file.
+    """
+
+    @staticmethod
+    def _split_index_and_filename(meta, field):
+        """
+        Splits the index and filename from a metadata string.
+
+        e.g. meta="3:en-de.tsv", field=[Any value] -> (3, "en-de.tsv")
+             meta="en-de.tsv", field="src" -> (0, "en-de.tsv")
+             meta="en-de.tsv", field="tgt" -> (1, "en-de.tsv")
+        """
+        arr = meta.split(":")
+        if len(arr) == 2:
+            try:
+                index = int(arr[0])
+            except ValueError:
+                raise Exception(f"Invalid meta for TSVDataset: {meta}")
+            return index, arr[1]
+
+        else:
+            index = 0 if field == "src" else 1
+            return index, meta
+
+    def process_to_text(self, langpair=None):
+        """Processes raw files to plain text files.
+
+        :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed.
+        """
+        # ensure that the dataset is downloaded
+        self.maybe_download()
+        langpairs = self._get_langpair_metadata(langpair)
+
+        for langpair in langpairs:
+            fieldnames = self.fieldnames(langpair)
+            origin_files = [
+                os.path.join(self._rawdir, path) for path in langpairs[langpair]
+            ]
+
+            for field, origin_file, meta in zip(
+                fieldnames, origin_files, langpairs[langpair]
+            ):
+                index, origin_file = self._split_index_and_filename(meta, field)
+
+                origin_file = os.path.join(self._rawdir, origin_file)
+                output_file = self._get_txt_file_path(langpair, field)
+
+                with smart_open(origin_file) as fin:
+                    with smart_open(output_file, "wt") as fout:
+                        for line in fin:
+                            # be careful with empty source or reference lines
+                            # MTNT2019/ja-en.final.tsv:632 `'1033\t718\t\t\n'`
+                            print(line.rstrip("\n").split("\t")[index], file=fout)
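The behavior of the column-selection helper above, shown doctest-style on toy metadata strings (calls operate on the `TSVDataset` class defined in this file):

TSVDataset._split_index_and_filename("3:en-de.tsv", "src")  # -> (3, "en-de.tsv")
TSVDataset._split_index_and_filename("en-de.tsv", "src")    # -> (0, "en-de.tsv")
TSVDataset._split_index_and_filename("en-de.tsv", "ref")    # -> (1, "en-de.tsv")
TSVDataset._split_index_and_filename("x:en-de.tsv", "src")  # raises: invalid meta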
llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__init__.py ADDED
@@ -0,0 +1,11 @@
+"""The implementation of various metrics."""
+
+from .bleu import BLEU, BLEUScore    # noqa: F401
+from .chrf import CHRF, CHRFScore    # noqa: F401
+from .ter import TER, TERScore       # noqa: F401
+
+METRICS = {
+    'BLEU': BLEU,
+    'CHRF': CHRF,
+    'TER': TER,
+}
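Since this module is the public entry point for the metrics, a short hedged usage sketch (toy sentences, not taken from the source):

from sacrebleu.metrics import BLEU, CHRF, TER

hyps = ["the cat sat on the mat"]
refs = [["the cat is sitting on the mat"]]  # one reference "document"

for metric in (BLEU(), CHRF(), TER()):
    score = metric.corpus_score(hyps, refs)
    print(score, metric.get_signature())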
llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (447 Bytes).
 
llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/base.cpython-310.pyc ADDED
Binary file (14.8 kB).
 
llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/lib_ter.cpython-310.pyc ADDED
Binary file (11 kB).
 
llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/__pycache__/ter.cpython-310.pyc ADDED
Binary file (7.3 kB).
 
llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/base.py ADDED
@@ -0,0 +1,438 @@
+"""The base `Score`, `Metric` and `Signature` classes to derive from.
+
+`Metric` is an abstract class that enforces the implementation of a set
+of abstract methods. This way, a correctly implemented metric will work
+seamlessly with the rest of the codebase.
+"""
+
+import json
+import logging
+import statistics
+from typing import List, Sequence, Any, Optional, Dict
+from abc import ABCMeta, abstractmethod
+
+from .. import __version__
+
+sacrelogger = logging.getLogger('sacrebleu')
+
+
+class Score:
+    """A base score class to derive from.
+
+    :param name: The name of the underlying metric.
+    :param score: A floating point number for the final metric.
+    """
+    def __init__(self, name: str, score: float):
+        """`Score` initializer."""
+        self.name = name
+        self.score = score
+
+        # Statistical test related fields
+        self._mean = -1.0
+        self._ci = -1.0
+
+        # More info can be added right after the score
+        self._verbose = ''
+
+    def format(self, width: int = 2, score_only: bool = False,
+               signature: str = '', is_json: bool = False) -> str:
+        """Returns a pretty representation of the score.
+        :param width: Floating point decimal precision width.
+        :param score_only: If `True`, and the format is not `json`,
+        returns a single score string.
+        :param signature: A string representation of the given `Signature`
+        instance.
+        :param is_json: If `True`, will output the score in JSON string.
+        :return: A plain or JSON-formatted string representation.
+        """
+        d = {
+            'name': self.name,
+            'score': float(f'{self.score:.{width}f}'),
+            'signature': signature,
+        }
+
+        sc = f'{self.score:.{width}f}'
+
+        if self._mean > 0:
+            confidence_mean = f'{self._mean:.{width}f}'
+            confidence_var = f'{self._ci:.{width}f}'
+            confidence_str = f'μ = {confidence_mean} ± {confidence_var}'
+
+            sc += f' ({confidence_str})'
+            if is_json:
+                d['confidence_mean'] = float(confidence_mean)
+                d['confidence_var'] = float(confidence_var)
+                d['confidence'] = confidence_str
+
+        # Construct full score line
+        full_score = f"{self.name}|{signature}" if signature else self.name
+        full_score = f"{full_score} = {sc}"
+        if self._verbose:
+            full_score += f' {self._verbose}'
+            d['verbose_score'] = self._verbose
+
+        if score_only:
+            return sc
+
+        if is_json:
+            for param in signature.split('|'):
+                key, value = param.split(':')
+                d[key] = value
+            return json.dumps(d, indent=1, ensure_ascii=False)
+
+        return full_score
+
+    def estimate_ci(self, scores: List['Score']):
+        """Takes a list of scores and stores mean, stdev and 95% confidence
+        interval around the mean.
+
+        :param scores: A list of `Score` objects obtained from bootstrap
+        resampling for example.
+        """
+        # Sort the scores
+        raw_scores = sorted([x.score for x in scores])
+        n = len(raw_scores)
+
+        # Get CI bounds (95%, i.e. 1/40 from left)
+        lower_idx = n // 40
+        upper_idx = n - lower_idx - 1
+        lower, upper = raw_scores[lower_idx], raw_scores[upper_idx]
+        self._ci = 0.5 * (upper - lower)
+        self._mean = statistics.mean(raw_scores)
+
+    def __repr__(self):
+        """Returns a human readable score string."""
+        return self.format()
+
+
+class Signature:
+    """A convenience class to represent sacreBLEU reproducibility signatures.
+
+    :param args: key-value dictionary passed from the actual metric instance.
+    """
+    def __init__(self, args: dict):
+        """`Signature` initializer."""
+        # Global items that are shared across all metrics
+        self._abbr = {
+            'version': 'v',
+            'nrefs': '#',
+            'test': 't',
+            'lang': 'l',
+            'subset': 'S',
+            'origlang': 'o',
+            'bs': 'bs',    # Bootstrap resampling trials
+            'ar': 'ar',    # Approximate randomization trials
+            'seed': 'rs',  # RNG's seed
+        }
+
+        if 'num_refs' not in args:
+            raise ValueError(
+                'Number of references unknown, please evaluate the metric first.')
+
+        num_refs = args['num_refs']
+        if num_refs == -1:
+            # Detect variable number of refs
+            num_refs = 'var'
+
+        # Global items that are shared across all metrics
+        # None's will be ignored
+        self.info = {
+            'version': __version__,
+            'nrefs': num_refs,
+            'bs': args.get('n_bootstrap', None),
+            'ar': None,
+            'seed': args.get('seed', None),
+            'test': args.get('test_set', None),
+            'lang': args.get('langpair', None),
+            'origlang': args.get('origlang', None),
+            'subset': args.get('subset', None),
+        }
+
+    def format(self, short: bool = False) -> str:
+        """Returns a string representation of the signature.
+
+        :param short: If True, shortened signature is produced.
+        :return: A string representation of the signature.
+        """
+        pairs = []
+        keys = list(self.info.keys())
+        # keep version always at end
+        keys.remove('version')
+        for name in keys + ['version']:
+            value = self.info[name]
+            if value is not None:
+                if isinstance(value, bool):
+                    # Replace True/False with yes/no
+                    value = 'yes' if value else 'no'
+                final_name = self._abbr[name] if short else name
+                pairs.append(f'{final_name}:{value}')
+
+        return '|'.join(pairs)
+
+    def update(self, key: str, value: Any):
+        """Add a new item or update an existing one.
+
+        :param key: The key to use in the dictionary.
+        :param value: The associated value for the `key`.
+        """
+        self.info[key] = value
+
+    def __str__(self):
+        """Returns a human-readable signature string."""
+        return self.format()
+
+    def __repr__(self):
+        """Returns a human-readable signature string."""
+        return self.format()
+
+
+class Metric(metaclass=ABCMeta):
+    """A base class for all metrics that ensures the implementation of some
+    methods. Much of the common functionality is moved to this base class
+    from other metrics."""
+
+    # Each metric should define its Signature class' name here
+    _SIGNATURE_TYPE = Signature
+
+    def __init__(self):
+        """`Metric` initializer."""
+        # The pre-computed reference cache
+        self._ref_cache = None
+
+        # only useful for BLEU tokenized warnings. Set to True so that
+        # warnings are not issued for other metrics.
+        self._force = True
+
+        # Will be used by the signature when bootstrap resampling
+        self.n_bootstrap = None
+        self.seed = None
+
+    def _check_sentence_score_args(self, hyp: str, refs: Sequence[str]):
+        """Performs sanity checks on `sentence_score` method's arguments.
+
+        :param hyp: A single hypothesis string.
+        :param refs: A sequence of reference strings.
+        """
+        prefix = self.__class__.__name__
+        err_msg = None
+
+        if not isinstance(hyp, str):
+            err_msg = 'The argument `hyp` should be a string.'
+        elif isinstance(refs, str) or not isinstance(refs, Sequence):
+            err_msg = 'The argument `refs` should be a sequence of strings.'
+        elif not isinstance(refs[0], str) and refs[0] is not None:
+            err_msg = 'Each element of `refs` should be a string.'
+
+        if err_msg:
+            raise TypeError(f'{prefix}: {err_msg}')
+
+    def _check_corpus_score_args(self, hyps: Sequence[str],
+                                 refs: Optional[Sequence[Sequence[str]]]):
+        """Performs sanity checks on `corpus_score` method's arguments.
+
+        :param hyps: A sequence of hypothesis strings.
+        :param refs: A sequence of reference documents with document being
+        defined as a sequence of reference strings. If `None`, cached references
+        will be used.
+        """
+
+        prefix = self.__class__.__name__
+        err_msg = None
+
+        if not isinstance(hyps, Sequence):
+            err_msg = "`hyps` should be a sequence of strings."
+        elif not isinstance(hyps[0], str):
+            err_msg = 'Each element of `hyps` should be a string.'
+        elif any(line is None for line in hyps):
+            err_msg = "Undefined line in hypotheses stream!"
+
+        if refs is not None:
+            if not isinstance(refs, Sequence):
+                err_msg = "`refs` should be a sequence of sequence of strings."
+            elif not isinstance(refs[0], Sequence):
+                err_msg = "Each element of `refs` should be a sequence of strings."
+            elif not isinstance(refs[0][0], str) and refs[0][0] is not None:
+                err_msg = "`refs` should be a sequence of sequence of strings."
+
+        if err_msg:
+            raise TypeError(f'{prefix}: {err_msg}')
+
+    @abstractmethod
+    def _aggregate_and_compute(self, stats: List[List[Any]]) -> Any:
+        """Computes the final score given the pre-computed match statistics.
+
+        :param stats: A list of segment-level statistics.
+        :return: A `Score` instance.
+        """
+        pass
+
+    @abstractmethod
+    def _compute_score_from_stats(self, stats: List[Any]) -> Any:
+        """Computes the final score from already aggregated statistics.
+
+        :param stats: A list or numpy array of segment-level statistics.
+        :return: A `Score` object.
+        """
+        pass
+
+    @abstractmethod
+    def _preprocess_segment(self, sent: str) -> str:
+        """A wrapper around the metric's tokenization and pre-processing logic.
+        This should be implemented for reference caching to work correctly.
+
+        :param sent: The input sentence.
+        :return: The pre-processed output sentence.
+        """
+        pass
+
+    @abstractmethod
+    def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, Any]:
+        """Given a list of reference segments, extract the required
+        information (such as n-grams for BLEU and chrF). This should be implemented
+        for the generic `_cache_references()` to work across all metrics.
+
+        :param refs: A sequence of strings.
+        """
+        pass
+
+    @abstractmethod
+    def _compute_segment_statistics(self, hypothesis: str, ref_kwargs: Dict) -> List[Any]:
+        """Given a (pre-processed) hypothesis sentence and already computed
+        reference info, returns the best match statistics across the
+        references. The return type is usually a List of ints or floats.
+
+        :param hypothesis: A pre-processed hypothesis sentence.
+        :param ref_kwargs: A dictionary with reference-related information
+        within. This is formulated as a dictionary as different metrics may
+        require different information regarding a reference segment.
+        """
+        pass
+
+    def _cache_references(self, references: Sequence[Sequence[str]]) -> List[Any]:
+        """Given the full set of document references, extract segment n-grams
+        (or other necessary information) for caching purposes.
+
+        :param references: A sequence of reference documents with document being
+        defined as a sequence of reference strings. A particular reference
+        segment can be '' or `None` to allow the use of variable number
+        of references per segment.
+        :return: A list where each element is a tuple of segment n-grams and
+        reference lengths, as returned by `_extract_reference_info()`.
+        """
+        ref_cache = []
+
+        # Decide on final number of refs here as well
+        num_refs = set()
+
+        for refs in zip(*references):
+            # Remove undefined references
+            lines = [x for x in refs if x is not None]
+
+            # Keep track of reference counts to allow variable reference
+            # info in the signature
+            num_refs.add(len(lines))
+
+            lines = [self._preprocess_segment(x) for x in lines]
+
+            # Get n-grams
+            ref_cache.append(self._extract_reference_info(lines))
+
+        if len(num_refs) == 1:
+            self.num_refs = list(num_refs)[0]
+        else:
+            # A variable number of refs exist
+            self.num_refs = -1
+
+        return ref_cache
+
+    def _extract_corpus_statistics(self, hypotheses: Sequence[str],
+                                   references: Optional[Sequence[Sequence[str]]]) -> Any:
+        """Reads the corpus and returns sentence-level match statistics for
+        faster re-computations esp. during statistical tests.
+
+        :param hypotheses: A sequence of hypothesis strings.
+        :param references: A sequence of reference documents with document being
+        defined as a sequence of reference strings. If `None`, cached references
+        will be used.
+        :return: A list where each sublist corresponds to segment statistics.
+        """
+        # Pre-compute references
+        # Don't store the cache as the user is explicitly passing refs
+        if references:
+            ref_cache = self._cache_references(references)
+        elif self._ref_cache:
+            ref_cache = self._ref_cache
+        else:
+            raise RuntimeError('No references provided and the cache is empty.')
+
+        stats = []
+        tok_count = 0
+
+        for hyp, ref_kwargs in zip(hypotheses, ref_cache):
+            # Check for already-tokenized input problem (only for BLEU)
+            if not self._force and hyp.endswith(' .'):
+                tok_count += 1
+
+            hyp = self._preprocess_segment(hyp)
+
+            # Collect stats
+            stats.append(self._compute_segment_statistics(hyp, ref_kwargs))
+
+        if tok_count >= 100:
+            sacrelogger.warning("That's 100 lines that end in a tokenized period ('.')")
+            sacrelogger.warning("It looks like you forgot to detokenize your test data, which may hurt your score.")
+            sacrelogger.warning("If you insist your data is detokenized, or don't care, you can suppress this message with the `force` parameter.")
+
+        return stats
+
+    def sentence_score(self, hypothesis: str, references: Sequence[str]) -> Any:
+        """Compute the metric for a single sentence against a single (or multiple) reference(s).
+
+        :param hypothesis: A single hypothesis string.
+        :param references: A sequence of reference strings.
+        :return: A `Score` object.
+        """
+        self._check_sentence_score_args(hypothesis, references)
+
+        stats = self._extract_corpus_statistics(
+            [hypothesis], [[refs] for refs in references])
+        return self._aggregate_and_compute(stats)
+
+    def corpus_score(self, hypotheses: Sequence[str],
+                     references: Optional[Sequence[Sequence[str]]],
+                     n_bootstrap: int = 1) -> Any:
+        """Compute the metric for a corpus against a single (or multiple) reference(s).
+
+        :param hypotheses: A sequence of hypothesis strings.
+        :param references: A sequence of reference documents with document being
+        defined as a sequence of reference strings. If `None`, cached references
+        will be used.
+        :param n_bootstrap: If > 1, provides 95% confidence interval around true mean
+        using bootstrap resampling with `n_bootstrap` samples.
+        :return: A `Score` object.
+        """
+        self._check_corpus_score_args(hypotheses, references)
+
+        # Collect corpus stats
+        stats = self._extract_corpus_statistics(hypotheses, references)
+
+        # Compute the actual system score
+        actual_score = self._aggregate_and_compute(stats)
+
+        if n_bootstrap > 1:
+            # Compute bootstrap estimate as well
+            # Delayed import is to escape from numpy import if bootstrap
+            # is not requested.
+            from ..significance import _bootstrap_resample
+
+            self.n_bootstrap = n_bootstrap
+            self.seed, bs_scores = _bootstrap_resample(stats, self, n_bootstrap)
+            actual_score.estimate_ci(bs_scores)
+
+        return actual_score
+
+    def get_signature(self) -> Signature:
+        """Creates and returns the signature for the metric. The creation
+        of signatures is delayed as the number of references is resolved
+        only at the point of reference caching."""
+        return self._SIGNATURE_TYPE(self.__dict__)
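The bootstrap path through `corpus_score()` above can be exercised from any concrete metric; a hedged sketch with toy data (bootstrap resampling pulls in sacrebleu's optional numpy-backed significance module):

from sacrebleu.metrics import BLEU

hyps = ["the cat sat on the mat", "a dog barked loudly"]
refs = [["the cat sat on the mat", "the dog barked loudly"]]

# n_bootstrap > 1 triggers _bootstrap_resample() and estimate_ci(),
# so the formatted score carries a 'μ = mean ± half-CI' suffix.
score = BLEU().corpus_score(hyps, refs, n_bootstrap=200)
print(score.format(width=2))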
llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/bleu.py ADDED
@@ -0,0 +1,420 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """The implementation of the BLEU metric (Papineni et al., 2002)."""
2
+
3
+ import math
4
+ import logging
5
+ from importlib import import_module
6
+ from typing import List, Sequence, Optional, Dict, Any
7
+
8
+ from ..utils import my_log, sum_of_lists
9
+
10
+ from .base import Score, Signature, Metric
11
+ from .helpers import extract_all_word_ngrams
12
+
13
+ sacrelogger = logging.getLogger('sacrebleu')
14
+
15
+ # The default for the maximum n-gram order when computing precisions
16
+ MAX_NGRAM_ORDER = 4
17
+
18
+ _TOKENIZERS = {
19
+ 'none': 'tokenizer_none.NoneTokenizer',
20
+ 'zh': 'tokenizer_zh.TokenizerZh',
21
+ '13a': 'tokenizer_13a.Tokenizer13a',
22
+ 'intl': 'tokenizer_intl.TokenizerV14International',
23
+ 'char': 'tokenizer_char.TokenizerChar',
24
+ 'ja-mecab': 'tokenizer_ja_mecab.TokenizerJaMecab',
25
+ 'ko-mecab': 'tokenizer_ko_mecab.TokenizerKoMecab',
26
+ 'spm': 'tokenizer_spm.TokenizerSPM',
27
+ 'flores101': 'tokenizer_spm.Flores101Tokenizer',
28
+ 'flores200': 'tokenizer_spm.Flores200Tokenizer',
29
+ }
30
+
31
+
32
+ def _get_tokenizer(name: str):
33
+ """Dynamically import tokenizer as importing all is slow."""
34
+ module_name, class_name = _TOKENIZERS[name].rsplit('.', 1)
35
+ return getattr(
36
+ import_module(f'.tokenizers.{module_name}', 'sacrebleu'),
37
+ class_name)
38
+
39
+
40
+ class BLEUSignature(Signature):
41
+ """A convenience class to represent the reproducibility signature for BLEU.
42
+
43
+ :param args: key-value dictionary passed from the actual metric instance.
44
+ """
45
+ def __init__(self, args: dict):
46
+ """`BLEUSignature` initializer."""
47
+ super().__init__(args)
48
+
49
+ self._abbr.update({
50
+ 'case': 'c',
51
+ 'eff': 'e',
52
+ 'tok': 'tok',
53
+ 'smooth': 's',
54
+ })
55
+
56
+ # Construct a combined string for smoothing method and value
57
+ smooth_str = args['smooth_method']
58
+ smooth_def = BLEU.SMOOTH_DEFAULTS[smooth_str]
59
+
60
+ # If the method requires a parameter, add it within brackets
61
+ if smooth_def is not None:
62
+ # the following can be None if the user wants to use the default
63
+ smooth_val = args['smooth_value']
64
+
65
+ if smooth_val is None:
66
+ smooth_val = smooth_def
67
+
68
+ smooth_str += f'[{smooth_val:.2f}]'
69
+
70
+ self.info.update({
71
+ 'case': 'lc' if args['lowercase'] else 'mixed',
72
+ 'eff': 'yes' if args['effective_order'] else 'no',
73
+ 'tok': args['tokenizer_signature'],
74
+ 'smooth': smooth_str,
75
+ })
76
+
77
+
78
+ class BLEUScore(Score):
79
+ """A convenience class to represent BLEU scores.
80
+
81
+ :param score: The BLEU score.
82
+ :param counts: List of counts of correct ngrams, 1 <= n <= max_ngram_order
83
+ :param totals: List of counts of total ngrams, 1 <= n <= max_ngram_order
84
+ :param precisions: List of precisions, 1 <= n <= max_ngram_order
85
+ :param bp: The brevity penalty.
86
+ :param sys_len: The cumulative system length.
87
+ :param ref_len: The cumulative reference length.
88
+ """
89
+ def __init__(self, score: float, counts: List[int], totals: List[int],
90
+ precisions: List[float], bp: float,
91
+ sys_len: int, ref_len: int):
92
+ """`BLEUScore` initializer."""
93
+ super().__init__('BLEU', score)
94
+ self.bp = bp
95
+ self.counts = counts
96
+ self.totals = totals
97
+ self.sys_len = sys_len
98
+ self.ref_len = ref_len
99
+ self.precisions = precisions
100
+
101
+ self.prec_str = "/".join([f"{p:.1f}" for p in self.precisions])
102
+ self.ratio = self.sys_len / self.ref_len if self.ref_len else 0
103
+
104
+ # The verbose part of BLEU
105
+ self._verbose = f"{self.prec_str} (BP = {self.bp:.3f} "
106
+ self._verbose += f"ratio = {self.ratio:.3f} hyp_len = {self.sys_len:d} "
107
+ self._verbose += f"ref_len = {self.ref_len:d})"
108
+
109
+
110
+ class BLEU(Metric):
111
+ """Computes the BLEU metric given hypotheses and references.
112
+
113
+ :param lowercase: If True, lowercased BLEU is computed.
114
+ :param force: Ignore data that looks already tokenized.
115
+ :param tokenize: The tokenizer to use. If None, defaults to language-specific tokenizers with '13a' as the fallback default.
116
+ :param smooth_method: The smoothing method to use ('floor', 'add-k', 'exp' or 'none').
117
+ :param smooth_value: The smoothing value for `floor` and `add-k` methods. `None` falls back to default value.
118
+ :param max_ngram_order: If given, it overrides the maximum n-gram order (default: 4) when computing precisions.
119
+ :param effective_order: If `True`, stop including n-gram orders for which precision is 0. This should be
120
+ `True`, if sentence-level BLEU will be computed.
121
+ :param trg_lang: An optional language code to raise potential tokenizer warnings.
122
+ :param references: A sequence of reference documents with document being
123
+ defined as a sequence of reference strings. If given, the reference n-grams
124
+ and lengths will be pre-computed and cached for faster BLEU computation
125
+ across many systems.
126
+ """
127
+
128
+ SMOOTH_DEFAULTS: Dict[str, Optional[float]] = {
129
+ # The defaults for `floor` and `add-k` are obtained from the following paper
130
+ # A Systematic Comparison of Smoothing Techniques for Sentence-Level BLEU
131
+ # Boxing Chen and Colin Cherry
132
+ # http://aclweb.org/anthology/W14-3346
133
+ 'none': None, # No value is required
134
+ 'floor': 0.1,
135
+ 'add-k': 1,
136
+ 'exp': None, # No value is required
137
+ }
138
+
139
+ TOKENIZERS = _TOKENIZERS.keys()
140
+
141
+ # mteval-v13a.pl tokenizer unless Chinese or Japanese is provided
142
+ TOKENIZER_DEFAULT = '13a'
143
+
144
+ # Some language specific mappings to use if `trg_lang` is given
145
+ # and the tokenizer is not explicitly specified
146
+ _TOKENIZER_MAP = {
147
+ 'zh': 'zh',
148
+ 'ja': 'ja-mecab',
149
+ 'ko': 'ko-mecab',
150
+ }
151
+
152
+ _SIGNATURE_TYPE = BLEUSignature
153
+
154
+ def __init__(self, lowercase: bool = False,
155
+ force: bool = False,
156
+ tokenize: Optional[str] = None,
157
+ smooth_method: str = 'exp',
158
+ smooth_value: Optional[float] = None,
159
+ max_ngram_order: int = MAX_NGRAM_ORDER,
160
+ effective_order: bool = False,
161
+ trg_lang: str = '',
162
+ references: Optional[Sequence[Sequence[str]]] = None):
163
+ """`BLEU` initializer."""
164
+ super().__init__()
165
+
166
+ self._force = force
167
+ self.trg_lang = trg_lang
168
+ self.lowercase = lowercase
169
+ self.smooth_value = smooth_value
170
+ self.smooth_method = smooth_method
171
+ self.max_ngram_order = max_ngram_order
172
+ self.effective_order = effective_order
173
+
174
+ # Sanity check
175
+ assert self.smooth_method in self.SMOOTH_DEFAULTS.keys(), \
176
+ "Unknown smooth_method {self.smooth_method!r}"
177
+
178
+ # If the tokenizer wasn't specified, choose it according to the
179
+ # following logic. We use 'v13a' except for ZH and JA. Note that
180
+ # this logic can only be applied when sacrebleu knows the target
181
+ # language, which is only the case for builtin datasets.
182
+ if tokenize is None:
183
+ best_tokenizer = self.TOKENIZER_DEFAULT
184
+
185
+ # Set `zh` or `ja-mecab` or `ko-mecab` if target language is provided
186
+ if self.trg_lang in self._TOKENIZER_MAP:
187
+ best_tokenizer = self._TOKENIZER_MAP[self.trg_lang]
188
+ else:
189
+ best_tokenizer = tokenize
190
+ if self.trg_lang == 'zh' and best_tokenizer != 'zh':
191
+ sacrelogger.warning(
192
+ "Consider using the 'zh' or 'spm' tokenizer for Chinese.")
193
+ if self.trg_lang == 'ja' and best_tokenizer != 'ja-mecab':
194
+ sacrelogger.warning(
195
+ "Consider using the 'ja-mecab' or 'spm' tokenizer for Japanese.")
196
+ if self.trg_lang == 'ko' and best_tokenizer != 'ko-mecab':
197
+ sacrelogger.warning(
198
+ "Consider using the 'ko-mecab' or 'spm' tokenizer for Korean.")
199
+
200
+ # Create the tokenizer
201
+ self.tokenizer = _get_tokenizer(best_tokenizer)()
202
+
203
+ # Build the signature
204
+ self.tokenizer_signature = self.tokenizer.signature()
205
+
206
+ if references is not None:
207
+ # Pre-compute reference ngrams and lengths
208
+ self._ref_cache = self._cache_references(references)
209
+
210
+ @staticmethod
211
+ def compute_bleu(correct: List[int],
212
+ total: List[int],
213
+ sys_len: int,
214
+ ref_len: int,
215
+ smooth_method: str = 'none',
216
+ smooth_value=None,
217
+ effective_order: bool = False,
218
+ max_ngram_order: int = MAX_NGRAM_ORDER) -> BLEUScore:
219
+ """Computes BLEU score from its sufficient statistics with smoothing.
220
+
221
+ Smoothing methods (citing "A Systematic Comparison of Smoothing Techniques for Sentence-Level BLEU",
222
+ Boxing Chen and Colin Cherry, WMT 2014: http://aclweb.org/anthology/W14-3346)
223
+
224
+ - none: No smoothing.
225
+ - floor: Method 1 (requires small positive value (0.1 in the paper) to be set)
226
+ - add-k: Method 2 (Generalizing Lin and Och, 2004)
227
+ - exp: Method 3 (NIST smoothing method i.e. in use with mteval-v13a.pl)
228
+
229
+ :param correct: List of counts of correct ngrams, 1 <= n <= max_ngram_order
230
+ :param total: List of counts of total ngrams, 1 <= n <= max_ngram_order
231
+ :param sys_len: The cumulative system length
232
+ :param ref_len: The cumulative reference length
233
+ :param smooth_method: The smoothing method to use ('floor', 'add-k', 'exp' or 'none')
234
+ :param smooth_value: The smoothing value for `floor` and `add-k` methods. `None` falls back to default value.
235
+ :param effective_order: If `True`, stop including n-gram orders for which precision is 0. This should be
236
+ `True`, if sentence-level BLEU will be computed.
237
+ :param max_ngram_order: If given, it overrides the maximum n-gram order (default: 4) when computing precisions.
238
+ :return: A `BLEUScore` instance.
239
+ """
240
+ assert smooth_method in BLEU.SMOOTH_DEFAULTS.keys(), \
241
+ "Unknown smooth_method {smooth_method!r}"
242
+
243
+ # Fetch the default value for floor and add-k
244
+ if smooth_value is None:
245
+ smooth_value = BLEU.SMOOTH_DEFAULTS[smooth_method]
246
+
247
+ # Compute brevity penalty
248
+ if sys_len < ref_len:
249
+ bp = math.exp(1 - ref_len / sys_len) if sys_len > 0 else 0.0
250
+ else:
251
+ bp = 1.0
252
+
253
+ # n-gram precisions
254
+ precisions = [0.0 for x in range(max_ngram_order)]
255
+
256
+ # Early stop if there are no matches (#141)
257
+ if not any(correct):
258
+ return BLEUScore(0.0, correct, total, precisions, bp, sys_len, ref_len)
259
+
260
+ smooth_mteval = 1.
261
+ eff_order = max_ngram_order
262
+ for n in range(1, len(precisions) + 1):
263
+ if smooth_method == 'add-k' and n > 1:
264
+ correct[n - 1] += smooth_value
265
+ total[n - 1] += smooth_value
266
+
267
+ if total[n - 1] == 0:
268
+ break
269
+
270
+ # If the system guesses no i-grams, 1 <= i <= max_ngram_order,
271
+ # the BLEU score is 0 (technically undefined). This is a problem for sentence
272
+ # level BLEU or a corpus of short sentences, where systems will get
273
+ # no credit if sentence lengths fall under the max_ngram_order threshold.
274
+ # This fix scales max_ngram_order to the observed maximum order.
275
+ # It is only available through the API and off by default
276
+ if effective_order:
277
+ eff_order = n
278
+
279
+ if correct[n - 1] == 0:
280
+ if smooth_method == 'exp':
281
+ smooth_mteval *= 2
282
+ precisions[n - 1] = 100. / (smooth_mteval * total[n - 1])
283
+ elif smooth_method == 'floor':
284
+ precisions[n - 1] = 100. * smooth_value / total[n - 1]
285
+ else:
286
+ precisions[n - 1] = 100. * correct[n - 1] / total[n - 1]
287
+
288
+ # Compute BLEU score
289
+ score = bp * math.exp(
290
+ sum([my_log(p) for p in precisions[:eff_order]]) / eff_order)
291
+
292
+ return BLEUScore(score, correct, total, precisions, bp, sys_len, ref_len)
293
+
294
+ def _preprocess_segment(self, sent: str) -> str:
295
+ """Given a sentence, lowercases (optionally) and tokenizes it
296
+ :param sent: The input sentence string.
297
+ :return: The pre-processed output string.
298
+ """
299
+ if self.lowercase:
300
+ sent = sent.lower()
301
+         return self.tokenizer(sent.rstrip())
+
+     def _compute_score_from_stats(self, stats: List[int]) -> BLEUScore:
+         """Computes the final score from already aggregated statistics.
+
+         :param stats: A list or numpy array of segment-level statistics.
+         :return: A `BLEUScore` object.
+         """
+         return self.compute_bleu(
+             correct=stats[2: 2 + self.max_ngram_order],
+             total=stats[2 + self.max_ngram_order:],
+             sys_len=int(stats[0]), ref_len=int(stats[1]),
+             smooth_method=self.smooth_method, smooth_value=self.smooth_value,
+             effective_order=self.effective_order,
+             max_ngram_order=self.max_ngram_order
+         )
+
+     def _aggregate_and_compute(self, stats: List[List[int]]) -> BLEUScore:
+         """Computes the final BLEU score given the pre-computed corpus statistics.
+
+         :param stats: A list of segment-level statistics
+         :return: A `BLEUScore` instance.
+         """
+         return self._compute_score_from_stats(sum_of_lists(stats))
+
+     def _get_closest_ref_len(self, hyp_len: int, ref_lens: List[int]) -> int:
+         """Given a hypothesis length and a list of reference lengths, returns
+         the closest reference length to be used by BLEU.
+
+         :param hyp_len: The hypothesis length.
+         :param ref_lens: A list of reference lengths.
+         :return: The closest reference length.
+         """
+         closest_diff, closest_len = -1, -1
+
+         for ref_len in ref_lens:
+             diff = abs(hyp_len - ref_len)
+             if closest_diff == -1 or diff < closest_diff:
+                 closest_diff = diff
+                 closest_len = ref_len
+             elif diff == closest_diff and ref_len < closest_len:
+                 closest_len = ref_len
+
+         return closest_len
+
+     def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, Any]:
+         """Given a list of reference segments, extract the n-grams and reference lengths.
+         The latter will be useful when comparing hypothesis and reference lengths for BLEU.
+
+         :param refs: A sequence of strings.
+         :return: A dictionary that will be passed to `_compute_segment_statistics()`
+             through keyword arguments.
+         """
+         ngrams = None
+         ref_lens = []
+
+         for ref in refs:
+             # extract n-grams for this ref
+             this_ngrams, ref_len = extract_all_word_ngrams(ref, 1, self.max_ngram_order)
+             ref_lens.append(ref_len)
+
+             if ngrams is None:
+                 # Set it directly for the first set of refs
+                 ngrams = this_ngrams
+             else:
+                 # Merge counts across multiple references
+                 # The below loop is faster than `ngrams |= this_ngrams`
+                 for ngram, count in this_ngrams.items():
+                     ngrams[ngram] = max(ngrams[ngram], count)
+
+         return {'ref_ngrams': ngrams, 'ref_lens': ref_lens}
+
+     def _compute_segment_statistics(self, hypothesis: str,
+                                     ref_kwargs: Dict) -> List[int]:
+         """Given a (pre-processed) hypothesis sentence and already computed
+         reference n-grams & lengths, returns the best match statistics across the
+         references.
+
+         :param hypothesis: Hypothesis sentence.
+         :param ref_kwargs: A dictionary with `ref_ngrams` and `ref_lens` keys
+             that denote the counter containing all n-gram counts and the
+             reference lengths, respectively.
+         :return: A list of integers with match statistics.
+         """
+
+         ref_ngrams, ref_lens = ref_kwargs['ref_ngrams'], ref_kwargs['ref_lens']
+
+         # Extract n-grams for the hypothesis
+         hyp_ngrams, hyp_len = extract_all_word_ngrams(
+             hypothesis, 1, self.max_ngram_order)
+
+         ref_len = self._get_closest_ref_len(hyp_len, ref_lens)
+
+         # Count the stats
+         # Although Counter has its internal & and | operators, this is faster
+         correct = [0 for i in range(self.max_ngram_order)]
+         total = correct[:]
+         for hyp_ngram, hyp_count in hyp_ngrams.items():
+             # n-gram order
+             n = len(hyp_ngram) - 1
+             # count hypothesis n-grams
+             total[n] += hyp_count
+             # count matched n-grams
+             if hyp_ngram in ref_ngrams:
+                 correct[n] += min(hyp_count, ref_ngrams[hyp_ngram])
+
+         # Return a flattened list for efficient computation
+         return [hyp_len, ref_len] + correct + total
+
+     def sentence_score(self, hypothesis: str, references: Sequence[str]) -> BLEUScore:
+         """Compute the metric for a single sentence against a single (or multiple) reference(s).
+
+         :param hypothesis: A single hypothesis string.
+         :param references: A sequence of reference strings.
+         :return: a `BLEUScore` object.
+         """
+         if not self.effective_order:
+             sacrelogger.warning(
+                 'It is recommended to enable `effective_order` for sentence-level BLEU.')
+         return super().sentence_score(hypothesis, references)
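A minimal usage sketch for the class above, assuming a working sacrebleu installation (`BLEU` is re-exported from `sacrebleu.metrics`); `effective_order=True` follows the recommendation logged by `sentence_score`, and the scores in comments are illustrative, not guaranteed values:

from sacrebleu.metrics import BLEU

# Sentence-level scoring with effective n-gram order, per the warning above
bleu = BLEU(effective_order=True)
score = bleu.sentence_score(
    'the cat sat on the mat',
    ['the cat was sitting on the mat'])
print(score.score)  # a float in [0, 100]

# Corpus-level scoring: one hypothesis list, plus one list per reference stream
corpus = BLEU().corpus_score(
    ['the cat sat on the mat'],
    [['the cat was sitting on the mat']])
print(corpus)  # e.g. 'BLEU = ...'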
llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/chrf.py ADDED
@@ -0,0 +1,284 @@
+ """The implementation of chrF (Popović 2015) and chrF++ (Popović 2017) metrics."""
+
+ from typing import List, Sequence, Optional, Dict
+ from collections import Counter
+
+ from ..utils import sum_of_lists
+ from .base import Score, Signature, Metric
+ from .helpers import extract_all_char_ngrams, extract_word_ngrams
+
+
+ class CHRFSignature(Signature):
+     """A convenience class to represent the reproducibility signature for chrF.
+
+     :param args: key-value dictionary passed from the actual metric instance.
+     """
+     def __init__(self, args: dict):
+         """`CHRFSignature` initializer."""
+         super().__init__(args)
+         self._abbr.update({
+             'case': 'c',
+             'eff': 'e',
+             'nc': 'nc',
+             'nw': 'nw',
+             'space': 's',
+         })
+
+         self.info.update({
+             'case': 'lc' if args['lowercase'] else 'mixed',
+             'eff': 'yes' if not args['eps_smoothing'] else 'no',
+             'nc': args['char_order'],
+             'nw': args['word_order'],
+             'space': 'yes' if args['whitespace'] else 'no',
+         })
+
+
+ class CHRFScore(Score):
+     """A convenience class to represent chrF scores.
+
+     :param score: The chrF (chrF++) score.
+     :param char_order: The character n-gram order.
+     :param word_order: The word n-gram order. If it equals 2, the metric is referred to as chrF++.
+     :param beta: Determines the importance of recall w.r.t. precision.
+     """
+     def __init__(self, score: float, char_order: int, word_order: int, beta: int):
+         """`CHRFScore` initializer."""
+         self.beta = beta
+         self.char_order = char_order
+         self.word_order = word_order
+
+         # Add + signs to denote chrF+ variant
+         name = f'chrF{self.beta}' + '+' * self.word_order
+
+         super().__init__(name, score)
+
+
+ class CHRF(Metric):
+     """Computes the chrF(++) metric given hypotheses and references.
+
+     :param char_order: Character n-gram order.
+     :param word_order: Word n-gram order. If it equals 2, the metric is referred to as chrF++.
+     :param beta: Determines the importance of recall w.r.t. precision.
+     :param lowercase: Enable case-insensitivity.
+     :param whitespace: If `True`, include whitespaces when extracting character n-grams.
+     :param eps_smoothing: If `True`, applies epsilon smoothing similar
+         to reference chrF++.py, NLTK and Moses implementations. Otherwise,
+         it takes into account effective match order similar to sacreBLEU < 2.0.0.
+     :param references: A sequence of reference documents with document being
+         defined as a sequence of reference strings. If given, the reference n-grams
+         will be pre-computed and cached for faster re-computation across many systems.
+     """
+
+     # Maximum character n-gram order to take into account
+     CHAR_ORDER = 6
+
+     # chrF+ additionally takes into account some of the word n-grams
+     WORD_ORDER = 0
+
+     # Defaults to 2 (per http://www.aclweb.org/anthology/W16-2341)
+     BETA = 2
+
+     # Cache string.punctuation for chrF+'s punctuation stripper
+     _PUNCTS = set('!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~')
+
+     _SIGNATURE_TYPE = CHRFSignature
+
+     def __init__(self, char_order: int = CHAR_ORDER,
+                  word_order: int = WORD_ORDER,
+                  beta: int = BETA,
+                  lowercase: bool = False,
+                  whitespace: bool = False,
+                  eps_smoothing: bool = False,
+                  references: Optional[Sequence[Sequence[str]]] = None):
+         """`CHRF` initializer."""
+         super().__init__()
+
+         self.beta = beta
+         self.char_order = char_order
+         self.word_order = word_order
+         self.order = self.char_order + self.word_order
+         self.lowercase = lowercase
+         self.whitespace = whitespace
+         self.eps_smoothing = eps_smoothing
+
+         if references is not None:
+             # Pre-compute reference ngrams
+             self._ref_cache = self._cache_references(references)
+
+     @staticmethod
+     def _get_match_statistics(hyp_ngrams: Counter, ref_ngrams: Counter) -> List[int]:
+         """Computes the match statistics between hypothesis and reference n-grams.
+
+         :param hyp_ngrams: A `Counter` holding hypothesis n-grams.
+         :param ref_ngrams: A `Counter` holding reference n-grams.
+         :return: A list of three numbers denoting hypothesis n-gram count,
+             reference n-gram count and the intersection count.
+         """
+         # Counter's internal intersection is not that fast, count manually
+         match_count, hyp_count = 0, 0
+         for ng, count in hyp_ngrams.items():
+             hyp_count += count
+             if ng in ref_ngrams:
+                 match_count += min(count, ref_ngrams[ng])
+
+         return [
+             # Don't count hits if no reference exists for that n-gram
+             hyp_count if ref_ngrams else 0,
+             sum(ref_ngrams.values()),
+             match_count,
+         ]
+
+     def _remove_punctuation(self, sent: str) -> List[str]:
+         """Separates out punctuation from the beginning and end of words for chrF.
+         Adapted from https://github.com/m-popovic/chrF
+
+         :param sent: A string.
+         :return: A list of words.
+         """
+         tokenized = []
+         for w in sent.split():
+             if len(w) == 1:
+                 tokenized.append(w)
+             else:
+                 # NOTE: This splits '(hi)' to '(hi' and ')' (issue #124)
+                 if w[-1] in self._PUNCTS:
+                     tokenized += [w[:-1], w[-1]]
+                 elif w[0] in self._PUNCTS:
+                     tokenized += [w[0], w[1:]]
+                 else:
+                     tokenized.append(w)
+         return tokenized
+
+     def _preprocess_segment(self, sent: str) -> str:
+         """Given a sentence, apply optional lowercasing.
+
+         :param sent: The input sentence string.
+         :return: The pre-processed output string.
+         """
+         return sent.lower() if self.lowercase else sent
+
+     def _compute_f_score(self, statistics: List[int]) -> float:
+         """Compute the chrF score given the n-gram match statistics.
+
+         :param statistics: A flattened list of 3 * (`char_order` + `word_order`)
+             elements giving the [hyp, ref, match] counts for each order.
+         :return: The final f_beta score between [0, 100].
+         """
+         eps = 1e-16
+         score = 0.0
+         effective_order = 0
+         factor = self.beta ** 2
+         avg_prec, avg_rec = 0.0, 0.0
+
+         for i in range(self.order):
+             n_hyp, n_ref, n_match = statistics[3 * i: 3 * i + 3]
+
+             # chrF++.py style EPS smoothing (also used by Moses and NLTK)
+             prec = n_match / n_hyp if n_hyp > 0 else eps
+             rec = n_match / n_ref if n_ref > 0 else eps
+
+             denom = factor * prec + rec
+             score += ((1 + factor) * prec * rec / denom) if denom > 0 else eps
+
+             # sacreBLEU <2.0.0 style effective order smoothing
+             if n_hyp > 0 and n_ref > 0:
+                 avg_prec += prec
+                 avg_rec += rec
+                 effective_order += 1
+
+         if self.eps_smoothing:
+             return 100 * score / self.order
+
+         if effective_order == 0:
+             avg_prec = avg_rec = 0.0
+         else:
+             avg_prec /= effective_order
+             avg_rec /= effective_order
+
+         if avg_prec + avg_rec:
+             score = (1 + factor) * avg_prec * avg_rec
+             score /= ((factor * avg_prec) + avg_rec)
+             return 100 * score
+         else:
+             return 0.0
+
+     def _compute_score_from_stats(self, stats: List[int]) -> CHRFScore:
+         """Computes the final score from already aggregated statistics.
+
+         :param stats: A list or numpy array of segment-level statistics.
+         :return: A `CHRFScore` object.
+         """
+         return CHRFScore(
+             self._compute_f_score(stats), self.char_order,
+             self.word_order, self.beta)
+
+     def _aggregate_and_compute(self, stats: List[List[int]]) -> CHRFScore:
+         """Computes the final score given the pre-computed corpus statistics.
+
+         :param stats: A list of segment-level statistics
+         :return: A `CHRFScore` object.
+         """
+         return self._compute_score_from_stats(sum_of_lists(stats))
+
+     def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, List[List[Counter]]]:
+         """Given a list of reference segments, extract the character and word n-grams.
+
+         :param refs: A sequence of reference segments.
+         :return: A dictionary with key `ref_ngrams`, a list where each element
+             contains the n-grams for one reference segment.
+         """
+         ngrams = []
+
+         for ref in refs:
+             # extract character n-grams
+             stats = extract_all_char_ngrams(ref, self.char_order, self.whitespace)
+
+             # Check chrF+ mode
+             if self.word_order > 0:
+                 ref_words = self._remove_punctuation(ref)
+
+                 for n in range(self.word_order):
+                     stats.append(extract_word_ngrams(ref_words, n + 1))
+
+             ngrams.append(stats)
+
+         return {'ref_ngrams': ngrams}
+
+     def _compute_segment_statistics(
+             self, hypothesis: str, ref_kwargs: Dict) -> List[int]:
+         """Given a (pre-processed) hypothesis sentence and already computed
+         reference n-grams, returns the best match statistics across the
+         references.
+
+         :param hypothesis: Hypothesis sentence.
+         :param ref_kwargs: A dictionary with key `ref_ngrams` which is a list
+             where each sublist contains n-gram counters for a particular reference sentence.
+         :return: A list of integers where each triplet denotes [hyp, ref, match]
+             statistics.
+         """
+         best_stats = []
+         best_f_score = -1.0
+
+         # extract character n-grams
+         all_hyp_ngrams = extract_all_char_ngrams(
+             hypothesis, self.char_order, self.whitespace)
+
+         # Check chrF+ mode to see if we'll add word n-grams as well
+         if self.word_order > 0:
+             # Primitive tokenization: separate out punctuation
+             hwords = self._remove_punctuation(hypothesis)
+             _range = range(1, self.word_order + 1)
+             all_hyp_ngrams.extend([extract_word_ngrams(hwords, n) for n in _range])
+
+         # Iterate over multiple references, pick the one with best F score
+         for _ref_ngrams in ref_kwargs['ref_ngrams']:
+             stats = []
+             # Traverse all orders
+             for h, r in zip(all_hyp_ngrams, _ref_ngrams):
+                 stats.extend(self._get_match_statistics(h, r))
+             f_score = self._compute_f_score(stats)
+
+             if f_score > best_f_score:
+                 best_f_score = f_score
+                 best_stats = stats
+
+         return best_stats
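A sketch of how the class is typically driven, assuming a working sacrebleu installation (`CHRF` is re-exported from `sacrebleu.metrics`); the printed value in the comment is illustrative:

from sacrebleu.metrics import CHRF

# word_order=2 selects the chrF++ variant, as described in the docstrings above
chrf = CHRF(word_order=2)
score = chrf.corpus_score(
    ['The cat sat on the mat.'],
    [['The cat was sitting on the mat.']])
print(score)  # e.g. 'chrF2++ = 64.19'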
llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/helpers.py ADDED
@@ -0,0 +1,69 @@
+ """Various utility functions for word and character n-gram extraction."""
+
+ from collections import Counter
+ from typing import List, Tuple
+
+
+ def extract_all_word_ngrams(line: str, min_order: int, max_order: int) -> Tuple[Counter, int]:
+     """Extracts all ngrams (min_order <= n <= max_order) from a sentence.
+
+     :param line: A string sentence.
+     :param min_order: Minimum n-gram order.
+     :param max_order: Maximum n-gram order.
+     :return: a Counter object with n-gram counts and the sequence length.
+     """
+
+     ngrams = []
+     tokens = line.split()
+
+     for n in range(min_order, max_order + 1):
+         for i in range(0, len(tokens) - n + 1):
+             ngrams.append(tuple(tokens[i: i + n]))
+
+     return Counter(ngrams), len(tokens)
+
+
+ def extract_word_ngrams(tokens: List[str], n: int) -> Counter:
+     """Extracts n-grams with order `n` from a list of tokens.
+
+     :param tokens: A list of tokens.
+     :param n: The order of n-grams.
+     :return: a Counter object with n-gram counts.
+     """
+     return Counter([' '.join(tokens[i:i + n]) for i in range(len(tokens) - n + 1)])
+
+
+ def extract_char_ngrams(line: str, n: int, include_whitespace: bool = False) -> Counter:
+     """Returns counts of character n-grams from a sentence.
+
+     :param line: A segment containing a sequence of words.
+     :param n: The order of the n-grams.
+     :param include_whitespace: If `True`, will not strip whitespaces from the line.
+     :return: a Counter containing n-grams and counts
+     """
+     if not include_whitespace:
+         line = ''.join(line.split())
+
+     return Counter([line[i:i + n] for i in range(len(line) - n + 1)])
+
+
+ def extract_all_char_ngrams(
+         line: str, max_order: int, include_whitespace: bool = False) -> List[Counter]:
+     """Extracts all character n-grams at once for convenience.
+
+     :param line: A segment containing a sequence of words.
+     :param max_order: The maximum order of the n-grams.
+     :param include_whitespace: If `True`, will not strip whitespaces from the line.
+     :return: a list of Counter objects containing n-grams and counts.
+     """
+
+     counters = []
+
+     if not include_whitespace:
+         line = ''.join(line.split())
+
+     for n in range(1, max_order + 1):
+         ngrams = Counter([line[i:i + n] for i in range(len(line) - n + 1)])
+         counters.append(ngrams)
+
+     return counters
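A small illustration of the two `extract_all_*` helpers, under the assumption that the module is importable as `sacrebleu.metrics.helpers`:

from sacrebleu.metrics.helpers import (
    extract_all_word_ngrams, extract_all_char_ngrams)

ngrams, n_tokens = extract_all_word_ngrams('the cat sat', 1, 2)
# n_tokens == 3; ngrams counts ('the',), ('cat',), ('sat',),
# ('the', 'cat') and ('cat', 'sat'), each once

counters = extract_all_char_ngrams('ab cd', 2)
# whitespace is stripped by default, so counters[0] counts the
# unigrams of 'abcd' and counters[1] its bigrams 'ab', 'bc', 'cd'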
llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/lib_ter.py ADDED
@@ -0,0 +1,478 @@
+ """This module implements various utility functions for the TER metric."""
+
+ # Copyright 2020 Memsource
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ import math
+ from typing import List, Tuple, Dict
+
+
+ _COST_INS = 1
+ _COST_DEL = 1
+ _COST_SUB = 1
+
+ # Tercom-inspired limits
+ _MAX_SHIFT_SIZE = 10
+ _MAX_SHIFT_DIST = 50
+ _BEAM_WIDTH = 25
+
+ # Our own limits
+ _MAX_CACHE_SIZE = 10000
+ _MAX_SHIFT_CANDIDATES = 1000
+ _INT_INFINITY = int(1e16)
+
+ _OP_INS = 'i'
+ _OP_DEL = 'd'
+ _OP_NOP = ' '
+ _OP_SUB = 's'
+ _OP_UNDEF = 'x'
+
+ _FLIP_OPS = str.maketrans(_OP_INS + _OP_DEL, _OP_DEL + _OP_INS)
+
+
+ def translation_edit_rate(words_hyp: List[str], words_ref: List[str]) -> Tuple[int, int]:
+     """Calculate the translation edit rate.
+
+     :param words_hyp: Tokenized translation hypothesis.
+     :param words_ref: Tokenized reference translation.
+     :return: tuple (number of edits, length)
+     """
+     n_words_ref = len(words_ref)
+     n_words_hyp = len(words_hyp)
+     if n_words_ref == 0:
+         # FIXME: This trace here is not used?
+         trace = _OP_DEL * n_words_hyp
+         # special treatment of empty refs
+         return n_words_hyp, 0
+
+     cached_ed = BeamEditDistance(words_ref)
+     shifts = 0
+
+     input_words = words_hyp
+     checked_candidates = 0
+     while True:
+         # do shifts until they stop reducing the edit distance
+         delta, new_input_words, checked_candidates = _shift(
+             input_words, words_ref, cached_ed, checked_candidates)
+
+         if checked_candidates >= _MAX_SHIFT_CANDIDATES:
+             break
+
+         if delta <= 0:
+             break
+         shifts += 1
+         input_words = new_input_words
+
+     edit_distance, trace = cached_ed(input_words)
+     total_edits = shifts + edit_distance
+
+     return total_edits, n_words_ref
+
+
+ def _shift(words_h: List[str], words_r: List[str], cached_ed,
+            checked_candidates: int) -> Tuple[int, List[str], int]:
+     """Attempt to shift words in the hypothesis to match the reference.
+
+     Returns the shift that reduces the edit distance the most.
+
+     Note that the filtering of possible shifts and shift selection are heavily
+     based on somewhat arbitrary heuristics. The code here follows as closely
+     as possible the logic in Tercom, not always justifying the particular design
+     choices.
+
+     :param words_h: Hypothesis.
+     :param words_r: Reference.
+     :param cached_ed: Cached edit distance.
+     :param checked_candidates: Number of shift candidates that were already
+         evaluated.
+     :return: (score, shifted_words, checked_candidates). Best shift and updated
+         number of evaluated shift candidates.
+     """
+     pre_score, inv_trace = cached_ed(words_h)
+
+     # to get alignment, we pretend we are rewriting reference into hypothesis,
+     # so we need to flip the trace of edit operations
+     trace = _flip_trace(inv_trace)
+     align, ref_err, hyp_err = trace_to_alignment(trace)
+
+     best = None
+
+     for start_h, start_r, length in _find_shifted_pairs(words_h, words_r):
+         # don't do the shift unless both the hypothesis was wrong and the
+         # reference doesn't match hypothesis at the target position
+         if sum(hyp_err[start_h: start_h + length]) == 0:
+             continue
+
+         if sum(ref_err[start_r: start_r + length]) == 0:
+             continue
+
+         # don't try to shift within the subsequence
+         if start_h <= align[start_r] < start_h + length:
+             continue
+
+         prev_idx = -1
+         for offset in range(-1, length):
+             if start_r + offset == -1:
+                 idx = 0  # insert before the beginning
+             elif start_r + offset in align:
+                 # Unlike Tercom which inserts *after* the index, we insert
+                 # *before* the index.
+                 idx = align[start_r + offset] + 1
+             else:
+                 break  # offset is out of bounds => aims past reference
+
+             if idx == prev_idx:
+                 continue  # skip idx if already tried
+
+             prev_idx = idx
+
+             shifted_words = _perform_shift(words_h, start_h, length, idx)
+             assert len(shifted_words) == len(words_h)
+
+             # Elements of the tuple are designed to replicate Tercom ranking
+             # of shifts:
+             candidate = (
+                 pre_score - cached_ed(shifted_words)[0],  # highest score first
+                 length,  # then, longest match first
+                 -start_h,  # then, earliest match first
+                 -idx,  # then, earliest target position first
+                 shifted_words,
+             )
+
+             checked_candidates += 1
+
+             if not best or candidate > best:
+                 best = candidate
+
+         if checked_candidates >= _MAX_SHIFT_CANDIDATES:
+             break
+
+     if not best:
+         return 0, words_h, checked_candidates
+     else:
+         best_score, _, _, _, shifted_words = best
+         return best_score, shifted_words, checked_candidates
+
+
+ def _perform_shift(words: List[str], start: int, length: int, target: int) -> List[str]:
+     """Perform a shift in `words` from `start` to `target`.
+
+     :param words: Words to shift.
+     :param start: Where from.
+     :param length: How many words.
+     :param target: Where to.
+     :return: Shifted words.
+     """
+     if target < start:
+         # shift before previous position
+         return words[:target] + words[start: start + length] \
+             + words[target: start] + words[start + length:]
+     elif target > start + length:
+         # shift after previous position
+         return words[:start] + words[start + length: target] \
+             + words[start: start + length] + words[target:]
+     else:
+         # shift within the shifted string
+         return words[:start] + words[start + length: length + target] \
+             + words[start: start + length] + words[length + target:]
+
+
+ def _find_shifted_pairs(words_h: List[str], words_r: List[str]):
+     """Find matching word sub-sequences in two lists of words.
+
+     Ignores sub-sequences starting at the same position.
+
+     :param words_h: First word list.
+     :param words_r: Second word list.
+     :return: Yields tuples of (h_start, r_start, length) such that:
+         words_h[h_start:h_start+length] = words_r[r_start:r_start+length]
+     """
+     n_words_h = len(words_h)
+     n_words_r = len(words_r)
+     for start_h in range(n_words_h):
+         for start_r in range(n_words_r):
+             # this is slightly different from what tercom does but this should
+             # really only kick in in degenerate cases
+             if abs(start_r - start_h) > _MAX_SHIFT_DIST:
+                 continue
+
+             length = 0
+             while words_h[start_h + length] == words_r[start_r + length] and length < _MAX_SHIFT_SIZE:
+                 length += 1
+
+                 yield start_h, start_r, length
+
+                 # If one sequence is consumed, stop processing
+                 if n_words_h == start_h + length or n_words_r == start_r + length:
+                     break
+
+
+ def _flip_trace(trace):
+     """Flip the trace of edit operations.
+
+     Instead of rewriting a->b, get a recipe for rewriting b->a.
+
+     Simply flips insertions and deletions.
+     """
+     return trace.translate(_FLIP_OPS)
+
+
+ def trace_to_alignment(trace: str) -> Tuple[Dict, List, List]:
+     """Transform trace of edit operations into an alignment of the sequences.
+
+     :param trace: Trace of edit operations (' '=no change or 's'/'i'/'d').
+     :return: Alignment, error positions in reference, error positions in hypothesis.
+     """
+     pos_hyp = -1
+     pos_ref = -1
+     hyp_err = []
+     ref_err = []
+     align = {}
+
+     # we are rewriting a into b
+     for op in trace:
+         if op == _OP_NOP:
+             pos_hyp += 1
+             pos_ref += 1
+             align[pos_ref] = pos_hyp
+             hyp_err.append(0)
+             ref_err.append(0)
+         elif op == _OP_SUB:
+             pos_hyp += 1
+             pos_ref += 1
+             align[pos_ref] = pos_hyp
+             hyp_err.append(1)
+             ref_err.append(1)
+         elif op == _OP_INS:
+             pos_hyp += 1
+             hyp_err.append(1)
+         elif op == _OP_DEL:
+             pos_ref += 1
+             align[pos_ref] = pos_hyp
+             ref_err.append(1)
+         else:
+             raise Exception(f"unknown operation {op!r}")
+
+     return align, ref_err, hyp_err
+
+
+ class BeamEditDistance:
+     """Edit distance with several features required for TER calculation.
+
+     * internal cache
+     * "beam" search
+     * tracking of edit operations
+
+     The internal self._cache works like this:
+
+     Keys are words of the hypothesis. Values are tuples (next_node, row) where:
+
+     * next_node is the cache for the next word in the sequence
+     * row is the stored row of the edit distance matrix
+
+     Effectively, caching allows skipping several rows in the edit distance
+     matrix calculation and instead initializing the computation with the last
+     matching matrix row.
+
+     Beam search, as implemented here, only explores a fixed-size sub-row of
+     candidates around the matrix diagonal (more precisely, it's a
+     "pseudo"-diagonal since we take the ratio of sequence lengths into account).
+
+     Tracking allows reconstructing the optimal sequence of edit operations.
+
+     :param words_ref: A list of reference tokens.
+     """
+     def __init__(self, words_ref: List[str]):
+         """`BeamEditDistance` initializer."""
+         self._words_ref = words_ref
+         self._n_words_ref = len(self._words_ref)
+
+         # first row corresponds to insertion operations of the reference,
+         # so we do 1 edit operation per reference word
+         self._initial_row = [(i * _COST_INS, _OP_INS)
+                              for i in range(self._n_words_ref + 1)]
+
+         self._cache = {}  # type: Dict[str, Tuple]
+         self._cache_size = 0
+
+         # Precomputed empty matrix row. Contains infinities so that beam search
+         # avoids using the uninitialized cells.
+         self._empty_row = [(_INT_INFINITY, _OP_UNDEF)] * (self._n_words_ref + 1)
+
+     def __call__(self, words_hyp: List[str]) -> Tuple[int, str]:
+         """Calculate edit distance between self._words_ref and the hypothesis.
+
+         Uses cache to skip some of the computation.
+
+         :param words_hyp: Words in translation hypothesis.
+         :return: Edit distance score and the trace of edit operations.
+         """
+
+         # skip initial words in the hypothesis for which we already know the
+         # edit distance
+         start_position, dist = self._find_cache(words_hyp)
+
+         # calculate the rest of the edit distance matrix
+         edit_distance, newly_created_matrix, trace = self._edit_distance(
+             words_hyp, start_position, dist)
+
+         # update our cache with the newly calculated rows
+         self._add_cache(words_hyp, newly_created_matrix)
+
+         return edit_distance, trace
+
+     def _edit_distance(self, words_h: List[str], start_h: int,
+                        cache: List[List[Tuple[int, str]]]) -> Tuple[int, List, str]:
+         """Actual edit distance calculation.
+
+         Can be initialized with the last cached row and a start position in
+         the hypothesis that it corresponds to.
+
+         :param words_h: Words in translation hypothesis.
+         :param start_h: Position from which to start the calculation.
+             (This is zero if no cache match was found.)
+         :param cache: Precomputed rows corresponding to edit distance matrix
+             before `start_h`.
+         :return: Edit distance value, newly computed rows to update the
+             cache, trace.
+         """
+
+         n_words_h = len(words_h)
+
+         # initialize the rest of the matrix with infinite edit distances
+         rest_empty = [list(self._empty_row)
+                       for _ in range(n_words_h - start_h)]
+
+         dist = cache + rest_empty
+
+         assert len(dist) == n_words_h + 1
+
+         length_ratio = self._n_words_ref / n_words_h if words_h else 1
+
+         # in some crazy sentences, the difference in length is so large that
+         # we may end up with zero overlap with previous row
+         if _BEAM_WIDTH < length_ratio / 2:
+             beam_width = math.ceil(length_ratio / 2 + _BEAM_WIDTH)
+         else:
+             beam_width = _BEAM_WIDTH
+
+         # calculate the Levenshtein distance
+         for i in range(start_h + 1, n_words_h + 1):
+             pseudo_diag = math.floor(i * length_ratio)
+             min_j = max(0, pseudo_diag - beam_width)
+             max_j = min(self._n_words_ref + 1, pseudo_diag + beam_width)
+
+             if i == n_words_h:
+                 max_j = self._n_words_ref + 1
+
+             for j in range(min_j, max_j):
+                 if j == 0:
+                     dist[i][j] = (dist[i - 1][j][0] + _COST_DEL, _OP_DEL)
+                 else:
+                     if words_h[i - 1] == self._words_ref[j - 1]:
+                         cost_sub = 0
+                         op_sub = _OP_NOP
+                     else:
+                         cost_sub = _COST_SUB
+                         op_sub = _OP_SUB
+
+                     # Tercom prefers no-op/sub, then insertion, then deletion.
+                     # But since we flip the trace and compute the alignment from
+                     # the inverse, we need to swap order of insertion and
+                     # deletion in the preference.
+                     ops = (
+                         (dist[i - 1][j - 1][0] + cost_sub, op_sub),
+                         (dist[i - 1][j][0] + _COST_DEL, _OP_DEL),
+                         (dist[i][j - 1][0] + _COST_INS, _OP_INS),
+                     )
+
+                     for op_cost, op_name in ops:
+                         if dist[i][j][0] > op_cost:
+                             dist[i][j] = op_cost, op_name
+
+         # get the trace
+         trace = ""
+         i = n_words_h
+         j = self._n_words_ref
+
+         while i > 0 or j > 0:
+             op = dist[i][j][1]
+             trace = op + trace
+             if op in (_OP_SUB, _OP_NOP):
+                 i -= 1
+                 j -= 1
+             elif op == _OP_INS:
+                 j -= 1
+             elif op == _OP_DEL:
+                 i -= 1
+             else:
+                 raise Exception(f"unknown operation {op!r}")
+
+         return dist[-1][-1][0], dist[len(cache):], trace
+
+     def _add_cache(self, words_hyp: List[str], mat: List[List[Tuple]]):
+         """Add newly computed rows to cache.
+
+         Since edit distance is only calculated on the hypothesis suffix that
+         was not in cache, the number of rows in `mat` may be shorter than
+         hypothesis length. In that case, we skip over these initial words.
+
+         :param words_hyp: Hypothesis words.
+         :param mat: Edit distance matrix rows for each position.
+         """
+         if self._cache_size >= _MAX_CACHE_SIZE:
+             return
+
+         node = self._cache
+
+         n_mat = len(mat)
+
+         # how many initial words to skip
+         skip_num = len(words_hyp) - n_mat
+
+         # jump through the cache to the current position
+         for i in range(skip_num):
+             node = node[words_hyp[i]][0]
+
+         assert len(words_hyp[skip_num:]) == n_mat
+
+         # update cache with newly computed rows
+         for word, row in zip(words_hyp[skip_num:], mat):
+             if word not in node:
+                 node[word] = ({}, tuple(row))
+                 self._cache_size += 1
+             value = node[word]
+             node = value[0]
+
+     def _find_cache(self, words_hyp: List[str]) -> Tuple[int, List[List]]:
+         """Find the already computed rows of the edit distance matrix in cache.
+
+         Returns a partially computed edit distance matrix.
+
+         :param words_hyp: Translation hypothesis.
+         :return: Tuple (start position, dist).
+         """
+         node = self._cache
+         start_position = 0
+         dist = [self._initial_row]
+         for word in words_hyp:
+             if word in node:
+                 start_position += 1
+                 node, row = node[word]
+                 dist.append(row)
+             else:
+                 break
+
+         return start_position, dist
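A minimal sketch of calling the module's entry point directly on pre-tokenized word lists; the edit count in the comment is what Tercom-style shifting should produce for this pair, stated as an expectation rather than a guaranteed output:

from sacrebleu.metrics.lib_ter import translation_edit_rate

hyp = 'sat the cat on the mat'.split()
ref = 'the cat sat on the mat'.split()

num_edits, ref_len = translation_edit_rate(hyp, ref)
# A single block shift of 'sat' repairs the hypothesis, so this
# should yield num_edits == 1 against ref_len == 6
print(num_edits, ref_len)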
llmeval-env/lib/python3.10/site-packages/sacrebleu/metrics/ter.py ADDED
@@ -0,0 +1,195 @@
+ """The implementation of the TER metric (Snover et al., 2006)."""
+
+ # Copyright 2020 Memsource
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ from typing import List, Dict, Sequence, Optional, Any
+
+ from ..tokenizers.tokenizer_ter import TercomTokenizer
+ from ..utils import sum_of_lists
+ from .base import Score, Signature, Metric
+ from .lib_ter import translation_edit_rate
+
+
+ class TERSignature(Signature):
+     """A convenience class to represent the reproducibility signature for TER.
+
+     :param args: key-value dictionary passed from the actual metric instance.
+     """
+     def __init__(self, args: dict):
+         """`TERSignature` initializer."""
+         super().__init__(args)
+         self._abbr.update({
+             'case': 'c',
+             'tok': 't',
+             'norm': 'nr',
+             'punct': 'pn',
+             'asian': 'as',
+         })
+
+         self.info.update({
+             'case': 'mixed' if args['case_sensitive'] else 'lc',
+             'tok': args['tokenizer_signature'],
+             'norm': args['normalized'],
+             'punct': not args['no_punct'],
+             'asian': args['asian_support'],
+         })
+
+
+ class TERScore(Score):
+     """A convenience class to represent TER scores.
+
+     :param score: The TER score.
+     :param num_edits: The cumulative number of edits.
+     :param ref_length: The cumulative average reference length.
+     """
+     def __init__(self, score: float, num_edits: float, ref_length: float):
+         """`TERScore` initializer."""
+         super().__init__('TER', score)
+         self.num_edits = int(num_edits)
+         self.ref_length = ref_length
+
+
+ class TER(Metric):
+     """Translation edit rate (TER). A near-exact reimplementation of the Tercom
+     algorithm that produces identical results on all "sane" outputs.
+
+     Tercom original implementation: https://github.com/jhclark/tercom
+
+     The beam edit distance algorithm uses a slightly different approach (we stay
+     around the diagonal, which is faster, at least in Python) so in some
+     (extreme) corner cases, the output could differ.
+
+     Caching in the edit distance is based partly on the PyTer package by Hiroyuki
+     Tanaka (MIT license). (https://github.com/aflc/pyter)
+
+     :param normalized: Enable character normalization. By default, normalizes a couple of things such as
+         newlines being stripped, retrieving XML encoded characters, and fixing tokenization for punctuation. When
+         'asian_support' is enabled, also normalizes specific Asian (CJK) character sequences, i.e.
+         splits them down to the character level.
+     :param no_punct: Remove punctuation. Can be used in conjunction with 'asian_support' to also remove typical
+         punctuation markers in Asian languages (CJK).
+     :param asian_support: Enable special treatment of Asian characters. This option only has an effect when
+         'normalized' and/or 'no_punct' is enabled. If 'normalized' is also enabled, then Asian (CJK)
+         characters are split down to the character level. If 'no_punct' is enabled alongside 'asian_support',
+         specific unicode ranges for CJK and full-width punctuations are also removed.
+     :param case_sensitive: If `True`, does not lowercase sentences.
+     :param references: A sequence of reference documents with document being
+         defined as a sequence of reference strings. If given, the reference info
+         will be pre-computed and cached for faster re-computation across many systems.
+     """
+
+     _SIGNATURE_TYPE = TERSignature
+
+     def __init__(self, normalized: bool = False,
+                  no_punct: bool = False,
+                  asian_support: bool = False,
+                  case_sensitive: bool = False,
+                  references: Optional[Sequence[Sequence[str]]] = None):
+         """`TER` initializer."""
+         super().__init__()
+
+         self.no_punct = no_punct
+         self.normalized = normalized
+         self.asian_support = asian_support
+         self.case_sensitive = case_sensitive
+
+         self.tokenizer = TercomTokenizer(
+             normalized=self.normalized,
+             no_punct=self.no_punct,
+             asian_support=self.asian_support,
+             case_sensitive=self.case_sensitive,
+         )
+         self.tokenizer_signature = self.tokenizer.signature()
+
+         if references is not None:
+             self._ref_cache = self._cache_references(references)
+
+     def _preprocess_segment(self, sent: str) -> str:
+         """Given a sentence, apply tokenization if enabled.
+
+         :param sent: The input sentence string.
+         :return: The pre-processed output string.
+         """
+         return self.tokenizer(sent.rstrip())
+
+     def _compute_score_from_stats(self, stats: List[float]) -> TERScore:
+         """Computes the final score from already aggregated statistics.
+
+         :param stats: A list or numpy array of segment-level statistics.
+         :return: A `TERScore` object.
+         """
+         total_edits, sum_ref_lengths = stats[0], stats[1]
+
+         if sum_ref_lengths > 0:
+             score = total_edits / sum_ref_lengths
+         elif total_edits > 0:
+             score = 1.0  # empty reference(s) and non-empty hypothesis
+         else:
+             score = 0.0  # both reference(s) and hypothesis are empty
+
+         return TERScore(100 * score, total_edits, sum_ref_lengths)
+
+     def _aggregate_and_compute(self, stats: List[List[float]]) -> TERScore:
+         """Computes the final TER score given the pre-computed corpus statistics.
+
+         :param stats: A list of segment-level statistics
+         :return: A `TERScore` instance.
+         """
+         return self._compute_score_from_stats(sum_of_lists(stats))
+
+     def _compute_segment_statistics(
+             self, hypothesis: str, ref_kwargs: Dict) -> List[float]:
+         """Given a (pre-processed) hypothesis sentence and already computed
+         reference words, returns the segment statistics required to compute
+         the full TER score.
+
+         :param hypothesis: Hypothesis sentence.
+         :param ref_kwargs: A dictionary with `ref_words` key which is a list
+             where each sublist contains reference words.
+         :return: A two-element list that contains the 'minimum number of edits'
+             and 'the average reference length'.
+         """
+
+         ref_lengths = 0
+         best_num_edits = int(1e16)
+
+         words_hyp = hypothesis.split()
+
+         # Iterate the references
+         ref_words = ref_kwargs['ref_words']
+         for words_ref in ref_words:
+             num_edits, ref_len = translation_edit_rate(words_hyp, words_ref)
+             ref_lengths += ref_len
+             if num_edits < best_num_edits:
+                 best_num_edits = num_edits
+
+         avg_ref_len = ref_lengths / len(ref_words)
+         return [best_num_edits, avg_ref_len]
+
+     def _extract_reference_info(self, refs: Sequence[str]) -> Dict[str, Any]:
+         """Given a list of reference segments, applies pre-processing & tokenization
+         and returns a list of tokens for each reference.
+
+         :param refs: A sequence of strings.
+         :return: A dictionary that will be passed to `_compute_segment_statistics()`
+             through keyword arguments.
+         """
+         ref_words = []
+
+         for ref in refs:
+             ref_words.append(self._preprocess_segment(ref).split())
+
+         return {'ref_words': ref_words}
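A usage sketch for the metric class, assuming `TER` is re-exported from `sacrebleu.metrics`; the score in the comment follows from the edit/length definition above and is an expectation, not a pinned value:

from sacrebleu.metrics import TER

ter = TER()
score = ter.corpus_score(
    ['the cat sat on mat'],
    [['the cat sat on the mat']])
# One edit (inserting 'the') against a 6-word reference, so
# score.score should come out around 100 * 1/6 (lower is better)
print(score.score, score.num_edits)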
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__init__.py ADDED
@@ -0,0 +1,2 @@
+ # Base tokenizer to derive from
+ from .tokenizer_base import BaseTokenizer  # noqa: F401
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (245 Bytes).
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_13a.cpython-310.pyc ADDED
Binary file (1.36 kB).
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_base.cpython-310.pyc ADDED
Binary file (916 Bytes).
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_char.cpython-310.pyc ADDED
Binary file (1.19 kB).
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_intl.cpython-310.pyc ADDED
Binary file (2.29 kB).
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ja_mecab.cpython-310.pyc ADDED
Binary file (1.75 kB).
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ko_mecab.cpython-310.pyc ADDED
Binary file (1.77 kB).
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_none.cpython-310.pyc ADDED
Binary file (738 Bytes).
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_re.cpython-310.pyc ADDED
Binary file (1.31 kB).
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_spm.cpython-310.pyc ADDED
Binary file (2.58 kB).
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_ter.cpython-310.pyc ADDED
Binary file (4.4 kB).
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/__pycache__/tokenizer_zh.cpython-310.pyc ADDED
Binary file (2.21 kB).
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_13a.py ADDED
@@ -0,0 +1,34 @@
+ from functools import lru_cache
+ from .tokenizer_base import BaseTokenizer
+ from .tokenizer_re import TokenizerRegexp
+
+
+ class Tokenizer13a(BaseTokenizer):
+
+     def signature(self):
+         return '13a'
+
+     def __init__(self):
+         self._post_tokenizer = TokenizerRegexp()
+
+     @lru_cache(maxsize=2**16)
+     def __call__(self, line):
+         """Tokenizes an input line using a relatively minimal tokenization
+         that is, however, equivalent to mteval-v13a, used by WMT.
+
+         :param line: a segment to tokenize
+         :return: the tokenized line
+         """
+
+         # language-independent part:
+         line = line.replace('<skipped>', '')
+         line = line.replace('-\n', '')
+         line = line.replace('\n', ' ')
+
+         if '&' in line:
+             line = line.replace('&quot;', '"')
+             line = line.replace('&amp;', '&')
+             line = line.replace('&lt;', '<')
+             line = line.replace('&gt;', '>')
+
+         return self._post_tokenizer(f' {line} ')
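For illustration, roughly what the tokenizer above produces; the exact spacing comes from the `TokenizerRegexp` post-tokenizer defined later in this diff, so treat the output as approximate:

from sacrebleu.tokenizers.tokenizer_13a import Tokenizer13a

tok = Tokenizer13a()
print(tok('Hello, world! &quot;quoted&quot;'))
# Expected (approximately): Hello , world ! " quoted "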
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_base.py ADDED
@@ -0,0 +1,19 @@
+ class BaseTokenizer:
+     """A base dummy tokenizer to derive from."""
+
+     def signature(self):
+         """
+         Returns a signature for the tokenizer.
+
+         :return: signature string
+         """
+         raise NotImplementedError()
+
+     def __call__(self, line):
+         """
+         Tokenizes an input line with the tokenizer.
+
+         :param line: a segment to tokenize
+         :return: the tokenized line
+         """
+         raise NotImplementedError()
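A hypothetical subclass sketch showing the contract above: implement `signature()` and `__call__()`. The `TokenizerLower` name and its behavior are made up for illustration and are not part of sacrebleu:

from functools import lru_cache

from sacrebleu.tokenizers.tokenizer_base import BaseTokenizer


class TokenizerLower(BaseTokenizer):
    """Hypothetical tokenizer that only lowercases the input."""

    def signature(self):
        return 'lower'

    @lru_cache(maxsize=2**16)
    def __call__(self, line):
        # no segmentation, just case folding
        return line.lower()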
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_char.py ADDED
@@ -0,0 +1,19 @@
+ from functools import lru_cache
+ from .tokenizer_base import BaseTokenizer
+
+
+ class TokenizerChar(BaseTokenizer):
+     def signature(self):
+         return 'char'
+
+     def __init__(self):
+         pass
+
+     @lru_cache(maxsize=2**16)
+     def __call__(self, line):
+         """Tokenizes all the characters in the input line.
+
+         :param line: a segment to tokenize
+         :return: the tokenized line
+         """
+         return " ".join((char for char in line))
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_intl.py ADDED
@@ -0,0 +1,50 @@
+ from functools import lru_cache
+
+ import regex
+
+ from .tokenizer_base import BaseTokenizer
+
+
+ class TokenizerV14International(BaseTokenizer):
+     """Tokenizes a string following the official BLEU implementation.
+
+     See github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v14.pl#L954-L983
+
+     In our case, the input string is expected to be just one line.
+     We just tokenize on punctuation and symbols,
+     except when a punctuation mark is preceded and followed by a digit
+     (e.g. a comma/dot as a thousand/decimal separator).
+     We do not recover escaped forms of punctuation such as &apos; or &gt;
+     as these should never appear in MT system outputs (see issue #138)
+
+     Note that a number (e.g., a year) followed by a dot at the end of a
+     sentence is NOT tokenized, i.e. the dot stays with the number because
+     `s/(\\p{P})(\\P{N})/ $1 $2/g` does not match this case (unless we add a
+     space after each sentence). However, this error is already in the
+     original mteval-v14.pl and we want to be consistent with it.
+     The error is not present in the non-international version,
+     which uses `$norm_text = " $norm_text "`.
+
+     :param line: the input string to tokenize.
+     :return: The tokenized string.
+     """
+
+     def signature(self):
+         return 'intl'
+
+     def __init__(self):
+         self._re = [
+             # Separate out punctuation preceded by a non-digit
+             (regex.compile(r'(\P{N})(\p{P})'), r'\1 \2 '),
+             # Separate out punctuation followed by a non-digit
+             (regex.compile(r'(\p{P})(\P{N})'), r' \1 \2'),
+             # Separate out symbols
+             (regex.compile(r'(\p{S})'), r' \1 '),
+         ]
+
+     @lru_cache(maxsize=2**16)
+     def __call__(self, line: str) -> str:
+         for (_re, repl) in self._re:
+             line = _re.sub(repl, line)
+
+         return ' '.join(line.split())
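A short sketch of the digit-aware behavior described in the docstring above (assumes the third-party `regex` package is installed); the expected output is derived from the three rules, not a recorded run:

from sacrebleu.tokenizers.tokenizer_intl import TokenizerV14International

tok = TokenizerV14International()
print(tok('It costs $1,000.50!'))
# Expected: 'It costs $ 1,000.50!' -- the symbol '$' is split off, the
# digit-adjacent ',' and '.' are kept, and the final '!' stays attached
# because it follows a digit at end of line (the quirk documented above)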
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_ja_mecab.py ADDED
@@ -0,0 +1,52 @@
+ from functools import lru_cache
+
+ try:
+     import MeCab
+     import ipadic
+ except ImportError:
+     # Don't fail until the tokenizer is actually used
+     MeCab = None
+
+ from .tokenizer_base import BaseTokenizer
+
+ FAIL_MESSAGE = """
+ Japanese tokenization requires extra dependencies, but you do not have them installed.
+ Please install them like so.
+
+     pip install sacrebleu[ja]
+ """
+
+
+ class TokenizerJaMecab(BaseTokenizer):
+     def __init__(self):
+         if MeCab is None:
+             raise RuntimeError(FAIL_MESSAGE)
+         self.tagger = MeCab.Tagger(ipadic.MECAB_ARGS + " -Owakati")
+
+         # make sure the dictionary is IPA
+         d = self.tagger.dictionary_info()
+         assert d.size == 392126, \
+             "Please make sure to use the IPA dictionary for MeCab"
+         # This asserts that no user dictionary has been loaded
+         assert d.next is None
+
+     @lru_cache(maxsize=2**16)
+     def __call__(self, line):
+         """
+         Tokenizes a Japanese input line using the MeCab morphological analyzer.
+
+         :param line: a segment to tokenize
+         :return: the tokenized line
+         """
+         line = line.strip()
+         sentence = self.tagger.parse(line).strip()
+         return sentence
+
+     def signature(self):
+         """
+         Returns the MeCab parameters.
+
+         :return: signature string
+         """
+         signature = self.tagger.version() + "-IPA"
+         return 'ja-mecab-' + signature
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_ko_mecab.py ADDED
@@ -0,0 +1,52 @@
+ from functools import lru_cache
+
+ try:
+     import mecab_ko as MeCab
+     import mecab_ko_dic
+ except ImportError:
+     # Don't fail until the tokenizer is actually used
+     MeCab = None
+
+ from .tokenizer_base import BaseTokenizer
+
+ FAIL_MESSAGE = """
+ Korean tokenization requires extra dependencies, but you do not have them installed.
+ Please install them like so.
+
+     pip install sacrebleu[ko]
+ """
+
+
+ class TokenizerKoMecab(BaseTokenizer):
+     def __init__(self):
+         if MeCab is None:
+             raise RuntimeError(FAIL_MESSAGE)
+         self.tagger = MeCab.Tagger(mecab_ko_dic.MECAB_ARGS + " -Owakati")
+
+         # make sure the dictionary is mecab-ko-dic
+         d = self.tagger.dictionary_info()
+         assert d.size == 811795, \
+             "Please make sure to use the mecab-ko-dic for MeCab-ko"
+         # This asserts that no user dictionary has been loaded
+         assert d.next is None
+
+     @lru_cache(maxsize=2**16)
+     def __call__(self, line):
+         """
+         Tokenizes a Korean input line using the MeCab-ko morphological analyzer.
+
+         :param line: a segment to tokenize
+         :return: the tokenized line
+         """
+         line = line.strip()
+         sentence = self.tagger.parse(line).strip()
+         return sentence
+
+     def signature(self):
+         """
+         Returns the MeCab-ko parameters.
+
+         :return: signature string
+         """
+         signature = self.tagger.version() + "-KO"
+         return 'ko-mecab-' + signature
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_none.py ADDED
@@ -0,0 +1,10 @@
+ from .tokenizer_base import BaseTokenizer
+
+ class NoneTokenizer(BaseTokenizer):
+     """Don't apply any tokenization. Not recommended!"""
+
+     def signature(self):
+         return 'none'
+
+     def __call__(self, line):
+         return line
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_re.py ADDED
@@ -0,0 +1,38 @@
+ from functools import lru_cache
+ import re
+
+ from .tokenizer_base import BaseTokenizer
+
+
+ class TokenizerRegexp(BaseTokenizer):
+
+     def signature(self):
+         return 're'
+
+     def __init__(self):
+         self._re = [
+             # language-dependent part (assuming Western languages)
+             (re.compile(r'([\{-\~\[-\` -\&\(-\+\:-\@\/])'), r' \1 '),
+             # tokenize period and comma unless preceded by a digit
+             (re.compile(r'([^0-9])([\.,])'), r'\1 \2 '),
+             # tokenize period and comma unless followed by a digit
+             (re.compile(r'([\.,])([^0-9])'), r' \1 \2'),
+             # tokenize dash when preceded by a digit
+             (re.compile(r'([0-9])(-)'), r'\1 \2 '),
+             # one space only between words
+             # NOTE: Doing this in Python (below) is faster
+             # (re.compile(r'\s+'), r' '),
+         ]
+
+     @lru_cache(maxsize=2**16)
+     def __call__(self, line):
+         """Common post-processing tokenizer for `13a` and `zh` tokenizers.
+
+         :param line: a segment to tokenize
+         :return: the tokenized line
+         """
+         for (_re, repl) in self._re:
+             line = _re.sub(repl, line)
+
+         # no leading or trailing spaces, single space within words
+         return ' '.join(line.split())
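A quick sketch of the rules above in action; the expected output is derived from the regular expressions, so treat it as approximate:

from sacrebleu.tokenizers.tokenizer_re import TokenizerRegexp

tok = TokenizerRegexp()
print(tok('Hello, world (v2.0)'))
# Expected (approximately): 'Hello , world ( v2.0 )' -- parentheses are
# split off, while the digit-adjacent period in 'v2.0' is preserved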
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_spm.py ADDED
@@ -0,0 +1,70 @@
+ # -*- coding: utf-8 -*-
+
+ import os
+ import logging
+
+ from functools import lru_cache
+ from ..utils import SACREBLEU_DIR, download_file
+ from .tokenizer_base import BaseTokenizer
+
+ sacrelogger = logging.getLogger('sacrebleu')
+
+
+ SPM_MODELS = {
+     "spm": {
+         "url": "https://dl.fbaipublicfiles.com/fairseq/models/flores/sacrebleu_tokenizer_spm.model",
+         "signature": "flores101",
+     },
+     # same as the default of "spm"
+     "flores101": {
+         "url": "https://dl.fbaipublicfiles.com/fairseq/models/flores/sacrebleu_tokenizer_spm.model",
+         "signature": "flores101",
+     },
+     "flores200": {
+         "url": "https://tinyurl.com/flores200sacrebleuspm",
+         "signature": "flores200",
+     },
+ }
+
+
+ class TokenizerSPM(BaseTokenizer):
+     def signature(self):
+         return self.name
+
+     def __init__(self, key="spm"):
+         self.name = SPM_MODELS[key]["signature"]
+
+         if key == "spm":
+             sacrelogger.warn("Tokenizer 'spm' has been changed to 'flores101', and may be removed in the future.")
+
+         try:
+             import sentencepiece as spm
+         except (ImportError, ModuleNotFoundError):
+             raise ImportError(
+                 '\n\nPlease install the sentencepiece library for SPM tokenization:'
+                 '\n\n  pip install sentencepiece '
+             )
+         self.sp = spm.SentencePieceProcessor()
+
+         model_path = os.path.join(SACREBLEU_DIR, "models", os.path.basename(SPM_MODELS[key]["url"]))
+         if not os.path.exists(model_path):
+             url = SPM_MODELS[self.name]["url"]
+             download_file(url, model_path)
+         self.sp.Load(model_path)
+
+     @lru_cache(maxsize=2**16)
+     def __call__(self, line):
+         """Tokenizes the input line with the SentencePiece model.
+
+         :param line: a segment to tokenize
+         :return: the tokenized line
+         """
+         return " ".join(self.sp.EncodeAsPieces(line))
+
+
+ class Flores200Tokenizer(TokenizerSPM):
+     def __init__(self):
+         super().__init__("flores200")
+
+
+ class Flores101Tokenizer(TokenizerSPM):
+     def __init__(self):
+         super().__init__("flores101")
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_ter.py ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 Memsource
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ import re
17
+ from functools import lru_cache
18
+
19
+ from .tokenizer_base import BaseTokenizer
20
+
21
+
22
+ def _normalize_general_and_western(sent: str) -> str:
23
+ # language-independent (general) part
24
+
25
+ # strip end-of-line hyphenation and join lines
26
+ sent = re.sub(r"\n-", "", sent)
27
+
28
+ # join lines
29
+ sent = re.sub(r"\n", " ", sent)
30
+
31
+ # handle XML escaped symbols
32
+ sent = re.sub(r"&quot;", "\"", sent)
33
+ sent = re.sub(r"&amp;", "&", sent)
34
+ sent = re.sub(r"&lt;", "<", sent)
35
+ sent = re.sub(r"&gt;", ">", sent)
36
+
37
+ # language-dependent (Western) part
38
+ sent = f" {sent} "
39
+
40
+ # tokenize punctuation
41
+ sent = re.sub(r"([{-~[-` -&(-+:-@/])", r" \1 ", sent)
42
+
43
+ # handle possesives
44
+ sent = re.sub(r"'s ", r" 's ", sent)
45
+ sent = re.sub(r"'s$", r" 's", sent)
46
+
47
+ # tokenize period and comma unless preceded by a digit
48
+ sent = re.sub(r"([^0-9])([\.,])", r"\1 \2 ", sent)
49
+
50
+ # tokenize period and comma unless followed by a digit
51
+ sent = re.sub(r"([\.,])([^0-9])", r" \1 \2", sent)
52
+
53
+ # tokenize dash when preceded by a digit
54
+ sent = re.sub(r"([0-9])(-)", r"\1 \2 ", sent)
55
+
56
+ return sent
+
+
+ def _normalize_asian(sent: str) -> str:
+     # Split Chinese chars and Japanese kanji down to character level
+
+     # 4E00--9FFF CJK Unified Ideographs
+     # 3400--4DBF CJK Unified Ideographs Extension A
+     sent = re.sub(r"([\u4e00-\u9fff\u3400-\u4dbf])", r" \1 ", sent)
+
+     # 31C0--31EF CJK Strokes
+     # 2E80--2EFF CJK Radicals Supplement
+     sent = re.sub(r"([\u31c0-\u31ef\u2e80-\u2eff])", r" \1 ", sent)
+
+     # 3300--33FF CJK Compatibility
+     # F900--FAFF CJK Compatibility Ideographs
+     # FE30--FE4F CJK Compatibility Forms
+     sent = re.sub(
+         r"([\u3300-\u33ff\uf900-\ufaff\ufe30-\ufe4f])", r" \1 ", sent)
+
+     # 3200--32FF Enclosed CJK Letters and Months
+     sent = re.sub(r"([\u3200-\u32ff])", r" \1 ", sent)
+
+     # Split Hiragana, Katakana, and Katakana Phonetic Extensions
+     # only when adjacent to something else
+     # 3040--309F Hiragana
+     # 30A0--30FF Katakana
+     # 31F0--31FF Katakana Phonetic Extensions
+     sent = re.sub(
+         r"(^|[^\u3040-\u309f])([\u3040-\u309f]+)(?=$|[^\u3040-\u309f])",
+         r"\1 \2 ", sent)
+     sent = re.sub(
+         r"(^|[^\u30a0-\u30ff])([\u30a0-\u30ff]+)(?=$|[^\u30a0-\u30ff])",
+         r"\1 \2 ", sent)
+     sent = re.sub(
+         r"(^|[^\u31f0-\u31ff])([\u31f0-\u31ff]+)(?=$|[^\u31f0-\u31ff])",
+         r"\1 \2 ", sent)
+
+     sent = re.sub(TercomTokenizer.ASIAN_PUNCT, r" \1 ", sent)
+     sent = re.sub(TercomTokenizer.FULL_WIDTH_PUNCT, r" \1 ", sent)
+     return sent
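A small example of the CJK handling (output approximate): ideographs are split into single characters, while a kana run such as "です" stays together because kana is only split at boundaries with other scripts:

    out = _normalize_asian("中文です。")
    print(" ".join(out.split()))
    # -> roughly "中 文 です 。"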
+
+
+ def _remove_punct(sent: str) -> str:
+     return re.sub(r"[\.,\?:;!\"\(\)]", "", sent)
+
+
+ def _remove_asian_punct(sent: str) -> str:
+     sent = re.sub(TercomTokenizer.ASIAN_PUNCT, r"", sent)
+     sent = re.sub(TercomTokenizer.FULL_WIDTH_PUNCT, r"", sent)
+     return sent
+
+
+ class TercomTokenizer(BaseTokenizer):
+     """Re-implementation of the Tercom tokenizer in Python 3.
+
+     See src/ter/core/Normalizer.java in https://github.com/jhclark/tercom
+
+     Note that Python doesn't support named Unicode blocks, so the mapping
+     for the relevant blocks was taken from here:
+
+         https://unicode-table.com/en/blocks/
+     """
+     ASIAN_PUNCT = r"([\u3001\u3002\u3008-\u3011\u3014-\u301f\uff61-\uff65\u30fb])"
+     FULL_WIDTH_PUNCT = r"([\uff0e\uff0c\uff1f\uff1a\uff1b\uff01\uff02\uff08\uff09])"
+
+     def __init__(self,
+                  normalized: bool = False,
+                  no_punct: bool = False,
+                  asian_support: bool = False,
+                  case_sensitive: bool = False):
+         """Initialize the tokenizer.
+
+         :param normalized: Enable character normalization: strip end-of-line
+             hyphenation and newlines, unescape XML entities, and split off
+             punctuation. When 'asian_support' is also enabled, Asian (CJK)
+             character sequences are additionally split down to the character level.
+         :param no_punct: Remove punctuation. Can be combined with 'asian_support'
+             to also remove typical punctuation markers of Asian languages (CJK).
+         :param asian_support: Enable special treatment of Asian characters. This
+             option only has an effect when 'normalized' and/or 'no_punct' is
+             enabled. If 'normalized' is also enabled, Asian (CJK) characters are
+             split down to the character level. If 'no_punct' is enabled alongside
+             'asian_support', CJK and full-width punctuation are removed as well.
+         :param case_sensitive: Enable case sensitivity, i.e., do not lowercase the data.
+         """
+         self._normalized = normalized
+         self._no_punct = no_punct
+         self._asian_support = asian_support
+         self._case_sensitive = case_sensitive
+
+     @lru_cache(maxsize=2**16)
+     # Although the cache is shared across instances, queries for the same
+     # sentence from different instances do not collide, since `self` is part
+     # of the cache key as well.
+     def __call__(self, sent: str) -> str:
+         if not sent:
+             return ""
+
+         if not self._case_sensitive:
+             sent = sent.lower()
+
+         if self._normalized:
+             sent = _normalize_general_and_western(sent)
+             if self._asian_support:
+                 sent = _normalize_asian(sent)
+
+         if self._no_punct:
+             sent = _remove_punct(sent)
+             if self._asian_support:
+                 sent = _remove_asian_punct(sent)
+
+         # Strip extra whitespace
+         return ' '.join(sent.split())
+
+     def signature(self):
+         return 'tercom'
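A minimal usage sketch of the class above (outputs approximate). Note that, per the comment on `__call__`, two instances with different flags never share cache entries:

    tok_plain = TercomTokenizer()
    tok_norm = TercomTokenizer(normalized=True)

    print(tok_plain("Hello, World!"))  # -> "hello, world!" (lowercased only)
    print(tok_norm("Hello, World!"))   # -> roughly "hello , world !"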
llmeval-env/lib/python3.10/site-packages/sacrebleu/tokenizers/tokenizer_zh.py ADDED
@@ -0,0 +1,119 @@
+ # Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"). You may not
+ # use this file except in compliance with the License. A copy of the License
+ # is located at
+ #
+ #     http://aws.amazon.com/apache2.0/
+ #
+ # or in the "license" file accompanying this file. This file is distributed on
+ # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ # express or implied. See the License for the specific language governing
+ # permissions and limitations under the License.
+
+ ##############
+
+ # MIT License
+ # Copyright (c) 2017 - Shujian Huang <[email protected]>
+
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
+ # of this software and associated documentation files (the "Software"), to deal
+ # in the Software without restriction, including without limitation the rights
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the Software is
+ # furnished to do so, subject to the following conditions:
+
+ # The above copyright notice and this permission notice shall be included in
+ # all copies or substantial portions of the Software.
+
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ # SOFTWARE.
+
+ # Author: Shujian Huang [email protected]
+
+
+ from functools import lru_cache
+
+ from .tokenizer_base import BaseTokenizer
+ from .tokenizer_re import TokenizerRegexp
+
+ _UCODE_RANGES = [
+     ('\u3400', '\u4db5'),    # CJK Unified Ideographs Extension A, release 3.0
+     ('\u4e00', '\u9fa5'),    # CJK Unified Ideographs, release 1.1
+     ('\u9fa6', '\u9fbb'),    # CJK Unified Ideographs, release 4.1
+     ('\uf900', '\ufa2d'),    # CJK Compatibility Ideographs, release 1.1
+     ('\ufa30', '\ufa6a'),    # CJK Compatibility Ideographs, release 3.2
+     ('\ufa70', '\ufad9'),    # CJK Compatibility Ideographs, release 4.1
+     ('\U00020000', '\U0002a6d6'),  # CJK Unified Ideographs Extension B, release 3.1
+     ('\U0002f800', '\U0002fa1d'),  # CJK Compatibility Ideographs Supplement, release 3.1
+     ('\uff00', '\uffef'),    # Halfwidth and Fullwidth Forms: full-width ASCII and
+                              # punctuation, half-width Katakana and Hangul
+     ('\u2e80', '\u2eff'),    # CJK Radicals Supplement
+     ('\u3000', '\u303f'),    # CJK Symbols and Punctuation
+     ('\u31c0', '\u31ef'),    # CJK Strokes
+     ('\u2f00', '\u2fdf'),    # Kangxi Radicals
+     ('\u2ff0', '\u2fff'),    # Ideographic Description Characters
+     ('\u3100', '\u312f'),    # Bopomofo (phonetic symbols)
+     ('\u31a0', '\u31bf'),    # Bopomofo Extended (Taiwanese and Hakka expansion)
+     ('\ufe10', '\ufe1f'),    # Vertical Forms
+     ('\ufe30', '\ufe4f'),    # CJK Compatibility Forms
+     ('\u2600', '\u26ff'),    # Miscellaneous Symbols
+     ('\u2700', '\u27bf'),    # Dingbats
+     ('\u3200', '\u32ff'),    # Enclosed CJK Letters and Months
+     ('\u3300', '\u33ff'),    # CJK Compatibility
+ ]
+
+
+ class TokenizerZh(BaseTokenizer):
+
+     def signature(self):
+         return 'zh'
+
+     def __init__(self):
+         self._post_tokenizer = TokenizerRegexp()
+
+     @staticmethod
+     @lru_cache(maxsize=2**16)
+     def _is_chinese_char(uchar):
+         """
+         :param uchar: input char in unicode
+         :return: whether the input char is a Chinese character.
+         """
+         for start, end in _UCODE_RANGES:
+             if start <= uchar <= end:
+                 return True
+         return False
+
+     @lru_cache(maxsize=2**16)
+     def __call__(self, line):
+         """The tokenization of Chinese text in this script contains two
+         steps: separate each Chinese character, then tokenize the
+         non-Chinese part (following the `13a`, i.e. mteval, tokenizer).
+
+         Author: Shujian Huang [email protected]
+
+         :param line: input sentence
+         :return: tokenized sentence
+         """
+
+         line = line.strip()
+         line_in_chars = ""
+
+         # TODO: the below code could probably be replaced with the following:
+         # @ozan: Gives slightly different scores, need to investigate
+         # import regex
+         # line = regex.sub(r'(\p{Han})', r' \1 ', line)
+         for char in line:
+             if self._is_chinese_char(char):
+                 # surround each Chinese character with spaces
+                 line_in_chars += " "
+                 line_in_chars += char
+                 line_in_chars += " "
+             else:
+                 line_in_chars += char
+
+         return self._post_tokenizer(line_in_chars)
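In sacrebleu this tokenizer corresponds to `tokenize="zh"`. A minimal direct-use sketch (output approximate):

    tok = TokenizerZh()
    print(tok("这是一个测试, okay."))
    # -> roughly "这 是 一 个 测 试 , okay ."
    # Han characters are isolated first; TokenizerRegexp then handles the rest.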