Add files using upload-large-folder tool
This view is limited to 50 files because this commit contains too many changes. See the raw diff for the complete file list.
- ckpts/llama-3b/global_step100/bf16_zero_pp_rank_185_mp_rank_03_optim_states.pt +3 -0
- ckpts/llama-3b/global_step100/bf16_zero_pp_rank_46_mp_rank_03_optim_states.pt +3 -0
- venv/lib/python3.10/site-packages/huggingface_hub/serialization/__init__.py +20 -0
- venv/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_base.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_numpy.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_tensorflow.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_torch.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/serialization/_base.py +169 -0
- venv/lib/python3.10/site-packages/huggingface_hub/serialization/_numpy.py +68 -0
- venv/lib/python3.10/site-packages/huggingface_hub/serialization/_tensorflow.py +94 -0
- venv/lib/python3.10/site-packages/huggingface_hub/serialization/_torch.py +200 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_cache_assets.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_cache_manager.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_chunk_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_datetime.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_deprecation.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_errors.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_experimental.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_fixes.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_git_credential.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_hf_folder.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_http.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_runtime.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_safetensors.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_subprocess.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_telemetry.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_token.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_typing.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_validators.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/endpoint_helpers.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/logging.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/sha.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/tqdm.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sacrebleu/__init__.py +43 -0
- venv/lib/python3.10/site-packages/sacrebleu/__main__.py +27 -0
- venv/lib/python3.10/site-packages/sacrebleu/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sacrebleu/__pycache__/__main__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sacrebleu/__pycache__/compat.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sacrebleu/__pycache__/sacrebleu.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sacrebleu/__pycache__/significance.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sacrebleu/__pycache__/utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/sacrebleu/compat.py +205 -0
- venv/lib/python3.10/site-packages/sacrebleu/dataset/__init__.py +0 -0
- venv/lib/python3.10/site-packages/sacrebleu/dataset/__main__.py +45 -0
- venv/lib/python3.10/site-packages/sacrebleu/dataset/iwslt_xml.py +8 -0
- venv/lib/python3.10/site-packages/sacrebleu/dataset/plain_text.py +36 -0
- venv/lib/python3.10/site-packages/sacrebleu/dataset/tsv.py +61 -0
- venv/lib/python3.10/site-packages/sacrebleu/py.typed +0 -0
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_185_mp_rank_03_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6f11ad3c628f3b289310485e8b4e321494534e97bf89b0783ae3e28e7190e95c
size 41830468
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_46_mp_rank_03_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e5adc5ccdc0be2c82049e8e3acc48c781f74f0e7e2d9fe9a3c8b7e6e12318ec4
size 41830330
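
The two `.pt` entries above are Git LFS pointer files rather than the optimizer states themselves: three `key value` lines giving the spec version, the SHA-256 object id, and the payload size in bytes. For reference, a minimal sketch of reading those fields; the helper name is illustrative and assumes a well-formed pointer file:

```python
from pathlib import Path
from typing import Dict


def parse_lfs_pointer(path: str) -> Dict[str, str]:
    """Parse a Git LFS pointer file into its key/value fields (sketch, assumes a valid pointer)."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        if line.strip():
            key, _, value = line.partition(" ")
            fields[key] = value
    return fields


# Hypothetical usage against one of the pointer files above:
# info = parse_lfs_pointer("ckpts/llama-3b/global_step100/bf16_zero_pp_rank_46_mp_rank_03_optim_states.pt")
# info["oid"] -> "sha256:e5adc5..." ; int(info["size"]) -> 41830330
```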
venv/lib/python3.10/site-packages/huggingface_hub/serialization/__init__.py
ADDED
@@ -0,0 +1,20 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ruff: noqa: F401
"""Contains helpers to serialize tensors."""

from ._base import StateDictSplit, split_state_dict_into_shards_factory
from ._numpy import split_numpy_state_dict_into_shards
from ._tensorflow import split_tf_state_dict_into_shards
from ._torch import split_torch_state_dict_into_shards
venv/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (540 Bytes)
venv/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_base.cpython-310.pyc
ADDED
Binary file (4.7 kB)
venv/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_numpy.cpython-310.pyc
ADDED
Binary file (2.3 kB)
venv/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_tensorflow.cpython-310.pyc
ADDED
Binary file (3.09 kB)
venv/lib/python3.10/site-packages/huggingface_hub/serialization/__pycache__/_torch.cpython-310.pyc
ADDED
Binary file (6.5 kB)
venv/lib/python3.10/site-packages/huggingface_hub/serialization/_base.py
ADDED
@@ -0,0 +1,169 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains helpers to split tensors into shards."""

from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional, TypeVar

from .. import logging


TensorT = TypeVar("TensorT")
TensorSizeFn_T = Callable[[TensorT], int]
StorageIDFn_T = Callable[[TensorT], Optional[Any]]

MAX_SHARD_SIZE = 5_000_000_000  # 5GB
FILENAME_PATTERN = "model{suffix}.safetensors"

logger = logging.get_logger(__file__)


@dataclass
class StateDictSplit:
    is_sharded: bool = field(init=False)
    metadata: Dict[str, Any]
    filename_to_tensors: Dict[str, List[str]]
    tensor_to_filename: Dict[str, str]

    def __post_init__(self):
        self.is_sharded = len(self.filename_to_tensors) > 1


def split_state_dict_into_shards_factory(
    state_dict: Dict[str, TensorT],
    *,
    get_tensor_size: TensorSizeFn_T,
    get_storage_id: StorageIDFn_T = lambda tensor: None,
    filename_pattern: str = FILENAME_PATTERN,
    max_shard_size: int = MAX_SHARD_SIZE,
) -> StateDictSplit:
    """
    Split a model state dictionary in shards so that each shard is smaller than a given size.

    The shards are determined by iterating through the `state_dict` in the order of its keys. There is no optimization
    made to make each shard as close as possible to the maximum size passed. For example, if the limit is 10GB and we
    have tensors of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not
    [6+2+2GB], [6+2GB], [6GB].

    <Tip warning={true}>

    If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a
    size greater than `max_shard_size`.

    </Tip>

    Args:
        state_dict (`Dict[str, Tensor]`):
            The state dictionary to save.
        get_tensor_size (`Callable[[Tensor], int]`):
            A function that returns the size of a tensor in bytes.
        get_storage_id (`Callable[[Tensor], Optional[Any]]`, *optional*):
            A function that returns a unique identifier to a tensor storage. Multiple different tensors can share the
            same underlying storage. This identifier is guaranteed to be unique and constant for this tensor's storage
            during its lifetime. Two tensor storages with non-overlapping lifetimes may have the same id.
        filename_pattern (`str`, *optional*):
            The pattern to generate the files names in which the model will be saved. Pattern must be a string that
            can be formatted with `filename_pattern.format(suffix=...)` and must contain the keyword `suffix`
            Defaults to `"model{suffix}.safetensors"`.
        max_shard_size (`int` or `str`, *optional*):
            The maximum size of each shard, in bytes. Defaults to 5GB.

    Returns:
        [`StateDictSplit`]: A `StateDictSplit` object containing the shards and the index to retrieve them.
    """
    storage_id_to_tensors: Dict[Any, List[str]] = {}

    shard_list: List[Dict[str, TensorT]] = []
    current_shard: Dict[str, TensorT] = {}
    current_shard_size = 0
    total_size = 0

    for key, tensor in state_dict.items():
        # when bnb serialization is used the weights in the state dict can be strings
        # check: https://github.com/huggingface/transformers/pull/24416 for more details
        if isinstance(tensor, str):
            logger.info("Skipping tensor %s as it is a string (bnb serialization)", key)
            continue

        # If a `tensor` shares the same underlying storage as another tensor, we put `tensor` in the same `block`
        storage_id = get_storage_id(tensor)
        if storage_id is not None:
            if storage_id in storage_id_to_tensors:
                # We skip this tensor for now and will reassign to correct shard later
                storage_id_to_tensors[storage_id].append(key)
                continue
            else:
                # This is the first tensor with this storage_id, we create a new entry
                # in the storage_id_to_tensors dict => we will assign the shard id later
                storage_id_to_tensors[storage_id] = [key]

        # Compute tensor size
        tensor_size = get_tensor_size(tensor)

        # If this tensor is bigger than the maximal size, we put it in its own shard
        if tensor_size > max_shard_size:
            total_size += tensor_size
            shard_list.append({key: tensor})
            continue

        # If this tensor is going to tip up over the maximal size, we split.
        # Current shard already has some tensors, we add it to the list of shards and create a new one.
        if current_shard_size + tensor_size > max_shard_size:
            shard_list.append(current_shard)
            current_shard = {}
            current_shard_size = 0

        # Add the tensor to the current shard
        current_shard[key] = tensor
        current_shard_size += tensor_size
        total_size += tensor_size

    # Add the last shard
    if len(current_shard) > 0:
        shard_list.append(current_shard)
    nb_shards = len(shard_list)

    # Loop over the tensors that share the same storage and assign them together
    for storage_id, keys in storage_id_to_tensors.items():
        # Let's try to find the shard where the first tensor of this storage is and put all tensors in the same shard
        for shard in shard_list:
            if keys[0] in shard:
                for key in keys:
                    shard[key] = state_dict[key]
                break

    # If we only have one shard, we return it => no need to build the index
    if nb_shards == 1:
        filename = filename_pattern.format(suffix="")
        return StateDictSplit(
            metadata={"total_size": total_size},
            filename_to_tensors={filename: list(state_dict.keys())},
            tensor_to_filename={key: filename for key in state_dict.keys()},
        )

    # Now that each tensor is assigned to a shard, let's assign a filename to each shard
    tensor_name_to_filename = {}
    filename_to_tensors = {}
    for idx, shard in enumerate(shard_list):
        filename = filename_pattern.format(suffix=f"-{idx+1:05d}-of-{nb_shards:05d}")
        for key in shard:
            tensor_name_to_filename[key] = filename
        filename_to_tensors[filename] = list(shard.keys())

    # Build the index and return
    return StateDictSplit(
        metadata={"total_size": total_size},
        filename_to_tensors=filename_to_tensors,
        tensor_to_filename=tensor_name_to_filename,
    )
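
Since `split_state_dict_into_shards_factory` is framework-agnostic, it only needs a `get_tensor_size` callback (and optionally `get_storage_id`). For reference, a minimal sketch of calling it directly, with plain `bytes` objects standing in for tensors; the toy data and tiny shard limit are illustrative, not part of this commit:

```python
from huggingface_hub.serialization import split_state_dict_into_shards_factory

# Toy "state dict": bytes objects stand in for tensors, so their size is just len().
state_dict = {
    "layer1.weight": b"\x00" * 600,
    "layer1.bias": b"\x00" * 200,
    "layer2.weight": b"\x00" * 600,
}

split = split_state_dict_into_shards_factory(
    state_dict,
    get_tensor_size=len,   # size in bytes of each "tensor"
    max_shard_size=1_000,  # tiny limit so the toy example actually shards
)

print(split.is_sharded)           # True: 600 + 200 fits, the second 600 starts a new shard
print(split.filename_to_tensors)  # e.g. {"model-00001-of-00002.safetensors": [...], ...}
```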
venv/lib/python3.10/site-packages/huggingface_hub/serialization/_numpy.py
ADDED
@@ -0,0 +1,68 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains numpy-specific helpers."""

from typing import TYPE_CHECKING, Dict

from ._base import FILENAME_PATTERN, MAX_SHARD_SIZE, StateDictSplit, split_state_dict_into_shards_factory


if TYPE_CHECKING:
    import numpy as np


def split_numpy_state_dict_into_shards(
    state_dict: Dict[str, "np.ndarray"],
    *,
    filename_pattern: str = FILENAME_PATTERN,
    max_shard_size: int = MAX_SHARD_SIZE,
) -> StateDictSplit:
    """
    Split a model state dictionary in shards so that each shard is smaller than a given size.

    The shards are determined by iterating through the `state_dict` in the order of its keys. There is no optimization
    made to make each shard as close as possible to the maximum size passed. For example, if the limit is 10GB and we
    have tensors of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not
    [6+2+2GB], [6+2GB], [6GB].

    <Tip warning={true}>

    If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a
    size greater than `max_shard_size`.

    </Tip>

    Args:
        state_dict (`Dict[str, np.ndarray]`):
            The state dictionary to save.
        filename_pattern (`str`, *optional*):
            The pattern to generate the files names in which the model will be saved. Pattern must be a string that
            can be formatted with `filename_pattern.format(suffix=...)` and must contain the keyword `suffix`
            Defaults to `"model{suffix}.safetensors"`.
        max_shard_size (`int` or `str`, *optional*):
            The maximum size of each shard, in bytes. Defaults to 5GB.

    Returns:
        [`StateDictSplit`]: A `StateDictSplit` object containing the shards and the index to retrieve them.
    """
    return split_state_dict_into_shards_factory(
        state_dict,
        max_shard_size=max_shard_size,
        filename_pattern=filename_pattern,
        get_tensor_size=get_tensor_size,
    )


def get_tensor_size(tensor: "np.ndarray") -> int:
    return tensor.nbytes
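
For NumPy arrays the wrapper above simply plugs `ndarray.nbytes` into the factory. A quick sketch with toy arrays, assuming the `numpy` package installed in this venv:

```python
import numpy as np
from huggingface_hub.serialization import split_numpy_state_dict_into_shards

state_dict = {
    "embeddings": np.zeros((1024, 1024), dtype=np.float32),  # ~4 MB
    "head": np.zeros((1024, 128), dtype=np.float32),         # ~0.5 MB
}

# A 1 MB limit forces the 4 MB array into its own oversized shard, as the docstring warns.
split = split_numpy_state_dict_into_shards(state_dict, max_shard_size=1_000_000)
print(split.is_sharded, list(split.filename_to_tensors))
```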
venv/lib/python3.10/site-packages/huggingface_hub/serialization/_tensorflow.py
ADDED
@@ -0,0 +1,94 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains tensorflow-specific helpers."""

import math
import re
from typing import TYPE_CHECKING, Dict

from ._base import MAX_SHARD_SIZE, StateDictSplit, split_state_dict_into_shards_factory


if TYPE_CHECKING:
    import tensorflow as tf


def split_tf_state_dict_into_shards(
    state_dict: Dict[str, "tf.Tensor"],
    *,
    filename_pattern: str = "tf_model{suffix}.h5",
    max_shard_size: int = MAX_SHARD_SIZE,
) -> StateDictSplit:
    """
    Split a model state dictionary in shards so that each shard is smaller than a given size.

    The shards are determined by iterating through the `state_dict` in the order of its keys. There is no optimization
    made to make each shard as close as possible to the maximum size passed. For example, if the limit is 10GB and we
    have tensors of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not
    [6+2+2GB], [6+2GB], [6GB].

    <Tip warning={true}>

    If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a
    size greater than `max_shard_size`.

    </Tip>

    Args:
        state_dict (`Dict[str, Tensor]`):
            The state dictionary to save.
        filename_pattern (`str`, *optional*):
            The pattern to generate the files names in which the model will be saved. Pattern must be a string that
            can be formatted with `filename_pattern.format(suffix=...)` and must contain the keyword `suffix`
            Defaults to `"tf_model{suffix}.h5"`.
        max_shard_size (`int` or `str`, *optional*):
            The maximum size of each shard, in bytes. Defaults to 5GB.

    Returns:
        [`StateDictSplit`]: A `StateDictSplit` object containing the shards and the index to retrieve them.
    """
    return split_state_dict_into_shards_factory(
        state_dict,
        max_shard_size=max_shard_size,
        filename_pattern=filename_pattern,
        get_tensor_size=get_tensor_size,
    )


def get_tensor_size(tensor: "tf.Tensor") -> int:
    # Return `math.ceil` since dtype byte size can be a float (e.g., 0.125 for tf.bool).
    # Better to overestimate than underestimate.
    return math.ceil(tensor.numpy().size * _dtype_byte_size_tf(tensor.dtype))


def _dtype_byte_size_tf(dtype) -> float:
    """
    Returns the size (in bytes) occupied by one parameter of type `dtype`.
    Taken from https://github.com/huggingface/transformers/blob/74d9d0cebb0263a3f8ab9c280569170cc74651d0/src/transformers/modeling_tf_utils.py#L608.
    NOTE: why not `tensor.numpy().nbytes`?
    Example:
    ```py
    >>> _dtype_byte_size(tf.float32)
    4
    ```
    """
    import tensorflow as tf

    if dtype == tf.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", dtype.name)
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
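
The TensorFlow helper estimates dtype width from the trailing digits of the dtype name, with `tf.bool` special-cased as 1/8 byte. The regex step can be illustrated without TensorFlow installed; the helper name below is illustrative and plain strings stand in for `dtype.name`:

```python
import re


def bits_from_dtype_name(name: str) -> int:
    # Same pattern as _dtype_byte_size_tf: grab the trailing digits of the dtype name.
    match = re.search(r"[^\d](\d+)$", name)
    if match is None:
        raise ValueError(f"not a sized dtype name: {name}")
    return int(match.groups()[0])


for name in ("float32", "bfloat16", "int8", "complex128"):
    print(name, bits_from_dtype_name(name) // 8, "bytes")
```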
venv/lib/python3.10/site-packages/huggingface_hub/serialization/_torch.py
ADDED
@@ -0,0 +1,200 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains pytorch-specific helpers."""

import importlib
from functools import lru_cache
from typing import TYPE_CHECKING, Dict, Tuple

from ._base import FILENAME_PATTERN, MAX_SHARD_SIZE, StateDictSplit, split_state_dict_into_shards_factory


if TYPE_CHECKING:
    import torch


def split_torch_state_dict_into_shards(
    state_dict: Dict[str, "torch.Tensor"],
    *,
    filename_pattern: str = FILENAME_PATTERN,
    max_shard_size: int = MAX_SHARD_SIZE,
) -> StateDictSplit:
    """
    Split a model state dictionary in shards so that each shard is smaller than a given size.

    The shards are determined by iterating through the `state_dict` in the order of its keys. There is no optimization
    made to make each shard as close as possible to the maximum size passed. For example, if the limit is 10GB and we
    have tensors of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not
    [6+2+2GB], [6+2GB], [6GB].

    <Tip warning={true}>

    If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a
    size greater than `max_shard_size`.

    </Tip>

    Args:
        state_dict (`Dict[str, torch.Tensor]`):
            The state dictionary to save.
        filename_pattern (`str`, *optional*):
            The pattern to generate the files names in which the model will be saved. Pattern must be a string that
            can be formatted with `filename_pattern.format(suffix=...)` and must contain the keyword `suffix`
            Defaults to `"model{suffix}.safetensors"`.
        max_shard_size (`int` or `str`, *optional*):
            The maximum size of each shard, in bytes. Defaults to 5GB.

    Returns:
        [`StateDictSplit`]: A `StateDictSplit` object containing the shards and the index to retrieve them.

    Example:
    ```py
    >>> import json
    >>> import os
    >>> from safetensors.torch import save_file as safe_save_file
    >>> from huggingface_hub import split_torch_state_dict_into_shards

    >>> def save_state_dict(state_dict: Dict[str, torch.Tensor], save_directory: str):
    ...     state_dict_split = split_torch_state_dict_into_shards(state_dict)
    ...     for filename, tensors in state_dict_split.filename_to_tensors.items():
    ...         shard = {tensor: state_dict[tensor] for tensor in tensors}
    ...         safe_save_file(
    ...             shard,
    ...             os.path.join(save_directory, filename),
    ...             metadata={"format": "pt"},
    ...         )
    ...     if state_dict_split.is_sharded:
    ...         index = {
    ...             "metadata": state_dict_split.metadata,
    ...             "weight_map": state_dict_split.tensor_to_filename,
    ...         }
    ...         with open(os.path.join(save_directory, "model.safetensors.index.json"), "w") as f:
    ...             f.write(json.dumps(index, indent=2))
    ```
    """
    return split_state_dict_into_shards_factory(
        state_dict,
        max_shard_size=max_shard_size,
        filename_pattern=filename_pattern,
        get_tensor_size=get_tensor_size,
        get_storage_id=get_storage_id,
    )


def get_storage_id(tensor: "torch.Tensor") -> Tuple["torch.device", int, int]:
    """
    Return unique identifier to a tensor storage.

    Multiple different tensors can share the same underlying storage. For
    example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is
    guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with
    non-overlapping lifetimes may have the same id.

    Taken from https://github.com/huggingface/transformers/blob/1ecf5f7c982d761b4daaa96719d162c324187c64/src/transformers/pytorch_utils.py#L278.
    """
    if tensor.device.type == "xla" and is_torch_tpu_available():
        # NOTE: xla tensors dont have storage
        # use some other unique id to distinguish.
        # this is a XLA tensor, it must be created using torch_xla's
        # device. So the following import is safe:
        import torch_xla

        unique_id = torch_xla._XLAC._xla_get_tensor_id(tensor)
    else:
        unique_id = storage_ptr(tensor)

    return tensor.device, unique_id, get_storage_size(tensor)


def get_tensor_size(tensor: "torch.Tensor") -> int:
    return tensor.numel() * tensor.element_size()


@lru_cache()
def is_torch_tpu_available(check_device=True):
    """
    Checks if `torch_xla` is installed and potentially if a TPU is in the environment

    Taken from https://github.com/huggingface/transformers/blob/1ecf5f7c982d761b4daaa96719d162c324187c64/src/transformers/utils/import_utils.py#L463.
    """
    if importlib.util.find_spec("torch_xla") is not None:
        if check_device:
            # We need to check if `xla_device` can be found, will raise a RuntimeError if not
            try:
                import torch_xla.core.xla_model as xm

                _ = xm.xla_device()
                return True
            except RuntimeError:
                return False
        return True
    return False


def storage_ptr(tensor: "torch.Tensor") -> int:
    """
    Taken from https://github.com/huggingface/safetensors/blob/08db34094e9e59e2f9218f2df133b7b4aaff5a99/bindings/python/py_src/safetensors/torch.py#L11C1-L20C21.
    """
    try:
        return tensor.untyped_storage().data_ptr()
    except Exception:
        # Fallback for torch==1.10
        try:
            return tensor.storage().data_ptr()
        except NotImplementedError:
            # Fallback for meta storage
            return 0


def get_storage_size(tensor: "torch.Tensor") -> int:
    """
    Taken from https://github.com/huggingface/safetensors/blob/08db34094e9e59e2f9218f2df133b7b4aaff5a99/bindings/python/py_src/safetensors/torch.py#L31C1-L41C59
    """
    try:
        return tensor.untyped_storage().nbytes()
    except AttributeError:
        # Fallback for torch==1.10
        try:
            return tensor.storage().size() * _get_dtype_size(tensor.dtype)
        except NotImplementedError:
            # Fallback for meta storage
            # On torch >=2.0 this is the tensor size
            return tensor.nelement() * _get_dtype_size(tensor.dtype)


@lru_cache()
def _get_dtype_size(dtype: "torch.dtype") -> int:
    """
    Taken from https://github.com/huggingface/safetensors/blob/08db34094e9e59e2f9218f2df133b7b4aaff5a99/bindings/python/py_src/safetensors/torch.py#L344
    """
    import torch

    # torch.float8 formats require 2.1; we do not support these dtypes on earlier versions
    _float8_e4m3fn = getattr(torch, "float8_e4m3fn", None)
    _float8_e5m2 = getattr(torch, "float8_e5m2", None)
    _SIZE = {
        torch.int64: 8,
        torch.float32: 4,
        torch.int32: 4,
        torch.bfloat16: 2,
        torch.float16: 2,
        torch.int16: 2,
        torch.uint8: 1,
        torch.int8: 1,
        torch.bool: 1,
        torch.float64: 8,
        _float8_e4m3fn: 1,
        _float8_e5m2: 1,
    }
    return _SIZE[dtype]
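
The torch variant adds `get_storage_id` so that tensors sharing the same underlying storage (for example tied weights) end up in the same shard. A small sketch, assuming the `torch` build installed in this venv; the toy tensors and tiny shard limit are illustrative only:

```python
import torch
from huggingface_hub.serialization import split_torch_state_dict_into_shards

weight = torch.zeros(256, 256)  # ~256 KB in float32
state_dict = {
    "decoder.weight": weight,
    "lm_head.weight": weight,   # tied: same underlying storage
    "decoder.bias": torch.zeros(256),
}

split = split_torch_state_dict_into_shards(state_dict, max_shard_size=100_000)

# Tied tensors are assigned to the same file even though each one alone exceeds the limit.
assert split.tensor_to_filename["decoder.weight"] == split.tensor_to_filename["lm_head.weight"]
print(split.metadata["total_size"], split.filename_to_tensors)
```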
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (3.54 kB)
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_cache_assets.cpython-310.pyc
ADDED
Binary file (5.1 kB)
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_cache_manager.cpython-310.pyc
ADDED
Binary file (25 kB)
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_chunk_utils.cpython-310.pyc
ADDED
Binary file (1.73 kB)
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_datetime.cpython-310.pyc
ADDED
Binary file (1.74 kB)
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_deprecation.cpython-310.pyc
ADDED
Binary file (4.96 kB)
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_errors.cpython-310.pyc
ADDED
Binary file (13.1 kB)
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_experimental.cpython-310.pyc
ADDED
Binary file (1.93 kB)
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_fixes.cpython-310.pyc
ADDED
Binary file (2.34 kB)
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_git_credential.cpython-310.pyc
ADDED
Binary file (4.13 kB)
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_hf_folder.cpython-310.pyc
ADDED
Binary file (2.72 kB)
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_http.cpython-310.pyc
ADDED
Binary file (11.5 kB)
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_runtime.cpython-310.pyc
ADDED
Binary file (9.31 kB)
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_safetensors.cpython-310.pyc
ADDED
Binary file (5.6 kB)
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_subprocess.cpython-310.pyc
ADDED
Binary file (3.93 kB)
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_telemetry.cpython-310.pyc
ADDED
Binary file (4.4 kB)
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_token.cpython-310.pyc
ADDED
Binary file (3.93 kB)
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_typing.cpython-310.pyc
ADDED
Binary file (1.72 kB)
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_validators.cpython-310.pyc
ADDED
Binary file (8.15 kB)
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/endpoint_helpers.cpython-310.pyc
ADDED
Binary file (9.24 kB)
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/logging.cpython-310.pyc
ADDED
Binary file (4.65 kB)
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/sha.cpython-310.pyc
ADDED
Binary file (1.09 kB)
venv/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/tqdm.cpython-310.pyc
ADDED
Binary file (5.61 kB)
venv/lib/python3.10/site-packages/sacrebleu/__init__.py
ADDED
@@ -0,0 +1,43 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

__version__ = '2.4.2'
__description__ = 'Hassle-free computation of shareable, comparable, and reproducible BLEU, chrF, and TER scores'


from .utils import smart_open, SACREBLEU_DIR, download_test_set
from .utils import get_source_file, get_reference_files
from .utils import get_available_testsets, get_langpairs_for_testset
from .metrics.helpers import extract_word_ngrams, extract_char_ngrams
from .dataset import DATASETS
from .metrics import BLEU, CHRF, TER

# Backward compatibility functions for old style API access (<= 1.4.10)
from .compat import corpus_bleu, raw_corpus_bleu, sentence_bleu
from .compat import corpus_chrf, sentence_chrf
from .compat import corpus_ter, sentence_ter

__all__ = [
    'smart_open', 'SACREBLEU_DIR', 'download_test_set',
    'get_source_file', 'get_reference_files',
    'get_available_testsets', 'get_langpairs_for_testset',
    'extract_word_ngrams', 'extract_char_ngrams',
    'DATASETS',
    'BLEU', 'CHRF', 'TER',
    'corpus_bleu', 'raw_corpus_bleu', 'sentence_bleu',
    'corpus_chrf', 'sentence_chrf',
    'corpus_ter', 'sentence_ter'
]
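
The package re-exports both the object-based metrics (`BLEU`, `CHRF`, `TER`) and the pre-1.4.11 functional wrappers from `compat`. A short sketch of the object-based usage with toy sentences, assuming the sacrebleu version pinned in this venv:

```python
from sacrebleu import BLEU, CHRF

hyps = ["the cat sat on the mat", "a quick brown fox"]
refs = [["the cat sat on the mat", "a fast brown fox"]]  # one reference stream, aligned with hyps

bleu = BLEU()
chrf = CHRF()
print(bleu.corpus_score(hyps, refs))  # corpus-level BLEU score object
print(chrf.corpus_score(hyps, refs))  # corpus-level chrF score object
```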
venv/lib/python3.10/site-packages/sacrebleu/__main__.py
ADDED
@@ -0,0 +1,27 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

"""
SacreBLEU provides hassle-free computation of shareable, comparable, and reproducible BLEU scores.
Inspired by Rico Sennrich's `multi-bleu-detok.perl`, it produces the official WMT scores but works with plain text.
It also knows all the standard test sets and handles downloading, processing, and tokenization for you.

See the [README.md] file for more information.
"""
from .sacrebleu import main

if __name__ == '__main__':
    main()
venv/lib/python3.10/site-packages/sacrebleu/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.08 kB)
venv/lib/python3.10/site-packages/sacrebleu/__pycache__/__main__.cpython-310.pyc
ADDED
Binary file (647 Bytes)
venv/lib/python3.10/site-packages/sacrebleu/__pycache__/compat.cpython-310.pyc
ADDED
Binary file (7.74 kB)
venv/lib/python3.10/site-packages/sacrebleu/__pycache__/sacrebleu.cpython-310.pyc
ADDED
Binary file (16.7 kB)
venv/lib/python3.10/site-packages/sacrebleu/__pycache__/significance.cpython-310.pyc
ADDED
Binary file (13.8 kB)
venv/lib/python3.10/site-packages/sacrebleu/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (19.5 kB)
venv/lib/python3.10/site-packages/sacrebleu/compat.py
ADDED
@@ -0,0 +1,205 @@
from typing import Sequence, Optional

from .metrics import BLEU, CHRF, TER, BLEUScore, CHRFScore, TERScore


######################################################################
# Backward compatibility functions for old style API access (< 1.4.11)
######################################################################
def corpus_bleu(hypotheses: Sequence[str],
                references: Sequence[Sequence[str]],
                smooth_method='exp',
                smooth_value=None,
                force=False,
                lowercase=False,
                tokenize=BLEU.TOKENIZER_DEFAULT,
                use_effective_order=False) -> BLEUScore:
    """Computes BLEU for a corpus against a single (or multiple) reference(s).
    This is the main CLI entry point for computing BLEU between a system output
    and a reference sentence.

    :param hypotheses: A sequence of hypothesis strings.
    :param references: A sequence of reference documents with document being
        defined as a sequence of reference strings.
    :param smooth_method: The smoothing method to use ('floor', 'add-k', 'exp' or 'none')
    :param smooth_value: The smoothing value for `floor` and `add-k` methods. `None` falls back to default value.
    :param force: Ignore data that looks already tokenized
    :param lowercase: Lowercase the data
    :param tokenize: The tokenizer to use
    :param use_effective_order: Don't take into account n-gram orders without any match.
    :return: a `BLEUScore` object
    """
    metric = BLEU(
        lowercase=lowercase, force=force, tokenize=tokenize,
        smooth_method=smooth_method, smooth_value=smooth_value,
        effective_order=use_effective_order)

    return metric.corpus_score(hypotheses, references)


def raw_corpus_bleu(hypotheses: Sequence[str],
                    references: Sequence[Sequence[str]],
                    smooth_value: Optional[float] = BLEU.SMOOTH_DEFAULTS['floor']) -> BLEUScore:
    """Computes BLEU for a corpus against a single (or multiple) reference(s).
    This convenience function assumes a particular set of arguments i.e.
    it disables tokenization and applies a `floor` smoothing with value `0.1`.

    This convenience call does not apply any tokenization at all,
    neither to the system output nor the reference. It just computes
    BLEU on the "raw corpus" (hence the name).

    :param hypotheses: A sequence of hypothesis strings.
    :param references: A sequence of reference documents with document being
        defined as a sequence of reference strings.
    :param smooth_value: The smoothing value for `floor`. If not given, the default of 0.1 is used.
    :return: Returns a `BLEUScore` object.

    """
    return corpus_bleu(
        hypotheses, references, smooth_method='floor',
        smooth_value=smooth_value, force=True, tokenize='none',
        use_effective_order=True)


def sentence_bleu(hypothesis: str,
                  references: Sequence[str],
                  smooth_method: str = 'exp',
                  smooth_value: Optional[float] = None,
                  lowercase: bool = False,
                  tokenize=BLEU.TOKENIZER_DEFAULT,
                  use_effective_order: bool = True) -> BLEUScore:
    """
    Computes BLEU for a single sentence against a single (or multiple) reference(s).

    Disclaimer: Computing BLEU at the sentence level is not its intended use as
    BLEU is a corpus-level metric.

    :param hypothesis: A single hypothesis string.
    :param references: A sequence of reference strings.
    :param smooth_method: The smoothing method to use ('floor', 'add-k', 'exp' or 'none')
    :param smooth_value: The smoothing value for `floor` and `add-k` methods. `None` falls back to default value.
    :param lowercase: Lowercase the data
    :param tokenize: The tokenizer to use
    :param use_effective_order: Don't take into account n-gram orders without any match.
    :return: Returns a `BLEUScore` object.
    """
    metric = BLEU(
        lowercase=lowercase, tokenize=tokenize, force=False,
        smooth_method=smooth_method, smooth_value=smooth_value,
        effective_order=use_effective_order)

    return metric.sentence_score(hypothesis, references)


def corpus_chrf(hypotheses: Sequence[str],
                references: Sequence[Sequence[str]],
                char_order: int = CHRF.CHAR_ORDER,
                word_order: int = CHRF.WORD_ORDER,
                beta: int = CHRF.BETA,
                remove_whitespace: bool = True,
                eps_smoothing: bool = False) -> CHRFScore:
    """
    Computes chrF for a corpus against a single (or multiple) reference(s).
    If `word_order` equals to 2, the metric is referred to as chrF++.

    :param hypotheses: A sequence of hypothesis strings.
    :param references: A sequence of reference documents with document being
        defined as a sequence of reference strings.
    :param char_order: Character n-gram order.
    :param word_order: Word n-gram order. If equals to 2, the metric is referred to as chrF++.
    :param beta: Determine the importance of recall w.r.t precision.
    :param eps_smoothing: If `True`, applies epsilon smoothing similar
        to reference chrF++.py, NLTK and Moses implementations. Otherwise,
        it takes into account effective match order similar to sacreBLEU < 2.0.0.
    :param remove_whitespace: If `True`, removes whitespaces prior to character n-gram extraction.
    :return: A `CHRFScore` object.
    """
    metric = CHRF(
        char_order=char_order,
        word_order=word_order,
        beta=beta,
        whitespace=not remove_whitespace,
        eps_smoothing=eps_smoothing)
    return metric.corpus_score(hypotheses, references)


def sentence_chrf(hypothesis: str,
                  references: Sequence[str],
                  char_order: int = CHRF.CHAR_ORDER,
                  word_order: int = CHRF.WORD_ORDER,
                  beta: int = CHRF.BETA,
                  remove_whitespace: bool = True,
                  eps_smoothing: bool = False) -> CHRFScore:
    """
    Computes chrF for a single sentence against a single (or multiple) reference(s).
    If `word_order` equals to 2, the metric is referred to as chrF++.

    :param hypothesis: A single hypothesis string.
    :param references: A sequence of reference strings.
    :param char_order: Character n-gram order.
    :param word_order: Word n-gram order. If equals to 2, the metric is referred to as chrF++.
    :param beta: Determine the importance of recall w.r.t precision.
    :param eps_smoothing: If `True`, applies epsilon smoothing similar
        to reference chrF++.py, NLTK and Moses implementations. Otherwise,
        it takes into account effective match order similar to sacreBLEU < 2.0.0.
    :param remove_whitespace: If `True`, removes whitespaces prior to character n-gram extraction.
    :return: A `CHRFScore` object.
    """
    metric = CHRF(
        char_order=char_order,
        word_order=word_order,
        beta=beta,
        whitespace=not remove_whitespace,
        eps_smoothing=eps_smoothing)
    return metric.sentence_score(hypothesis, references)


def corpus_ter(hypotheses: Sequence[str],
               references: Sequence[Sequence[str]],
               normalized: bool = False,
               no_punct: bool = False,
               asian_support: bool = False,
               case_sensitive: bool = False) -> TERScore:
    """
    Computes TER for a corpus against a single (or multiple) reference(s).

    :param hypotheses: A sequence of hypothesis strings.
    :param references: A sequence of reference documents with document being
        defined as a sequence of reference strings.
    :param normalized: Enable character normalization.
    :param no_punct: Remove punctuation.
    :param asian_support: Enable special treatment of Asian characters.
    :param case_sensitive: Enables case-sensitivity.
    :return: A `TERScore` object.
    """
    metric = TER(
        normalized=normalized,
        no_punct=no_punct,
        asian_support=asian_support,
        case_sensitive=case_sensitive)
    return metric.corpus_score(hypotheses, references)


def sentence_ter(hypothesis: str,
                 references: Sequence[str],
                 normalized: bool = False,
                 no_punct: bool = False,
                 asian_support: bool = False,
                 case_sensitive: bool = False) -> TERScore:
    """
    Computes TER for a single hypothesis against a single (or multiple) reference(s).

    :param hypothesis: A single hypothesis string.
    :param references: A sequence of reference strings.
    :param normalized: Enable character normalization.
    :param no_punct: Remove punctuation.
    :param asian_support: Enable special treatment of Asian characters.
    :param case_sensitive: Enable case-sensitivity.
    :return: A `TERScore` object.
    """
    metric = TER(
        normalized=normalized,
        no_punct=no_punct,
        asian_support=asian_support,
        case_sensitive=case_sensitive)
    return metric.sentence_score(hypothesis, references)
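
The wrappers above simply build a metric object per call, so the old-style functions stay available as one-liners. A short sketch of the backward-compatible API with toy data:

```python
from sacrebleu.compat import corpus_bleu, sentence_bleu, sentence_ter

hyps = ["the cat sat on the mat"]
refs = [["the cat is on the mat"]]  # one reference stream, aligned with hyps

print(corpus_bleu(hyps, refs).score)          # corpus-level BLEU
print(sentence_bleu(hyps[0], refs[0]).score)  # sentence-level BLEU (effective order on by default)
print(sentence_ter(hyps[0], refs[0]).score)   # sentence-level TER
```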
venv/lib/python3.10/site-packages/sacrebleu/dataset/__init__.py
ADDED
The diff for this file is too large to render.
See raw diff
venv/lib/python3.10/site-packages/sacrebleu/dataset/__main__.py
ADDED
@@ -0,0 +1,45 @@
import sys

from . import DATASETS

try:
    cmd = sys.argv[1]
except IndexError:
    print(f"Usage: {sys.argv[0]} --check | --dump")
    sys.exit(1)

if cmd == "--check":
    import hashlib
    import urllib.request

    url_md5 = {}

    for item in DATASETS.values():
        if item.md5 is not None:
            assert item.data
            assert item.md5
            assert len(item.data) == len(item.md5)
            pairs = zip(item.data, item.md5)
            for url, md5_hash in pairs:
                url_md5[url] = md5_hash

    for url, md5_hash in url_md5.items():
        try:
            print("Downloading ", url)
            with urllib.request.urlopen(url) as f:
                data = f.read()
        except Exception as exc:
            raise (exc)

        if hashlib.md5(data).hexdigest() != md5_hash:
            print("MD5 check failed for", url)
elif cmd == "--dump":
    import re

    # Dumps a table in markdown format
    print(f'| {"Dataset":<30} | {"Description":<115} |')
    header = "| " + "-" * 30 + " | " + "-" * 115 + " |"
    print(header)
    for name, item in DATASETS.items():
        desc = re.sub(r"(http[s]?:\/\/\S+)", r"[URL](\1)", str(item.description))
        print(f"| {name:<30} | {desc:<115} |")
venv/lib/python3.10/site-packages/sacrebleu/dataset/iwslt_xml.py
ADDED
@@ -0,0 +1,8 @@
from .fake_sgml import FakeSGMLDataset


class IWSLTXMLDataset(FakeSGMLDataset):
    """IWSLT dataset format. Can be parsed with the lxml parser."""

    # Same as FakeSGMLDataset. Nothing to do here.
    pass
venv/lib/python3.10/site-packages/sacrebleu/dataset/plain_text.py
ADDED
@@ -0,0 +1,36 @@
import os

from ..utils import smart_open
from .base import Dataset


class PlainTextDataset(Dataset):
    """
    The plain text format. Data is separated into source and reference files.
    Each line of the two files is aligned.
    """

    def process_to_text(self, langpair=None):
        """Processes raw files to plain text files.

        :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed.
        """
        # ensure that the dataset is downloaded
        self.maybe_download()
        langpairs = self._get_langpair_metadata(langpair)

        for langpair in langpairs:
            fieldnames = self.fieldnames(langpair)
            origin_files = [
                os.path.join(self._rawdir, path) for path in langpairs[langpair]
            ]

            for field, origin_file in zip(fieldnames, origin_files):

                origin_file = os.path.join(self._rawdir, origin_file)
                output_file = self._get_txt_file_path(langpair, field)

                with smart_open(origin_file) as fin:
                    with smart_open(output_file, "wt") as fout:
                        for line in fin:
                            print(line.rstrip(), file=fout)
venv/lib/python3.10/site-packages/sacrebleu/dataset/tsv.py
ADDED
@@ -0,0 +1,61 @@
import os

from ..utils import smart_open
from .base import Dataset


class TSVDataset(Dataset):
    """
    The format used by the MTNT datasets. Data is in a single TSV file.
    """

    @staticmethod
    def _split_index_and_filename(meta, field):
        """
        Splits the index and filename from a metadata string.

        e.g. meta="3:en-de.tsv", field=[Any value] -> (3, "en-de.tsv")
             "en-de.tsv", field="src" -> (1, "en-de.tsv")
             "en-de.tsv", field="tgt" -> (2, "en-de.tsv")
        """
        arr = meta.split(":")
        if len(arr) == 2:
            try:
                index = int(arr[0])
            except ValueError:
                raise Exception(f"Invalid meta for TSVDataset: {meta}")
            return index, arr[1]

        else:
            index = 0 if field == "src" else 1
            return index, meta

    def process_to_text(self, langpair=None):
        """Processes raw files to plain text files.

        :param langpair: The language pair to process. e.g. "en-de". If None, all files will be processed.
        """
        # ensure that the dataset is downloaded
        self.maybe_download()
        langpairs = self._get_langpair_metadata(langpair)

        for langpair in langpairs:
            fieldnames = self.fieldnames(langpair)
            origin_files = [
                os.path.join(self._rawdir, path) for path in langpairs[langpair]
            ]

            for field, origin_file, meta in zip(
                fieldnames, origin_files, langpairs[langpair]
            ):
                index, origin_file = self._split_index_and_filename(meta, field)

                origin_file = os.path.join(self._rawdir, origin_file)
                output_file = self._get_txt_file_path(langpair, field)

                with smart_open(origin_file) as fin:
                    with smart_open(output_file, "wt") as fout:
                        for line in fin:
                            # be careful with empty source or reference lines
                            # MTNT2019/ja-en.final.tsv:632 `'1033\t718\t\t\n'`
                            print(line.rstrip("\n").split("\t")[index], file=fout)
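
`_split_index_and_filename` resolves which TSV column each field comes from: an explicit `N:` prefix wins, otherwise the code maps `src` to column 0 and everything else to column 1. A tiny sketch of that behaviour; it calls a private helper, so this is illustrative only and assumes the sacrebleu version in this venv:

```python
from sacrebleu.dataset.tsv import TSVDataset

print(TSVDataset._split_index_and_filename("3:en-de.tsv", "src"))  # (3, 'en-de.tsv')
print(TSVDataset._split_index_and_filename("en-de.tsv", "src"))    # (0, 'en-de.tsv')
print(TSVDataset._split_index_and_filename("en-de.tsv", "ref"))    # (1, 'en-de.tsv')
```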
venv/lib/python3.10/site-packages/sacrebleu/py.typed
ADDED
File without changes