import warnings
from typing import Dict, List, Tuple

import sentencepiece as spm
from packaging import version
from tokenizers import AddedToken, Regex, Tokenizer, decoders, models, normalizers, pre_tokenizers, processors, trainers
from tokenizers.models import BPE, Unigram, WordPiece
from transformers import PreTrainedTokenizerFast, convert_slow_tokenizer


def _get_prepend_scheme(add_prefix_space: bool, original_tokenizer) -> str:
    """Map the slow tokenizer's settings to a Metaspace prepend scheme.

    "always" prepends the replacement character to every sequence, "first"
    only to the first word (non-legacy tokenizers), and "never" disables it.
    """
    if add_prefix_space:
        prepend_scheme = "always"
        if not getattr(original_tokenizer, "legacy", True):
            prepend_scheme = "first"
    else:
        prepend_scheme = "never"
    return prepend_scheme
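

# A minimal sketch of how the prepend scheme is typically consumed (not used by
# SpmConverter2 below): in recent `tokenizers` releases, Metaspace accepts a
# `prepend_scheme` argument (assumption: tokenizers >= 0.14; older releases
# exposed `add_prefix_space` instead). Passing original_tokenizer=None falls
# back to the legacy behaviour, i.e. "always".
_example_pre_tokenizer = pre_tokenizers.Metaspace(
    replacement="▁",
    prepend_scheme=_get_prepend_scheme(add_prefix_space=True, original_tokenizer=None),
)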


class SpmConverter2(convert_slow_tokenizer.SpmConverter):
    """SpmConverter variant that always builds a BPE model for the fast tokenizer."""

    def __init__(self, *args):
        convert_slow_tokenizer.requires_backends(self, "protobuf")
        super().__init__(*args)

        # Re-parse the SentencePiece model proto from the vocab file so that
        # self.proto is populated (mirrors the parent constructor).
        model_pb2 = convert_slow_tokenizer.import_protobuf()
        m = model_pb2.ModelProto()
        with open(self.original_tokenizer.vocab_file, "rb") as f:
            m.ParseFromString(f.read())
        self.proto = m

        if self.proto.trainer_spec.byte_fallback:
            if not getattr(self, "handle_byte_fallback", None):
                warnings.warn(
                    "The sentencepiece tokenizer that you are converting to a fast tokenizer uses the byte fallback option"
                    " which is not implemented in the fast tokenizers. In practice this means that the fast version of the"
                    " tokenizer can produce unknown tokens whereas the sentencepiece version would have converted these "
                    "unknown tokens into a sequence of byte tokens matching the original piece of text."
                )

    def tokenizer(self, proto):
        # model_type and unk_id are read from the proto but not used below:
        # this converter always builds a BPE model whose merges are recovered
        # directly from the original .model file.
        model_type = proto.trainer_spec.model_type
        vocab_scores = self.vocab(proto)
        unk_id = self.unk_id(proto)
        _, merges = convert_slow_tokenizer.SentencePieceExtractor(
            self.original_tokenizer.vocab_file
        ).extract()
        bpe_vocab = {word: i for i, (word, score) in enumerate(vocab_scores)}
        tokenizer = Tokenizer(
            BPE(
                bpe_vocab,
                merges,
                unk_token=proto.trainer_spec.unk_piece,
                fuse_unk=True,
            )
        )
        return tokenizer
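

# Optional sanity check, a minimal sketch rather than part of the original
# conversion flow: encode one sample string with both the raw SentencePiece
# processor and the converted fast tokenizer and warn if the IDs differ.
# `sample_text` is an arbitrary placeholder.
def _compare_tokenizations(spm_processor, fast_tokenizer, sample_text="testing the converted tokenizer"):
    spm_ids = spm_processor.encode(sample_text, out_type=int)
    fast_ids = fast_tokenizer.encode(sample_text, add_special_tokens=False)
    if spm_ids != fast_ids:
        warnings.warn(
            "Token ID mismatch on sample text:\n"
            f"  sentencepiece: {spm_ids}\n"
            f"  fast:          {fast_ids}"
        )
    return spm_ids, fast_ids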


# Usage
spm_tokenizer = spm.SentencePieceProcessor(
    model_file="/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k/all.model"
)
# SpmConverter expects the slow tokenizer to expose a `vocab_file` attribute,
# so attach the model path to the SentencePiece processor directly.
spm_tokenizer.vocab_file = "/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k/all.model"

spm_converter = SpmConverter2(spm_tokenizer)
converted = spm_converter.converted()
converted.save('/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/converted.json')

tok = PreTrainedTokenizerFast(
    tokenizer_file='/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/converted.json',
    clean_up_tokenization_spaces=False,
    pad_token='<PAD>',
    unk_token='<UNK>',
    bos_token='<BOS>',
    eos_token='<EOS>',
    mask_token='<MASK>',
    model_max_length=1024,
    padding_side='right',
    truncation_side='right',
)
tok.save_pretrained('/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/ConvertedTokenizer')
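
# A minimal reload check (assumption: the save directory above is readable from
# this environment): PreTrainedTokenizerFast.from_pretrained restores the
# tokenizer file together with the special tokens configured above, and the
# helper defined earlier compares it against the raw SentencePiece output.
reloaded = PreTrainedTokenizerFast.from_pretrained(
    '/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/ConvertedTokenizer'
)
print(reloaded('testing the converted tokenizer'))
_compare_tokenizations(spm_tokenizer, reloaded)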