# app.py: train a WordPiece tokenizer on wikitext-2 and encode a sentence pair.
from tokenizers import models, pre_tokenizers, trainers, Tokenizer

# Build a WordPiece tokenizer with an explicit unknown token.
tokenizer = Tokenizer(model=models.WordPiece(unk_token="[UNK]"))

# Split on whitespace and punctuation before training; without a pre-tokenizer,
# WordPiece would treat each whole line of the corpus as a single word.
tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()

special_tokens = ["[UNK]", "[PAD]", "[CLS]", "[SEP]", "[MASK]"]
trainer = trainers.WordPieceTrainer(vocab_size=25000, special_tokens=special_tokens)

# Train on the local corpus file, then encode a pair of sentences.
tokenizer.train(["wikitext-2.txt"], trainer=trainer)
encoding = tokenizer.encode("Let's test this tokenizer...", "on a pair of sentences.")
print(encoding.ids)
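
# Optional follow-up, a minimal sketch beyond the original script: without a
# post-processor, the trained tokenizer does not insert [CLS]/[SEP] around the
# encoded pair even though they are in the vocabulary. TemplateProcessing adds them.
from tokenizers.processors import TemplateProcessing

tokenizer.post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[
        ("[CLS]", tokenizer.token_to_id("[CLS]")),
        ("[SEP]", tokenizer.token_to_id("[SEP]")),
    ],
)
encoding = tokenizer.encode("Let's test this tokenizer...", "on a pair of sentences.")
print(encoding.tokens)  # now starts with [CLS] and separates the pair with [SEP]

# Persist the trained tokenizer so training need not be repeated;
# "tokenizer.json" is an assumed output filename.
tokenizer.save("tokenizer.json")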