import os
import sentencepiece as spm
from datasets import load_dataset
# Load the pre-trained SentencePiece model
sp = spm.SentencePieceProcessor()
sp.load('wiki_en.model')
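# Optional sanity check (not in the original script): report the size of the
# base vocabulary so it can be compared with the extended model later.
print('Base vocab size:', sp.get_piece_size())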
# Load the Hindi dataset
dataset_hi = load_dataset('ai4bharat/samanantar', 'hi', split='train', cache_dir='/sml1/atul/CENTRAL_CACHE')
test_hi = dataset_hi['tgt'][:1000]  # Use the first 1000 target-side (Hindi) sentences for extending the vocabulary
with open('test_hi.txt', 'w', encoding='utf-8') as f:
    for sample in test_hi:
        f.write(sample + '\n')
# Concatenate all sentences into a single string
# (not required for training below, since SentencePieceTrainer reads directly from test_hi.txt)
additional_text = "\n".join(test_hi)
# Train a new SentencePiece model on the Hindi corpus. Note that this does not
# modify wiki_en.model in place; it writes a separate wiki_extended.model
# (and wiki_extended.vocab) with a 3000-piece vocabulary.
spm.SentencePieceTrainer.Train(
    input="test_hi.txt", model_prefix='wiki_extended', vocab_size=3000
)
# Load the newly trained model from the current working directory
sp = spm.SentencePieceProcessor()
model_path = 'wiki_extended.model'
sp.Load(model_path)
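# Optional check: encode one Hindi sentence with the newly trained model to
# confirm it produces Hindi subword pieces. The fallback sentence is purely illustrative.
sample_sentence = test_hi[0] if test_hi else 'नमस्ते दुनिया'
print(sp.encode(sample_sentence, out_type=str))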
# To write the trained files to a different directory, pass a full path in
# model_prefix when training, e.g. model_prefix='/path/to/wiki_extended',
# or copy the generated .model and .vocab files afterwards.
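# A minimal sketch of how the new Hindi pieces could be merged back into the
# original English model so that a single tokenizer covers both languages
# (the retraining above produced a separate model rather than extending wiki_en.model).
# This relies on the protobuf bindings shipped with the sentencepiece package;
# the output file name 'wiki_en_hi.model' and the 0.0 score assigned to new
# pieces are illustrative assumptions, not values from the original script.
from sentencepiece import sentencepiece_model_pb2 as sp_pb2

base_proto = sp_pb2.ModelProto()
with open('wiki_en.model', 'rb') as f:
    base_proto.ParseFromString(f.read())

extra_proto = sp_pb2.ModelProto()
with open('wiki_extended.model', 'rb') as f:
    extra_proto.ParseFromString(f.read())

# Append only pieces that are not already present in the base vocabulary
existing_pieces = {p.piece for p in base_proto.pieces}
for p in extra_proto.pieces:
    if p.piece not in existing_pieces:
        new_piece = sp_pb2.ModelProto.SentencePiece()
        new_piece.piece = p.piece
        new_piece.score = 0.0  # heuristic score for the added pieces
        base_proto.pieces.append(new_piece)

with open('wiki_en_hi.model', 'wb') as f:
    f.write(base_proto.SerializeToString())

# The merged model can then be loaded like any other SentencePiece model:
# sp_merged = spm.SentencePieceProcessor()
# sp_merged.Load('wiki_en_hi.model')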