import sentencepiece as spm
from datasets import load_dataset

# Load the pre-trained English SentencePiece model
sp = spm.SentencePieceProcessor()
sp.Load('wiki_en.model')

# Load the Hindi dataset
dataset_hi = load_dataset('ai4bharat/samanantar', 'hi', split='train', cache_dir='/sml1/atul/CENTRAL_CACHE')
test_hi = dataset_hi['tgt'][:1000]  # Use the first 1,000 target-side (Hindi) sentences for training

# Write the sampled sentences to a plain-text file, one per line, for the trainer
with open('test_hi.txt', 'w', encoding='utf-8') as f:
    for sample in test_hi:
        f.write(sample + '\n')
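
# A quick sanity check (a sketch, assuming `sp` still holds the English-only
# model): Hindi text should mostly fall back to very short pieces or <unk>,
# which is the motivation for extending the vocabulary in the first place.
print(sp.encode(test_hi[0], out_type=str))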

# Train a new SentencePiece model on the Hindi text. SentencePiece cannot
# incrementally extend an existing model: this trains from scratch, and the
# new pieces must be merged into wiki_en.model afterwards (see the sketch at
# the end of the script). With only 1,000 input sentences, vocab_size=3000
# may be too large for the corpus; lower it if the trainer complains.
spm.SentencePieceTrainer.Train(
    input='test_hi.txt', model_prefix='wiki_extended', vocab_size=3000
)
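
# (Optional) Inspect the first few learned pieces. Alongside the .model file,
# the trainer writes wiki_extended.vocab, a tab-separated piece/score listing.
with open('wiki_extended.vocab', encoding='utf-8') as f:
    for line in list(f)[:10]:
        print(line.rstrip())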

# Load the newly trained model. SentencePieceTrainer.Train has already written
# wiki_extended.model and wiki_extended.vocab to the current working directory,
# so no separate save step is needed.
sp_hi = spm.SentencePieceProcessor()
sp_hi.Load('wiki_extended.model')

# To write the model files to a different directory, pass a path prefix via
# model_prefix instead, e.g. model_prefix='/path/to/output/dir/wiki_extended'
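
# The steps above produce two independent models. To actually extend the
# English model's vocabulary with the new Hindi pieces, the usual recipe is to
# merge the two models' piece lists through SentencePiece's protobuf
# definition. This is a minimal sketch, assuming sentencepiece's bundled
# sentencepiece_model_pb2 module, a score of 0.0 for the appended pieces, and
# an arbitrary output filename wiki_en_hi.model.
from sentencepiece import sentencepiece_model_pb2 as sp_pb2

# Parse both serialized models.
en_model = sp_pb2.ModelProto()
with open('wiki_en.model', 'rb') as f:
    en_model.ParseFromString(f.read())
hi_model = sp_pb2.ModelProto()
with open('wiki_extended.model', 'rb') as f:
    hi_model.ParseFromString(f.read())

# Append every Hindi piece the English model does not already contain.
existing_pieces = {p.piece for p in en_model.pieces}
for p in hi_model.pieces:
    if p.piece not in existing_pieces:
        new_piece = sp_pb2.ModelProto.SentencePiece()
        new_piece.piece = p.piece
        new_piece.score = 0.0
        en_model.pieces.append(new_piece)

# Serialize the merged model and load it to verify it now tokenizes Hindi.
with open('wiki_en_hi.model', 'wb') as f:
    f.write(en_model.SerializeToString())
sp_merged = spm.SentencePieceProcessor()
sp_merged.Load('wiki_en_hi.model')
print(sp_merged.encode(test_hi[0], out_type=str))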