import sentencepiece as spm
from datasets import load_dataset
|
|
def load_model(model_path):
    """Load the SentencePiece model from the specified path."""
    sp = spm.SentencePieceProcessor()
    sp.Load(model_path)
    return sp
|
|
def tokenize_text(sp, text):
    """Tokenize a list of sentences into SentencePiece pieces."""
    tokenized_text = [sp.EncodeAsPieces(sentence) for sentence in text]
    return tokenized_text
|
|
def calculate_fertility(sentences, tokenized_text):
    """Calculate average fertility: tokens produced per whitespace-separated word."""
    total_words = sum(len(sentence.split()) for sentence in sentences)
    total_tokens = sum(len(tokens) for tokens in tokenized_text)
    return total_tokens / total_words if total_words > 0 else 0
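
# Illustrative example (hypothetical piece counts, not taken from the real
# model): a 5-word sentence that the tokenizer splits into 8 pieces
# contributes 8 tokens and 5 words, so on its own it has a fertility of
# 8 / 5 = 1.6. Values close to 1 mean the tokenizer rarely splits words.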
|
|
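# Note: 'gemma.model' is assumed here to be a local SentencePiece model file
# (e.g. a Gemma tokenizer in SentencePiece format); adjust the path if the
# model lives elsewhere.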
model_path = 'gemma.model'
sp = load_model(model_path)
|
|
|
# Samanantar provides parallel sentence pairs: 'src' is the English side and
# 'tgt' is the Indic-language side of each split.
dataset_hi = load_dataset('ai4bharat/samanantar', 'hi', split='train', cache_dir='/sml1/atul/CENTRAL_CACHE')
dataset_bn = load_dataset('ai4bharat/samanantar', 'bn', split='train', cache_dir='/sml1/atul/CENTRAL_CACHE')
|
# Fertility on Hindi (target side of the 'hi' split).
sentences = dataset_hi['tgt'][:10000]
tokenized_sentences = tokenize_text(sp, sentences)
fertility = calculate_fertility(sentences, tokenized_sentences)
print("Fertility of the tokenizer on Hindi:", fertility)
|
# Fertility on Bengali (target side of the 'bn' split).
sentences = dataset_bn['tgt'][:10000]
tokenized_sentences = tokenize_text(sp, sentences)
fertility = calculate_fertility(sentences, tokenized_sentences)
print("Fertility of the tokenizer on Bengali:", fertility)
|
# Fertility on English (source side of the 'hi' split).
sentences = dataset_hi['src'][:10000]
tokenized_sentences = tokenize_text(sp, sentences)
fertility = calculate_fertility(sentences, tokenized_sentences)
print("Fertility of the tokenizer on English:", fertility)