# Compute the "fertility" (tokens produced per whitespace word) of a
# SentencePiece tokenizer on Samanantar Hindi, Bengali, and English text.
import sentencepiece as spm
from datasets import load_dataset
def load_model(model_path):
    """Load and return a SentencePiece processor from *model_path*.

    Parameters
    ----------
    model_path : str
        Filesystem path to a trained ``.model`` SentencePiece file.
    """
    processor = spm.SentencePieceProcessor()
    processor.Load(model_path)
    return processor
def tokenize_text(sp, text):
    """Tokenize every sentence in *text* with the given SentencePiece processor.

    Parameters
    ----------
    sp : SentencePieceProcessor
        A loaded processor exposing ``EncodeAsPieces``.
    text : iterable of str
        Sentences to tokenize.

    Returns
    -------
    list of list of str
        One list of subword pieces per input sentence.
    """
    return list(map(sp.EncodeAsPieces, text))
def calculate_fertility(tokenized_text, tokenized=None):
    """Return average fertility: subword tokens emitted per whitespace word.

    NOTE: despite its (historical) name, ``tokenized_text`` must be the RAW
    sentence strings — the word count comes from ``str.split()`` on each
    element. The name is kept for backward compatibility with existing callers.

    Parameters
    ----------
    tokenized_text : list of str
        Raw sentences; words are counted by whitespace splitting.
    tokenized : list of list of str, optional
        Pre-tokenized pieces for the same sentences. If omitted, the
        sentences are tokenized here via the module-level ``sp`` model
        (the original behavior); passing it avoids re-tokenizing work
        the caller has already done.

    Returns
    -------
    float
        total subword tokens / total words, or 0 when there are no words.
    """
    total_words = sum(len(sentence.split()) for sentence in tokenized_text)
    if tokenized is None:
        # Fall back to the original behavior: tokenize with the global model.
        tokenized = tokenize_text(sp, tokenized_text)
    total_tokens = sum(len(pieces) for pieces in tokenized)
    return total_tokens / total_words if total_words > 0 else 0
# Path to the SentencePiece model under evaluation.
model_path = 'gemma.model'
sp = load_model(model_path)

# Samanantar parallel corpus: 'tgt' is the Indic side, 'src' is English.
dataset_hi = load_dataset('ai4bharat/samanantar', 'hi', split='train', cache_dir='/sml1/atul/CENTRAL_CACHE')
dataset_bn = load_dataset('ai4bharat/samanantar', 'bn', split='train', cache_dir='/sml1/atul/CENTRAL_CACHE')
# indic_datasets_hi= load_dataset('ai4bharat/sangraha', data_dir="verified/hin", cache_dir='/sml2/atul/CENTRAL_CACHE')['test']['text']
# indic_datasets_bn= load_dataset('ai4bharat/sangraha', data_dir="verified/ben", cache_dir='/sml2/atul/CENTRAL_CACHE')['test']['text']
# wikipedia_en = load_dataset("wikipedia", "20220301.en", cache_dir='/sml2/atul/CENTRAL_CACHE')['test']['text']

# One pass per language. The original script tokenized each 10k-sentence
# sample into an unused variable and then tokenized the same sentences
# again inside calculate_fertility; the dead duplicate pass is removed.
for language, sentences in (
    ("Hindi", dataset_hi['tgt'][:10000]),
    ("Bengali", dataset_bn['tgt'][:10000]),
    ("English", dataset_hi['src'][:10000]),
):
    # Calculate the fertility (tokens per whitespace word) on the sample.
    fertility = calculate_fertility(sentences)
    print(f"Fertility of {language} tokenizer:", fertility)