import sentencepiece as spm
from datasets import load_dataset

def load_model(model_path):
    """ Load the SentencePiece model from the specified path. """
    sp = spm.SentencePieceProcessor()
    sp.Load(model_path)
    return sp

def tokenize_text(sp, text):
    """ Tokenize a list of sentences and return the tokens. """
    tokenized_text = [sp.EncodeAsPieces(sentence) for sentence in text]
    return tokenized_text
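# Illustrative output (exact pieces depend on the model):
# sp.EncodeAsPieces("hello world") might return ['▁hello', '▁world'],
# where '▁' is SentencePiece's word-boundary meta symbol.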

def calculate_fertility(sentences, tokenized_sentences):
    """ Calculate the average fertility: tokens produced per whitespace-separated word. """
    total_words = sum(len(sentence.split()) for sentence in sentences)
    total_tokens = sum(len(tokens) for tokens in tokenized_sentences)
    return total_tokens / total_words if total_words > 0 else 0
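
# Worked example (hypothetical tokenization): a 2-word sentence split into
# 3 pieces has fertility 3 / 2 = 1.5; values closer to 1 mean the tokenizer
# segments that language more efficiently.
assert calculate_fertility(["hello world"], [["▁he", "llo", "▁world"]]) == 1.5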

# Path to your SentencePiece model
model_path = 'gemma.model'
sp = load_model(model_path)

# Samanantar is an English-Indic parallel corpus: 'src' holds the English
# side of each pair, 'tgt' the target-language (Hindi/Bengali) side.
dataset_hi = load_dataset('ai4bharat/samanantar', 'hi', split='train', cache_dir='/sml1/atul/CENTRAL_CACHE')
dataset_bn = load_dataset('ai4bharat/samanantar', 'bn', split='train', cache_dir='/sml1/atul/CENTRAL_CACHE')

# indic_datasets_hi= load_dataset('ai4bharat/sangraha', data_dir="verified/hin", cache_dir='/sml2/atul/CENTRAL_CACHE')['test']['text']
# indic_datasets_bn= load_dataset('ai4bharat/sangraha', data_dir="verified/ben", cache_dir='/sml2/atul/CENTRAL_CACHE')['test']['text']
# wikipedia_en = load_dataset("wikipedia", "20220301.en", cache_dir='/sml2/atul/CENTRAL_CACHE')['test']['text']
# Tokenize the sentences

sentences = dataset_hi['tgt'][:10000]
tokenized_sentences = tokenize_text(sp, sentences)
# Calculate the fertility
fertility = calculate_fertility(sentences, tokenized_sentences)

print("Fertility of the tokenizer on Hindi:", fertility)


sentences = dataset_bn['tgt'][:10000]
tokenized_sentences = tokenize_text(sp, sentences)
# Calculate the fertility
fertility = calculate_fertility(sentences, tokenized_sentences)

print("Fertility of the tokenizer on Bengali:", fertility)


sentences = dataset_hi['src'][:10000]
tokenized_sentences = tokenize_text(sp, sentences)
# Calculate the fertility
fertility = calculate_fertility(sentences, tokenized_sentences)

print("Fertility of the tokenizer on English:", fertility)