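"""Measure the fertility (average subword tokens per word) of the
Telugu-Llama2-7B tokenizer on English, Hindi, and Telugu samples from
AI4Bharat's Samanantar corpus, and log the scores to parameters.log."""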
import logging

from datasets import load_dataset
from transformers import AutoTokenizer
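
# Tokenizer under evaluation: a Telugu-adapted Llama 2 tokenizer with a
# 64k vocabulary (the size logged at the bottom of this script).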
tokenizer = AutoTokenizer.from_pretrained('Telugu-LLM-Labs/Telugu-Llama2-7B-v0-Base')

def initialize_logger(log_file):
    logging.basicConfig(filename=log_file, level=logging.INFO, format='%(asctime)s: %(message)s')

def log_parameters(vocab_size, pretrained_model, en_fertility_score, hi_fertility_score, te_fertility_score, log_file='parameters.log'):
    initialize_logger(log_file)
    logging.info(
        f"Vocabulary Size: {vocab_size}, Tokenizer type: {pretrained_model}, "
        f"English Fertility Score: {en_fertility_score}, Hindi Fertility Score: {hi_fertility_score}, "
        f"Telugu Fertility Score: {te_fertility_score}"
    )
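
# Note: logging.basicConfig opens the log file in append mode by default,
# so scores from repeated runs accumulate in parameters.log.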

# Samanantar is an English-Indic parallel corpus: 'src' holds the English
# side and 'tgt' the Indic side of each sentence pair. The 'te' config is
# Telugu, so the language-suffixed names below use 'te' rather than 'ta'.
dataset_hi = load_dataset('ai4bharat/samanantar', 'hi', split='train', cache_dir='/sml1/atul/CENTRAL_CACHE')
dataset_te = load_dataset('ai4bharat/samanantar', 'te', split='train', cache_dir='/sml1/atul/CENTRAL_CACHE')

# Evaluate on the first 10,000 sentences of each language.
test_en = dataset_hi['src'][:10000]
test_hi = dataset_hi['tgt'][:10000]
test_te = dataset_te['tgt'][:10000]

def fertility_score(texts):
    """Fertility = average number of subword tokens produced per
    whitespace-separated word; lower means the tokenizer splits words
    of that language into fewer pieces."""
    score = 0.0
    for text in texts:
        tok = tokenizer(text)
        score += len(tok['input_ids']) / len(text.split())
    return score / len(texts)

en_fertility_score = fertility_score(test_en)
hi_fertility_score = fertility_score(test_hi)
te_fertility_score = fertility_score(test_te)

log_parameters(64000, "Telugu-Llama7B", en_fertility_score, hi_fertility_score, te_fertility_score)
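
# Optional extension (a sketch, not part of the measured run): computing the
# same metric with a baseline tokenizer makes the gain from the Telugu
# vocabulary extension explicit. 'meta-llama/Llama-2-7b-hf' is an assumed
# baseline checkpoint and may require gated access on the Hugging Face Hub.
#
# baseline = AutoTokenizer.from_pretrained('meta-llama/Llama-2-7b-hf')
# for lang, texts in [('en', test_en), ('hi', test_hi), ('te', test_te)]:
#     score = sum(len(baseline(t)['input_ids']) / len(t.split()) for t in texts) / len(texts)
#     logging.info(f"Baseline {lang} fertility: {score}")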