"""Train a subword tokenizer on IndicCorp v2 (Hindi + English) and, optionally,
evaluate it with two metrics: fertility (average number of subword tokens per
whitespace-separated word) and the proportion of continued words (words split
into more than one token)."""
import argparse
import logging

from datasets import concatenate_datasets, load_dataset, load_from_disk
from tokenizers import Tokenizer, decoders, models, pre_tokenizers, processors, trainers
from transformers import AutoTokenizer, GPT2TokenizerFast


def initialize_logger(log_file):
    logging.basicConfig(filename=log_file, level=logging.INFO, format='%(asctime)s: %(message)s')


def log_parameters(vocab_size, batch_size, fertility_score, proportion_continued_words, log_file='parameters.log'):
    initialize_logger(log_file)
    logging.info(f"Vocabulary Size: {vocab_size}, Batch Size: {batch_size}, Fertility Score: {fertility_score}, Proportion of Continued Words: {proportion_continued_words}")


def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--batch_size",
        type=int,
        required=True,
        help="Batch size to use for training"
    )
    parser.add_argument(
        "--vocab_size",
        type=int,
        required=True,
        help="Vocabulary size to use for tokenizer"
    )
    parser.add_argument(
        "--use_config",
        choices=['xlm-roberta', 'vanilla'],
        required=True,
        help="Use XLM-RoBERTa config or Vanilla BPE"
    )
    parser.add_argument(
        "--do_evaluate",
        action='store_true',
        help="Enable evaluation."
    )
    args = parser.parse_known_args()
    return args
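

# Example invocation (the script name and argument values below are illustrative,
# not taken from the repository):
#   python train_tokenizer.py --batch_size 1000 --vocab_size 50000 --use_config xlm-roberta --do_evaluate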


def calculate_proportion_continued_words(tokenizer, sentences):
    """Fraction of whitespace-separated words that the tokenizer splits into
    more than one subword token ("continued" words)."""
    total_continued_words = 0
    total_words = 0
    for sentence in sentences:
        words = sentence.split()
        if not words:
            continue
        encoding = tokenizer(sentence, add_special_tokens=False)
        # word_ids() (available on fast tokenizers) maps every token to the index
        # of the word it came from; a word covered by more than one token is continued.
        tokens_per_word = {}
        for word_id in encoding.word_ids():
            if word_id is not None:
                tokens_per_word[word_id] = tokens_per_word.get(word_id, 0) + 1
        total_continued_words += sum(1 for count in tokens_per_word.values() if count > 1)
        total_words += len(words)
    return total_continued_words / total_words if total_words > 0 else 0
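

# train_tokenizer loads the Hindi and English splits of IndicCorp v2, trains either
# a vanilla byte-level BPE tokenizer or a new tokenizer derived from the
# XLM-RoBERTa configuration, saves the result to 'hi-bgpt-bpe-tokenizer1', and
# optionally evaluates it on the first 10,000 training sentences.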
def train_tokenizer(args):
    # configs = ['as', 'bd', 'bn', 'dg', 'en', 'gom', 'gu', 'hi', 'kha', 'kn', 'ks', 'mai', 'ml', 'mni', 'mr', 'ne', 'or', 'pa', 'sa', 'sat', 'sd', 'ta', 'te', 'ur']
    indic_datasets = []
    configs = ['hi']
    # for c in configs:
    #     indic_dataset = load_dataset('satpalsr/indicCorpv2', c, split='train', cache_dir='/home1/BharatGPT_tokenizer/hf/')
    #     indic_datasets.extend(indic_dataset)
    # wikidataset = load_dataset('wiki40b', 'en', split='train', cache_dir='/home1/BharatGPT_tokenizer/hf/')
    indic_datasets_hi = load_dataset('satpalsr/indicCorpv2', 'hi', split='train', cache_dir='/home1/BharatGPT_tokenizer/hf/')
    indic_datasets_en = load_dataset('satpalsr/indicCorpv2', 'en', split='train', cache_dir='/home1/BharatGPT_tokenizer/hf/')
    # wikidataset = wikidataset.remove_columns(['wikidata_id', 'version_id'])
    print(indic_datasets)
    # print(wikidataset)
    dataset = concatenate_datasets([indic_datasets_en, indic_datasets_hi])
    # test_data = load_from_disk('samanantar_data')  # overridden by the slice below; kept for reference
    test_data = dataset['text'][:10000]
    print(f"[INFO] Evaluation samples: {len(test_data)}")
    print(f"[INFO] Training samples: {len(dataset)}")
    # print(f"[INFO] {test_data[:10]}")

    def batch_iterator():
        # Stream the corpus in batches so the full text column never has to fit in memory.
        for idx in range(0, len(dataset), args.batch_size):
            yield dataset[idx: idx + args.batch_size]['text']

    if args.use_config == 'vanilla':
        # Train a byte-level BPE tokenizer from scratch (GPT-2 style).
        tokenizer = Tokenizer(models.BPE())
        tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
        print(f"[INFO] The brown fox jumped over the lazy dog\n{tokenizer.pre_tokenizer.pre_tokenize_str('The brown fox jumped over the lazy dog')}")
        print(f"[INFO] Training...")
        trainer = trainers.BpeTrainer(vocab_size=args.vocab_size, special_tokens=["<|endoftext|>"])
        tokenizer.train_from_iterator(batch_iterator(), trainer=trainer)
        tokenizer.post_processor = processors.ByteLevel(trim_offsets=False)
        tokenizer.decoder = decoders.ByteLevel()
        trained_tokenizer = GPT2TokenizerFast(tokenizer_object=tokenizer)
    elif args.use_config == 'xlm-roberta':
        # Retrain the XLM-RoBERTa tokenizer on the new corpus with the requested vocabulary size.
        print("[INFO] Training a new tokenizer from the XLM-RoBERTa configuration...")
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        trained_tokenizer = tokenizer.train_new_from_iterator(batch_iterator(), vocab_size=args.vocab_size)
    trained_tokenizer.save_pretrained('hi-bgpt-bpe-tokenizer1')
    print(f"[INFO] Tokenizer saved to disk")
    if args.do_evaluate:
        print(f"[INFO] Running evaluation using fertility and fraction of continued words")
        tokenizer = AutoTokenizer.from_pretrained('hi-bgpt-bpe-tokenizer1')
        # tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        fertility = 0
        evaluated = 0
        for sentence in test_data:
            words = sentence.split()
            if not words:
                continue  # skip empty lines to avoid division by zero
            tok = tokenizer(sentence)
            fertility += len(tok['input_ids']) / len(words)
            evaluated += 1
        average_fertility = fertility / evaluated if evaluated > 0 else 0
        proportion_continued_words = calculate_proportion_continued_words(tokenizer, test_data)
        log_parameters(args.vocab_size, args.batch_size, average_fertility, proportion_continued_words)


def main():
    args, _ = parse_arguments()
    train_tokenizer(args)


if __name__ == "__main__":
    main()