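# Train a subword tokenizer (vanilla byte-level BPE, XLM-RoBERTa, or Gemma style)
# on a slice of the IndicCorp v2 corpus and save the result to disk.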
import argparse
import logging

from datasets import Dataset, DatasetDict, load_dataset
from tokenizers import Tokenizer, decoders, models, pre_tokenizers, processors, trainers
from transformers import AutoTokenizer, GPT2TokenizerFast

def initialize_logger(log_file):
    """Configure file logging with a timestamped message format."""
    logging.basicConfig(filename=log_file, level=logging.INFO, format='%(asctime)s: %(message)s')

def log_parameters(vocab_size, batch_size, fertility_score, proportion_continued_words, log_file='parameters.log'):
    """Record tokenizer hyperparameters and evaluation metrics to a log file."""
    initialize_logger(log_file)
    logging.info(
        f"Vocabulary Size: {vocab_size}, Batch Size: {batch_size}, "
        f"Fertility Score: {fertility_score}, Proportion of Continued Words: {proportion_continued_words}"
    )
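
# NOTE: log_parameters is available for recording tokenizer quality metrics
# (fertility score, proportion of continued words) but is not called in this script.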

def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--batch_size",
        type=int,
        required=True,
        help="Batch size to use for training"
    )
    parser.add_argument(
        "--vocab_size",
        type=int,
        required=True,
        help="Vocabulary size to use for tokenizer"
    )
    parser.add_argument(
        "--use_config",
        choices=['xlm-roberta', 'vanilla', 'gemma'],
        required=True,
        help="Tokenizer configuration to train: XLM-RoBERTa, vanilla byte-level BPE, or Gemma"
    )
    parser.add_argument(
        "--do_evaluate",
        action='store_true',
        help="Enable evaluation."
    )
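    # parse_known_args returns (namespace, unrecognized_args); main() unpacks and discards the extras.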
    args = parser.parse_known_args()
    return args

def train_tokenizer(args):
    # Hindi and Bengali slices are currently disabled; only the English slice is used for training.
    # indic_datasets_hi= load_dataset('satpalsr/indicCorpv2', 'hi', split='train', cache_dir='/sml1/atul/CENTRAL_CACHE')['text'][:20502390]
    indic_datasets_en = load_dataset('satpalsr/indicCorpv2', 'en', split='train', cache_dir='/sml1/atul/CENTRAL_CACHE')['text'][:205090]
    # indic_datasets_bn= load_dataset('satpalsr/indicCorpv2', 'bn', split='train', cache_dir='/sml1/atul/CENTRAL_CACHE')['text'][:20502390]
    # combined_train_set= indic_datasets_hi + indic_datasets_en + indic_datasets_bn
    combined_train_set = indic_datasets_en
    data = {
        "train": {"text": combined_train_set},
        "validation": {"text": []},
        "test": {"text": []},
    }
    # print(data)
    custom_dataset = DatasetDict()
    for split in data:
        custom_dataset[split] = Dataset.from_dict(data[split])
    custom_dataset = custom_dataset["train"]

    def batch_iterator():
        # Stream the training split in batches of raw text for tokenizer training.
        for idx in range(0, len(custom_dataset), args.batch_size):
            yield custom_dataset[idx: idx + args.batch_size]['text']

    if args.use_config == 'vanilla':
        # Train a byte-level BPE tokenizer from scratch.
        tokenizer = Tokenizer(models.BPE())
        tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
        print(f"[INFO] The brown fox jumped over the lazy dog\n{tokenizer.pre_tokenizer.pre_tokenize_str('The brown fox jumped over the lazy dog')}")
        print("[INFO] Training...")
        trainer = trainers.BpeTrainer(vocab_size=args.vocab_size, special_tokens=["<|endoftext|>"])
        tokenizer.train_from_iterator(batch_iterator(), trainer=trainer)
        tokenizer.post_processor = processors.ByteLevel(trim_offsets=False)
        tokenizer.decoder = decoders.ByteLevel()
        trained_tokenizer = GPT2TokenizerFast(tokenizer_object=tokenizer)
    elif args.use_config == 'xlm-roberta':
        # Retrain the XLM-RoBERTa tokenizer on the new corpus.
        print("[INFO] Training a new tokenizer from xlm-roberta-base...")
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        trained_tokenizer = tokenizer.train_new_from_iterator(batch_iterator(), vocab_size=args.vocab_size)
    elif args.use_config == 'gemma':
        # Retrain the Gemma tokenizer on the new corpus.
        print("[INFO] Training a new tokenizer from hf-internal-testing/dummy-gemma...")
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/dummy-gemma')
        trained_tokenizer = tokenizer.train_new_from_iterator(batch_iterator(), vocab_size=args.vocab_size)

    trained_tokenizer.save_pretrained('hi-indiccorp-gemma-bgpt-bpe-tokenizer1')
    print("[INFO] Tokenizer saved to disk")

    # Evaluation data (currently disabled): Samanantar references for Hindi and Bengali.
    # test_hi= load_dataset('ai4bharat/samanantar', 'hi', split='train', cache_dir='/sml1/atul/CENTRAL_CACHE')
    # test_bn= load_dataset('ai4bharat/samanantar', 'bn', split='train', cache_dir='/sml1/atul/CENTRAL_CACHE')
    # print("-------------------------------")
    # print(test_hi)
    # print(test_bn)
    # print("-------------------------------")
    # print(len(test_hi["tgt"]))
    # print(len(test_bn["tgt"]))

def main():
    args, _ = parse_arguments()
    train_tokenizer(args)

if __name__ == "__main__":
    main()
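
# Example invocation (the script filename and values below are illustrative):
#   python train_tokenizer.py --batch_size 1000 --vocab_size 32000 --use_config vanilla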