import sentencepiece as spm

# Train a byte-fallback BPE tokenizer on the combined corpus dumped to all.txt.
# NOTE: the pad/unk/bos/eos piece names below are the conventional SentencePiece ones;
# the explicit values were stripped in this copy of the script, so treat them as assumed.
spm.SentencePieceTrainer.Train(
    input='all.txt',
    model_prefix='all',
    vocab_size=50304,
    pad_id=0,
    unk_id=1,
    bos_id=2,
    eos_id=3,
    pad_piece='<pad>',
    unk_piece='<unk>',
    bos_piece='<s>',
    eos_piece='</s>',
    model_type='bpe',
    num_threads=256,
    add_dummy_prefix=False,
    byte_fallback=True,
    character_coverage=0.9999,
    # NOTE: only the comma separators of the user-defined symbols survive here;
    # the original token names are not recoverable.
    user_defined_symbols=',,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,',
)
exit()

# --- One-off corpus preparation that produced all.txt ---
# Unreachable after exit() above; kept for reference.

# from datasets import concatenate_datasets, load_dataset, load_from_disk
# from datasets import DatasetDict, Dataset
# import random

# def train_tokenizer(args):
code_dataset_go = load_dataset('code_x_glue_ct_code_to_text', 'go', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code']
code_dataset_java = load_dataset('code_x_glue_ct_code_to_text', 'java', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code']
code_dataset_javascript = load_dataset('code_x_glue_ct_code_to_text', 'javascript', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code']
code_dataset_php = load_dataset('code_x_glue_ct_code_to_text', 'php', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code']
code_dataset_python = load_dataset('code_x_glue_ct_code_to_text', 'python', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code']
code_dataset_ruby = load_dataset('code_x_glue_ct_code_to_text', 'ruby', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code']
# indic_datasets_hi = load_dataset('ai4bharat/sangraha', data_dir="verified/hin", cache_dir='/sml2/atul/CENTRAL_CACHE')['train']['text']
# indic_datasets_bn = load_dataset('ai4bharat/sangraha', data_dir="verified/ben", cache_dir='/sml2/atul/CENTRAL_CACHE')['train']['text']
# wikipedia_en = load_dataset("wikipedia", "20220301.en", cache_dir='/sml2/atul/CENTRAL_CACHE')['train']['text']

# num_docs = 100000
# hi = random.sample(indic_datasets_hi, num_docs)
# bn = random.sample(indic_datasets_bn, num_docs)
# en = random.sample(wikipedia_en, num_docs)

# # combined_train_set = wikipedia_en
# # combined_train_set = code_dataset_go + code_dataset_java + code_dataset_javascript + code_dataset_php + code_dataset_python + code_dataset_ruby + indic_datasets_hi + indic_datasets_bn + wikipedia_en
# combined_train_set = en + hi + bn

# data = {
#     "train": {"text": combined_train_set},
#     "validation": {"text": []},
#     "test": {"text": []},
# }
# # print(data)

# custom_dataset = DatasetDict()
# for split in data:
#     custom_dataset[split] = Dataset.from_dict(data[split])
# custom_dataset = custom_dataset["train"]

# Dump every line of the combined corpus into all.txt, one stripped line per row.
# # lines = []
# with open('all.txt', 'w') as f:
#     for text in custom_dataset['text']:
#         lines = text.split("\n")
#         for l in lines:
#             f.write(l.strip() + '\n')
#         # break
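
# A minimal usage sketch (not part of the original script): load the model the Train()
# call above would produce -- 'all.model', from model_prefix='all' -- and round-trip a
# sample string. Illustrative only; it assumes training has already finished, and like
# the reference code above it is never reached because of the exit() call.
import sentencepiece as spm  # repeated so this snippet can be copied standalone

sp = spm.SentencePieceProcessor(model_file='all.model')

sample = 'def add(a, b): return a + b'
print(sp.encode(sample, out_type=str))   # subword pieces
ids = sp.encode(sample, out_type=int)    # token ids
print(sp.decode(ids))                    # should reproduce the sample text

# The special-token ids were fixed in the trainer flags above.
print(sp.pad_id(), sp.unk_id(), sp.bos_id(), sp.eos_id())  # 0 1 2 3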