import sentencepiece as spm

# Train a ~50k-piece BPE tokenizer on the flattened corpus in all.txt.
spm.SentencePieceTrainer.Train(
    input='all.txt',
    model_prefix='all',            # writes all.model and all.vocab
    vocab_size=50304,              # multiple of 128, a common GPU-friendly embedding size
    pad_id=0,
    unk_id=1,
    bos_id=2,
    eos_id=3,
    pad_piece='<pad>',
    unk_piece='<unk>',
    bos_piece='<bos>',
    eos_piece='<eos>',
    model_type='bpe',
    num_threads=256,
    add_dummy_prefix=False,        # do not prepend a dummy whitespace to the input
    byte_fallback=True,            # unseen characters decompose into byte pieces instead of <unk>
    character_coverage=0.9999,
    # Special tokens that must never be split: pad/eos/bos/unk/mask, 99 unused
    # slots (<unused0>..<unused98>), and the chat-turn markers.
    user_defined_symbols=','.join(
        ['<pad>', '<eos>', '<bos>', '<unk>', '<mask>']
        + [f'<unused{i}>' for i in range(99)]
        + ['<start_of_turn>', '<end_of_turn>']
    ),
)
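
# Quick sanity check (a minimal sketch, not part of the original training run):
# load the model SentencePiece just wrote for model_prefix='all' and confirm
# that the special-token IDs and vocabulary size match the arguments above.
sp = spm.SentencePieceProcessor()
sp.Load('all.model')
assert sp.GetPieceSize() == 50304
assert (sp.pad_id(), sp.unk_id(), sp.bos_id(), sp.eos_id()) == (0, 1, 2, 3)
# With byte_fallback=True, characters unseen in training encode into byte
# pieces instead of collapsing to <unk>.
print(sp.EncodeAsPieces('def main():\n    print("hello")'))
print(sp.EncodeAsIds('नमस्ते दुनिया'))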
# Stop here: the commented-out code below was used once to build all.txt and is kept for reference.
exit()
# from datasets import concatenate_datasets, load_dataset, load_from_disk
# from datasets import DatasetDict, Dataset
# import random
# def train_tokenizer(args):
# code_dataset_go = load_dataset('code_x_glue_ct_code_to_text', 'go', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code']
# code_dataset_java = load_dataset('code_x_glue_ct_code_to_text', 'java', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code']
# code_dataset_javascript = load_dataset('code_x_glue_ct_code_to_text', 'javascript', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code']
# code_dataset_php = load_dataset('code_x_glue_ct_code_to_text', 'php', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code']
# code_dataset_python = load_dataset('code_x_glue_ct_code_to_text', 'python', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code']
# code_dataset_ruby = load_dataset('code_x_glue_ct_code_to_text', 'ruby', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code']
# indic_datasets_hi = load_dataset('ai4bharat/sangraha', data_dir="verified/hin", cache_dir='/sml2/atul/CENTRAL_CACHE')['train']['text']
# indic_datasets_bn = load_dataset('ai4bharat/sangraha', data_dir="verified/ben", cache_dir='/sml2/atul/CENTRAL_CACHE')['train']['text']
# wikipedia_en = load_dataset("wikipedia", "20220301.en", cache_dir='/sml2/atul/CENTRAL_CACHE')['train']['text']
# num_docs = 100000
# hi = random.sample(indic_datasets_hi, num_docs)
# bn = random.sample(indic_datasets_bn, num_docs)
# en = random.sample(wikipedia_en, num_docs)
# # combined_train_set = wikipedia_en
# # combined_train_set = code_dataset_go + code_dataset_java + code_dataset_javascript + code_dataset_php + code_dataset_python + code_dataset_ruby + indic_datasets_hi + indic_datasets_bn + wikipedia_en
# combined_train_set = en + hi + bn
# data = {
#     "train": {"text": combined_train_set},
#     "validation": {"text": []},
#     "test": {"text": []},
# }
# # print(data)
# custom_dataset = DatasetDict()
# for split in data:
#     custom_dataset[split] = Dataset.from_dict(data[split])
# custom_dataset = custom_dataset["train"]
# # lines = []
# with open('all.txt', 'w') as f:
#     for text in custom_dataset['text']:
#         lines = text.split("\n")
#         for l in lines:
#             f.write(l.strip() + '\n')
#         # break
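
# Alternative sketch (assumption-labelled, not the original recipe): rebuild
# all.txt by streaming the same Hub datasets instead of materialising them in
# memory, writing one stripped line per text line as above. It assumes the
# datasets support streaming=True and takes the first num_docs documents per
# stream rather than a uniform random sample.
# from datasets import load_dataset
# from itertools import islice
#
# def write_docs(stream, column, fh, limit=100000):
#     # One stripped line per text line, matching the loop above.
#     for example in islice(stream, limit):
#         for line in example[column].split("\n"):
#             fh.write(line.strip() + '\n')
#
# with open('all.txt', 'w', encoding='utf-8') as f:
#     en = load_dataset("wikipedia", "20220301.en", split='train', streaming=True)
#     hi = load_dataset('ai4bharat/sangraha', data_dir="verified/hin", split='train', streaming=True)
#     bn = load_dataset('ai4bharat/sangraha', data_dir="verified/ben", split='train', streaming=True)
#     for stream in (en, hi, bn):
#         write_docs(stream, 'text', f)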