# Build a tokenizer-training corpus from English Wikipedia, five Indic languages
# (ai4bharat/sangraha) and six programming languages (CodeXGLUE), then train a
# SentencePiece BPE tokenizer on it.

import random

import sentencepiece as spm
from datasets import load_dataset

# Code corpora: up to 400k functions per language from the CodeXGLUE code-to-text splits.
code_dataset_go = load_dataset('code_x_glue_ct_code_to_text', 'go', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code'][:400000]
code_dataset_java = load_dataset('code_x_glue_ct_code_to_text', 'java', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code'][:400000]
code_dataset_javascript = load_dataset('code_x_glue_ct_code_to_text', 'javascript', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code'][:400000]
code_dataset_php = load_dataset('code_x_glue_ct_code_to_text', 'php', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code'][:400000]
code_dataset_python = load_dataset('code_x_glue_ct_code_to_text', 'python', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code'][:400000]
code_dataset_ruby = load_dataset('code_x_glue_ct_code_to_text', 'ruby', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code'][:400000]

# Indic corpora: verified subsets of ai4bharat/sangraha (Hindi, Kannada, Malayalam, Tamil, Telugu).
indic_datasets_hi = load_dataset('ai4bharat/sangraha', data_dir="verified/hin", cache_dir='/sml2/atul/CENTRAL_CACHE')['train']['text']
indic_datasets_kan = load_dataset('ai4bharat/sangraha', data_dir="verified/kan", cache_dir='/sml2/atul/CENTRAL_CACHE')['train']['text']
indic_datasets_ml = load_dataset('ai4bharat/sangraha', data_dir="verified/mal", cache_dir='/sml2/atul/CENTRAL_CACHE')['train']['text']
indic_datasets_ta = load_dataset('ai4bharat/sangraha', data_dir="verified/tam", cache_dir='/sml2/atul/CENTRAL_CACHE')['train']['text']
indic_datasets_te = load_dataset('ai4bharat/sangraha', data_dir="verified/tel", cache_dir='/sml2/atul/CENTRAL_CACHE')['train']['text']

# English corpus: the 2022-03-01 English Wikipedia dump.
wikipedia_en = load_dataset("wikipedia", "20220301.en", cache_dir='/sml2/atul/CENTRAL_CACHE')['train']['text']

num_docs = 300000
hi_sampled = random.sample(indic_datasets_hi, num_docs)
kan_sampled = random.sample(indic_datasets_kan, num_docs)
ml_sampled = random.sample(indic_datasets_ml, num_docs)
ta_sampled = random.sample(indic_datasets_ta, num_docs)
te_sampled = random.sample(indic_datasets_te, num_docs)
en_sampled = random.sample(wikipedia_en, num_docs)

combined_train_set = en_sampled + hi_sampled + kan_sampled + ml_sampled + ta_sampled + te_sampled
combined_train_set_code = code_dataset_go + code_dataset_java + code_dataset_javascript + code_dataset_php + code_dataset_python + code_dataset_ruby

# Write the corpus to a plain-text training file: natural-language documents are split
# on newlines and stripped; code snippets are written as-is, so their own newlines keep
# one code line per row.
with open('ta_te_kan_ml_traintext.txt', 'w', encoding='utf-8') as f:
    for text in combined_train_set:
        for line in text.split('\n'):
            if line:
                f.write(line.strip() + '\n')
    for code in combined_train_set_code:
        f.write(code + '\n')

# Train a 64k-vocabulary BPE tokenizer with byte fallback on the combined corpus.
spm.SentencePieceTrainer.Train(
    input='ta_te_kan_ml_traintext.txt',
    model_prefix='ta_te_kan_ml_50kspm_tokenizer',
    vocab_size=64000,
    pad_id=0,
    unk_id=1,
    bos_id=2,
    eos_id=3,
    pad_piece='<pad>',
    unk_piece='<unk>',
    bos_piece='<bos>',
    eos_piece='<eos>',
    model_type='bpe',
    num_threads=256,
    add_dummy_prefix=False,
    byte_fallback=True,
    character_coverage=0.9999,
    remove_extra_whitespaces=False,
    allow_whitespace_only_pieces=True,
    split_digits=True,
    # user_defined_symbols is a comma-separated list: the leading '\n' and '\r' entries
    # reserve the newline and carriage-return characters as pieces, followed by the
    # special tokens, a <mask> token, 99 <unusedN> placeholder slots, chat-turn markers,
    # and per-language code tags.
    user_defined_symbols='\n,\r,<pad>,<eos>,<bos>,<mask>,<unused0>,<unused1>,<unused2>,<unused3>,<unused4>,<unused5>,<unused6>,<unused7>,<unused8>,<unused9>,<unused10>,<unused11>,<unused12>,<unused13>,<unused14>,<unused15>,<unused16>,<unused17>,<unused18>,<unused19>,<unused20>,<unused21>,<unused22>,<unused23>,<unused24>,<unused25>,<unused26>,<unused27>,<unused28>,<unused29>,<unused30>,<unused31>,<unused32>,<unused33>,<unused34>,<unused35>,<unused36>,<unused37>,<unused38>,<unused39>,<unused40>,<unused41>,<unused42>,<unused43>,<unused44>,<unused45>,<unused46>,<unused47>,<unused48>,<unused49>,<unused50>,<unused51>,<unused52>,<unused53>,<unused54>,<unused55>,<unused56>,<unused57>,<unused58>,<unused59>,<unused60>,<unused61>,<unused62>,<unused63>,<unused64>,<unused65>,<unused66>,<unused67>,<unused68>,<unused69>,<unused70>,<unused71>,<unused72>,<unused73>,<unused74>,<unused75>,<unused76>,<unused77>,<unused78>,<unused79>,<unused80>,<unused81>,<unused82>,<unused83>,<unused84>,<unused85>,<unused86>,<unused87>,<unused88>,<unused89>,<unused90>,<unused91>,<unused92>,<unused93>,<unused94>,<unused95>,<unused96>,<unused97>,<unused98>,<start_of_turn>,<end_of_turn>,〈|javascript|〉,〈|python|〉,〈|sql|〉,〈|shell|〉,〈|c|〉,〈|cpp|〉,〈|java|〉,〈|go|〉',
)
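
# Optional smoke test, a minimal sketch assuming training above succeeded and wrote
# 'ta_te_kan_ml_50kspm_tokenizer.model' into the working directory. The sample strings
# are arbitrary illustrations (one Python snippet, one Hindi sentence), not part of the
# training setup.
sp = spm.SentencePieceProcessor(model_file='ta_te_kan_ml_50kspm_tokenizer.model')
for sample in ['def add(a, b): return a + b', 'नमस्ते, यह एक परीक्षण वाक्य है।']:
    pieces = sp.encode(sample, out_type=str)  # subword pieces as strings
    ids = sp.encode(sample, out_type=int)     # corresponding vocabulary ids
    print(pieces)
    print(sp.decode(ids))  # should closely round-trip the input (modulo normalization)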