import argparse

from datasets import Dataset, DatasetDict, concatenate_datasets, config, load_dataset, load_from_disk
from tokenizers import Tokenizer, decoders, models, pre_tokenizers, processors, trainers
from transformers import AutoTokenizer, GPT2TokenizerFast

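# Code corpora: the 'code' column of the CodeXGLUE code-to-text train splits
# (Go, Java, JavaScript, PHP, Python, Ruby).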
code_dataset_go = load_dataset('code_x_glue_ct_code_to_text', 'go', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code']
code_dataset_java = load_dataset('code_x_glue_ct_code_to_text', 'java', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code']
code_dataset_javascript = load_dataset('code_x_glue_ct_code_to_text', 'javascript', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code']
code_dataset_php = load_dataset('code_x_glue_ct_code_to_text', 'php', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code']
code_dataset_python = load_dataset('code_x_glue_ct_code_to_text', 'python', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code']
code_dataset_ruby = load_dataset('code_x_glue_ct_code_to_text', 'ruby', split='train', cache_dir='/sml2/atul/CENTRAL_CACHE')['code']

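# Natural-language corpora: 1M Hindi and 1M Bengali documents from the AI4Bharat
# Sangraha verified subset, plus 1M English Wikipedia articles.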
indic_datasets_hi = load_dataset('ai4bharat/sangraha', data_dir="verified/hin", cache_dir='/sml2/atul/CENTRAL_CACHE')['train']['text'][:1000000]
indic_datasets_bn = load_dataset('ai4bharat/sangraha', data_dir="verified/ben", cache_dir='/sml2/atul/CENTRAL_CACHE')['train']['text'][:1000000]
wikipedia_en = load_dataset("wikipedia", "20220301.en", cache_dir='/sml2/atul/CENTRAL_CACHE')['train']['text'][:1000000]

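# Concatenate all corpora into a single in-memory list of training strings.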
combined_train_set = (
    code_dataset_go + code_dataset_java + code_dataset_javascript
    + code_dataset_php + code_dataset_python + code_dataset_ruby
    + indic_datasets_hi + indic_datasets_bn + wikipedia_en
)

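# Arrange the text into train/validation/test splits; validation and test are left empty.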
data = {
    "train": {"text": combined_train_set},
    "validation": {"text": []},
    "test": {"text": []},
}

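# Build a DatasetDict with one Dataset per split.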
custom_dataset = DatasetDict()
for split in data:
    custom_dataset[split] = Dataset.from_dict(data[split])

print(custom_dataset)
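
# The imports above (models, trainers, pre_tokenizers, decoders, processors,
# GPT2TokenizerFast) suggest the next step is tokenizer training. What follows
# is a minimal, illustrative sketch of that step on custom_dataset['train'];
# the vocab size, special token, and batch size are assumptions for
# illustration, not values taken from this project.
def batch_iterator(batch_size=1000):
    # Yield batches of raw text instead of materialising another full copy of the corpus.
    for i in range(0, len(custom_dataset["train"]), batch_size):
        yield custom_dataset["train"][i : i + batch_size]["text"]

bpe_tokenizer = Tokenizer(models.BPE())
bpe_tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
bpe_tokenizer.decoder = decoders.ByteLevel()
bpe_tokenizer.post_processor = processors.ByteLevel(trim_offsets=False)

trainer = trainers.BpeTrainer(vocab_size=50257, special_tokens=["<|endoftext|>"])  # assumed values
bpe_tokenizer.train_from_iterator(batch_iterator(), trainer=trainer, length=len(custom_dataset["train"]))

# Wrap the trained tokenizer so it can be saved and reused through transformers.
wrapped_tokenizer = GPT2TokenizerFast(tokenizer_object=bpe_tokenizer)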