"""
Split the whole MKQA dataset into 26 parts, one file per language.
"""

import os

from utils import parse, read_jsonl_file, write_jsonl_file


def preprocess(args):
    path = os.path.join(args.input_dir, "mkqa.jsonl")
    data = read_jsonl_file(path)

    # For each language (locale), record the chosen locale on every turn
    # and write one JSONL file of single-turn dialogs per locale.
    locales = list(data[0]["answers"].keys())
    for locale in locales:
        turns = []
        for QA in data:
            t = {
                "turn": "single",
                "dialog": [],
                "knowledge": None,
                "goal": None,
                "QA": None,
            }
            que = {
                "role": "ROLE1",
                "utterance": QA["queries"][locale],
                "utter_trans": QA["query"],  # new feature
                "slot_value_table": [],
                "summary": None,
                "locale": locale,
                "scenario": None,
                "intent": None,
                "topic": None,
                "answer": None,
            }
            # Alternate answers: the "aliases" field may be missing for some entries.
            aliases = QA["answers"][locale][0].get("aliases", [])
            ans_svt = {
                "slot": QA["answers"][locale][0]["type"],
                "value": QA["answers"][locale][0]["text"],
                "act": None,
                "aliases": aliases,
            }
            ans = {
                "role": "ROLE2",
                "utterance": ans_svt["value"],
                "utter_trans": QA["answers"]["en"][0]["text"],  # new feature
                "slot_value_table": ans_svt,
                "summary": None,
                "locale": locale,
                "scenario": None,
                "intent": None,
                "topic": None,
                "answer": ans_svt["value"],
            }
            t["dialog"].append(que)
            t["dialog"].append(ans)
            turns.append(t)

        write_jsonl_file(turns, os.path.join(args.output_dir, locale + ".jsonl"))


if __name__ == "__main__":
    args = parse()
    preprocess(args)
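# ---------------------------------------------------------------------------
# Reference sketch (assumption): the helpers imported from `utils` are not
# shown in this file. They are assumed to behave roughly like the minimal
# JSONL/CLI utilities below; the real implementations may differ.
#
#   import argparse
#   import json
#
#   def read_jsonl_file(path):
#       # One JSON object per line -> list of dicts.
#       with open(path, "r", encoding="utf-8") as f:
#           return [json.loads(line) for line in f if line.strip()]
#
#   def write_jsonl_file(records, path):
#       # List of dicts -> one JSON object per line.
#       with open(path, "w", encoding="utf-8") as f:
#           for record in records:
#               f.write(json.dumps(record, ensure_ascii=False) + "\n")
#
#   def parse():
#       # Assumed CLI: --input_dir and --output_dir, matching the attribute
#       # accesses in preprocess(); the actual flag names may differ.
#       parser = argparse.ArgumentParser()
#       parser.add_argument("--input_dir", required=True)
#       parser.add_argument("--output_dir", required=True)
#       return parser.parse_args()
#
# Example invocation (hypothetical flag names):
#   python preprocess.py --input_dir data/mkqa --output_dir data/mkqa_by_locale
# ---------------------------------------------------------------------------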