diff --git a/lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_sin_Latn.yaml b/lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_sin_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9c035361f22ab53504eb6a94c4a23c787ab92c05 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_sin_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "sin_Latn" +"include": "_default_template_yaml" +"task": "belebele_sin_Latn" +"test_split": "sin_Latn" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/qa4mre/qa4mre_2011.yaml b/lm-evaluation/build/lib/lm_eval/tasks/qa4mre/qa4mre_2011.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b9ceb78094abcf60b378d695936f1548a2d69188 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/qa4mre/qa4mre_2011.yaml @@ -0,0 +1,22 @@ +group: + - qa4mre +task: qa4mre_2011 +dataset_path: qa4mre +dataset_name: 2011.main.EN +output_type: multiple_choice +test_split: train +# doc_to_text: "{{document_str.strip()}}\nQuestion: {{question_str}}\nChoices:\n- {{answer_choices|join('\n- ')}}\nAnswer:" +doc_to_text: "{{document_str.strip()}}\nQuestion: {{question_str}}\nAnswer:" +doc_to_target: "{{correct_answer_id|int - 1}}" +doc_to_choice: "{{answer_options.answer_str}}" +should_decontaminate: true +doc_to_decontamination_query: "{{document_str.strip()}} + ' ' + {{question_str}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/_default_template_yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/_default_template_yaml new file mode 100644 index 0000000000000000000000000000000000000000..7ece2e2d84cb43f6e1d7403ae83a73be41e164f7 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/_default_template_yaml @@ -0,0 +1,19 @@ +dataset_path: ZoneTwelve/tmmluplus # a copy 
of `ikala/tmmluplus` +test_split: test +fewshot_split: train +fewshot_config: + sampler: first_n +output_type: multiple_choice +process_docs: !function utils.process_docs +doc_to_text: "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:" +doc_to_choice: ["A", "B", "C", "D"] +doc_to_target: answer +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 0.1 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/_generate_configs.py b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/_generate_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..e313e9b1ea053b4a97f19d8dcbcdfe2cf86f856a --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/_generate_configs.py @@ -0,0 +1,210 @@ +""" +Take in a YAML, and output all "other" splits with this YAML +""" +import argparse +import os + +import pandas as pd +import yaml +from tqdm import tqdm + + +# Copy from https://github.com/iKala/ievals/blob/main/ievals/settings.py +# from TMMLU+ official example +categories = { + "STEM": [ + "physics", + "chemistry", + "biology", + "computer science", + "math", + "engineering", + ], + "humanities": ["history", "philosophy", "law"], + "social_sciences": [ + "politics", + "culture", + "economics", + "geography", + "psychology", + "education", + ], + "other": ["other", "business", "health"], # (business, health, misc.) 
+} + +task_list = [ + "engineering_math", + "dentistry", + "traditional_chinese_medicine_clinical_medicine", + "clinical_psychology", + "technical", + "culinary_skills", + "mechanical", + "logic_reasoning", + "real_estate", + "general_principles_of_law", + "finance_banking", + "anti_money_laundering", + "ttqav2", + "marketing_management", + "business_management", + "organic_chemistry", + "advance_chemistry", + "physics", + "secondary_physics", + "human_behavior", + "national_protection", + "jce_humanities", + "politic_science", + "agriculture", + "official_document_management", + "financial_analysis", + "pharmacy", + "educational_psychology", + "statistics_and_machine_learning", + "management_accounting", + "introduction_to_law", + "computer_science", + "veterinary_pathology", + "accounting", + "fire_science", + "optometry", + "insurance_studies", + "pharmacology", + "taxation", + "education_(profession_level)", + "economics", + "veterinary_pharmacology", + "nautical_science", + "occupational_therapy_for_psychological_disorders", + "trust_practice", + "geography_of_taiwan", + "physical_education", + "auditing", + "administrative_law", + "basic_medical_science", + "macroeconomics", + "trade", + "chinese_language_and_literature", + "tve_design", + "junior_science_exam", + "junior_math_exam", + "junior_chinese_exam", + "junior_social_studies", + "tve_mathematics", + "tve_chinese_language", + "tve_natural_sciences", + "junior_chemistry", + "music", + "education", + "three_principles_of_people", + "taiwanese_hokkien", +] +subject2name = {} +# subject2category = {} +SUBJECTS = {} + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--base_yaml_path", required=True) + parser.add_argument("--save_prefix_path", default="tmmluplus") + parser.add_argument("--cot_prompt_path", default=None) + parser.add_argument("--task_prefix", default="") + parser.add_argument("--group_prefix", default="") + parser.add_argument("--subject_file", 
default="subject.tsv") + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + from pathlib import Path + + # Initialization + SUBJECT_FILE = Path(__file__).parent / Path(args.subject_file) + + df = pd.read_csv(SUBJECT_FILE, delimiter="\t") + + for _, row in df.iterrows(): + for _c in categories: + if row["subject"] in SUBJECTS: + raise ValueError("Duplicate tasks.") + if row["category"] in categories[_c]: # append new item into SUBJECTS + SUBJECTS[row["subject"]] = _c + subject2name[row["subject"]] = row["name"] + break + # End of SUBJECTS initialization + + # get filename of base_yaml so we can `"include": ` it in our "other" YAMLs. + base_yaml_name = os.path.split(args.base_yaml_path)[-1] + with open(args.base_yaml_path) as f: + base_yaml = yaml.full_load(f) + + if args.cot_prompt_path is not None: + import json + + with open(args.cot_prompt_path) as f: + cot_file = json.load(f) + + ALL_CATEGORIES = [] + for subject, category in tqdm(SUBJECTS.items()): + if category not in ALL_CATEGORIES: + ALL_CATEGORIES.append(category) + + if args.cot_prompt_path is not None: + description = cot_file[subject] + else: + name_of_subject = subject2name[subject].replace("_", " ") + description = f"以下為{name_of_subject}的單選題,請提供正確答案的選項。\n\n" + # description = f"The following are multiple choice questions (with answers) about {' '.join(subject.split('_'))}.\n\n" + + yaml_dict = { + "include": base_yaml_name, + "group": f"tmmluplus_{args.task_prefix}_{category}" + if args.task_prefix != "" + else f"tmmluplus_{category}", + "group_alias": category.replace("_", " "), + "task": f"tmmluplus_{args.task_prefix}_{subject}" + if args.task_prefix != "" + else f"tmmluplus_{subject}", + "task_alias": subject.replace("_", " "), + "dataset_name": subject, + "description": description, + } + + file_save_path = args.save_prefix_path + f"_{subject}.yaml" + # eval_logger.info(f"Saving yaml for subset {subject} to {file_save_path}") + with open(file_save_path, "w") as 
yaml_file: + yaml.dump( + yaml_dict, + yaml_file, + # width=float("inf"), + allow_unicode=True, + default_style='"', + ) + + if args.task_prefix != "": + mmlu_subcategories = [ + f"tmmluplus_{args.task_prefix}_{category}" for category in ALL_CATEGORIES + ] + else: + mmlu_subcategories = [f"tmmluplus_{category}" for category in ALL_CATEGORIES] + + if args.group_prefix != "": + file_save_path = args.group_prefix + ".yaml" + else: + file_save_path = args.save_prefix_path + ".yaml" + + # eval_logger.info(f"Saving benchmark config to {file_save_path}") + with open(file_save_path, "w") as yaml_file: + yaml.dump( + { + "group": f"tmmluplus_{args.task_prefix}" + if args.task_prefix != "" + else "tmmluplus", + "task": mmlu_subcategories, + }, + yaml_file, + indent=4, + default_flow_style=False, + ) diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus.yaml new file mode 100644 index 0000000000000000000000000000000000000000..105cf98aff37b28535e8166ae685e5fac105eaed --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus.yaml @@ -0,0 +1,6 @@ +group: tmmluplus +task: +- tmmluplus_other +- tmmluplus_social_sciences +- tmmluplus_humanities +- tmmluplus_STEM diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_accounting.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_accounting.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9e27c919efd224866e9ceeb38424e0147a454193 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_accounting.yaml @@ -0,0 +1,7 @@ +"dataset_name": "accounting" +"description": "以下為會計學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_accounting" +"task_alias": "accounting" diff --git 
a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_administrative_law.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_administrative_law.yaml new file mode 100644 index 0000000000000000000000000000000000000000..454efec64feb6daa0f4bc91f542b23003b2e62d9 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_administrative_law.yaml @@ -0,0 +1,7 @@ +"dataset_name": "administrative_law" +"description": "以下為行政法的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "tmmluplus_administrative_law" +"task_alias": "administrative law" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_advance_chemistry.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_advance_chemistry.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d5baa64be2e643521a1f486a4618babca2ca4ef6 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_advance_chemistry.yaml @@ -0,0 +1,7 @@ +"dataset_name": "advance_chemistry" +"description": "以下為化學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_advance_chemistry" +"task_alias": "advance chemistry" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_agriculture.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_agriculture.yaml new file mode 100644 index 0000000000000000000000000000000000000000..340369c89024e057f9e73945c32655555b666c29 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_agriculture.yaml @@ -0,0 +1,7 @@ +"dataset_name": "agriculture" +"description": "以下為農業的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_agriculture" +"task_alias": "agriculture" diff --git 
a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_business_management.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_business_management.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9cacf04896d941ec705d1c3774952cbb516236f5 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_business_management.yaml @@ -0,0 +1,7 @@ +"dataset_name": "business_management" +"description": "以下為企業管理的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_business_management" +"task_alias": "business management" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_chinese_language_and_literature.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_chinese_language_and_literature.yaml new file mode 100644 index 0000000000000000000000000000000000000000..73eba7684bff03a432c85228f7c9a764ca2544ad --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_chinese_language_and_literature.yaml @@ -0,0 +1,7 @@ +"dataset_name": "chinese_language_and_literature" +"description": "以下為國文的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_chinese_language_and_literature" +"task_alias": "chinese language and literature" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_clinical_psychology.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_clinical_psychology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f8194feb7dee9c2100f6ecf50b602235d1ac0a2a --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_clinical_psychology.yaml @@ -0,0 +1,7 @@ +"dataset_name": "clinical_psychology" +"description": "以下為臨床心理學的單選題,請提供正確答案的選項。\n\n" +"group": 
"tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_clinical_psychology" +"task_alias": "clinical psychology" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_dentistry.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_dentistry.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d6295240fc3a37046d0c8d0038eb58130667a807 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_dentistry.yaml @@ -0,0 +1,7 @@ +"dataset_name": "dentistry" +"description": "以下為牙醫學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_dentistry" +"task_alias": "dentistry" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_economics.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_economics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2ed100fb42d428d0afd0c26f560da9700eb30b04 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_economics.yaml @@ -0,0 +1,7 @@ +"dataset_name": "economics" +"description": "以下為經濟學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_economics" +"task_alias": "economics" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_education.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_education.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cc601810008841e7d2bca53680b237c2b52c16ff --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_education.yaml @@ -0,0 +1,7 @@ +"dataset_name": "education" +"description": "以下為教育常識的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" 
+"include": "_default_template_yaml" +"task": "tmmluplus_education" +"task_alias": "education" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_educational_psychology.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_educational_psychology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bed8f2d41be8aa0349ea47a153212d8ffa7e5bb3 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_educational_psychology.yaml @@ -0,0 +1,7 @@ +"dataset_name": "educational_psychology" +"description": "以下為教育心理的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_educational_psychology" +"task_alias": "educational psychology" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_engineering_math.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_engineering_math.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cf681cd57f2e3b2096080c08a6c2e3de2eaa1443 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_engineering_math.yaml @@ -0,0 +1,7 @@ +"dataset_name": "engineering_math" +"description": "以下為工程數學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_engineering_math" +"task_alias": "engineering math" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_finance_banking.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_finance_banking.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e60086e12e5e97ec9df7ff5c616df95f92762ed1 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_finance_banking.yaml @@ -0,0 +1,7 @@ +"dataset_name": "finance_banking" +"description": "以下為金融與法規的單選題,請提供正確答案的選項。\n\n" +"group": 
"tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_finance_banking" +"task_alias": "finance banking" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_financial_analysis.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_financial_analysis.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9990ab5d0447b969c6f5ae026d5db0d388a00b29 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_financial_analysis.yaml @@ -0,0 +1,7 @@ +"dataset_name": "financial_analysis" +"description": "以下為財務分析的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_financial_analysis" +"task_alias": "financial analysis" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_fire_science.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_fire_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..807f6f5f2ea23185bf82734e860f650f96ae8984 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_fire_science.yaml @@ -0,0 +1,7 @@ +"dataset_name": "fire_science" +"description": "以下為火災學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_fire_science" +"task_alias": "fire science" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_general_principles_of_law.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_general_principles_of_law.yaml new file mode 100644 index 0000000000000000000000000000000000000000..30b21caeada339782994aeedb9d92d1c77b683c5 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_general_principles_of_law.yaml @@ -0,0 +1,7 @@ +"dataset_name": "general_principles_of_law" +"description": 
"以下為法學大意的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "tmmluplus_general_principles_of_law" +"task_alias": "general principles of law" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_geography_of_taiwan.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_geography_of_taiwan.yaml new file mode 100644 index 0000000000000000000000000000000000000000..80ab36b73d77f58ef11f6a6aa047b51d2ca2cad2 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_geography_of_taiwan.yaml @@ -0,0 +1,7 @@ +"dataset_name": "geography_of_taiwan" +"description": "以下為台灣地理的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_geography_of_taiwan" +"task_alias": "geography of taiwan" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_human_behavior.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_human_behavior.yaml new file mode 100644 index 0000000000000000000000000000000000000000..54aaa80fa3b24df4452b3a5c2c75fdb29bb51cdb --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_human_behavior.yaml @@ -0,0 +1,7 @@ +"dataset_name": "human_behavior" +"description": "以下為人類行為與社會的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_human_behavior" +"task_alias": "human behavior" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_insurance_studies.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_insurance_studies.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fa23be46c1af606deb01c860a4703d30edda019d --- /dev/null +++ 
b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_insurance_studies.yaml @@ -0,0 +1,7 @@ +"dataset_name": "insurance_studies" +"description": "以下為保險學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_insurance_studies" +"task_alias": "insurance studies" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_introduction_to_law.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_introduction_to_law.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6434d3eae80048a24afc61a9a042d931fa54fa05 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_introduction_to_law.yaml @@ -0,0 +1,7 @@ +"dataset_name": "introduction_to_law" +"description": "以下為法律概論的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "tmmluplus_introduction_to_law" +"task_alias": "introduction to law" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_jce_humanities.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_jce_humanities.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2ff3bed0731b042baaaed575011b1c0ea6a26aff --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_jce_humanities.yaml @@ -0,0 +1,7 @@ +"dataset_name": "jce_humanities" +"description": "以下為指考人文科目的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "tmmluplus_jce_humanities" +"task_alias": "jce humanities" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_chemistry.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_chemistry.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..de9c0691cf1e5d3c4773f831c77d8056f355277f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_chemistry.yaml @@ -0,0 +1,7 @@ +"dataset_name": "junior_chemistry" +"description": "以下為國中理化的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_junior_chemistry" +"task_alias": "junior chemistry" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_chinese_exam.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_chinese_exam.yaml new file mode 100644 index 0000000000000000000000000000000000000000..937090520ef5e21d5877a46d4c9b1530b56ecba1 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_chinese_exam.yaml @@ -0,0 +1,7 @@ +"dataset_name": "junior_chinese_exam" +"description": "以下為國中會考基測國文的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_junior_chinese_exam" +"task_alias": "junior chinese exam" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_math_exam.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_math_exam.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a869a55f1445123550389b44b718df00d4dd2ef5 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_math_exam.yaml @@ -0,0 +1,7 @@ +"dataset_name": "junior_math_exam" +"description": "以下為國中會考基測數學科的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_junior_math_exam" +"task_alias": "junior math exam" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_science_exam.yaml 
b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_science_exam.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ed4b8d272c94356d6f2de8b600304470b75134ea --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_science_exam.yaml @@ -0,0 +1,7 @@ +"dataset_name": "junior_science_exam" +"description": "以下為國中會考基測自然科的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_junior_science_exam" +"task_alias": "junior science exam" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_linear_algebra.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_linear_algebra.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c2286dbca9b66f3320f8db206b0c1dc279b6069e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_linear_algebra.yaml @@ -0,0 +1,7 @@ +"dataset_name": "linear_algebra" +"description": "以下為線代的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_linear_algebra" +"task_alias": "linear algebra" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_logic_reasoning.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_logic_reasoning.yaml new file mode 100644 index 0000000000000000000000000000000000000000..059bef3d9687e0f38383f6984c12aedfdfbc5b00 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_logic_reasoning.yaml @@ -0,0 +1,7 @@ +"dataset_name": "logic_reasoning" +"description": "以下為邏輯思維的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_logic_reasoning" +"task_alias": "logic reasoning" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_macroeconomics.yaml 
b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_macroeconomics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..91009abe691ffcc0c910729244620557ccad2d6c --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_macroeconomics.yaml @@ -0,0 +1,7 @@ +"dataset_name": "macroeconomics" +"description": "以下為總經的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_macroeconomics" +"task_alias": "macroeconomics" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_marketing_management.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_marketing_management.yaml new file mode 100644 index 0000000000000000000000000000000000000000..da39f0a879b33956012c8f2fefba88586a9c4b4d --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_marketing_management.yaml @@ -0,0 +1,7 @@ +"dataset_name": "marketing_management" +"description": "以下為行銷管理的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_marketing_management" +"task_alias": "marketing management" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_music.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_music.yaml new file mode 100644 index 0000000000000000000000000000000000000000..72864c0035da8cb92b491773be7a8a5e8a3b1685 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_music.yaml @@ -0,0 +1,7 @@ +"dataset_name": "music" +"description": "以下為音樂科的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_music" +"task_alias": "music" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_national_protection.yaml 
b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_national_protection.yaml new file mode 100644 index 0000000000000000000000000000000000000000..62e98266d83a247c9e56f119316780dedac1369e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_national_protection.yaml @@ -0,0 +1,7 @@ +"dataset_name": "national_protection" +"description": "以下為軍事的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_national_protection" +"task_alias": "national protection" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_nautical_science.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_nautical_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e02c1dbc2a9d78354f71330bacdea52803abf8cb --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_nautical_science.yaml @@ -0,0 +1,7 @@ +"dataset_name": "nautical_science" +"description": "以下為航海的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_nautical_science" +"task_alias": "nautical science" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_occupational_therapy_for_psychological_disorders.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_occupational_therapy_for_psychological_disorders.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ba2bfa827ed9f5edd8b0799fa9ca9127e16f7f4e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_occupational_therapy_for_psychological_disorders.yaml @@ -0,0 +1,7 @@ +"dataset_name": "occupational_therapy_for_psychological_disorders" +"description": "以下為心理障礙職能治療學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": 
"_default_template_yaml" +"task": "tmmluplus_occupational_therapy_for_psychological_disorders" +"task_alias": "occupational therapy for psychological disorders" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_official_document_management.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_official_document_management.yaml new file mode 100644 index 0000000000000000000000000000000000000000..77dce1d671784c86e11899e10ba9ec8c2a1d7b54 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_official_document_management.yaml @@ -0,0 +1,7 @@ +"dataset_name": "official_document_management" +"description": "以下為機關文書的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_official_document_management" +"task_alias": "official document management" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_optometry.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_optometry.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7e3b78b7edd3136d3ed8a10d5e959d3fb72bc7bd --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_optometry.yaml @@ -0,0 +1,7 @@ +"dataset_name": "optometry" +"description": "以下為視光學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_optometry" +"task_alias": "optometry" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_organic_chemistry.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_organic_chemistry.yaml new file mode 100644 index 0000000000000000000000000000000000000000..75a096f9b4a242f12b207c6daa453d0dc2217e1f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_organic_chemistry.yaml @@ -0,0 +1,7 @@ +"dataset_name": "organic_chemistry" 
+"description": "以下為有機化學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_organic_chemistry" +"task_alias": "organic chemistry" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_pharmacology.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_pharmacology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0da7ec9842f7bdedaa266b48f4b88a7518966d80 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_pharmacology.yaml @@ -0,0 +1,7 @@ +"dataset_name": "pharmacology" +"description": "以下為藥理學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_pharmacology" +"task_alias": "pharmacology" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_pharmacy.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_pharmacy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a87aa4be10228833b31b3c29c9bda9d6f5dcf8bf --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_pharmacy.yaml @@ -0,0 +1,7 @@ +"dataset_name": "pharmacy" +"description": "以下為藥劑學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_pharmacy" +"task_alias": "pharmacy" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_physics.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_physics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3a8443fe19d0e33011093547f6ada042188a5cee --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_physics.yaml @@ -0,0 +1,7 @@ +"dataset_name": "physics" +"description": "以下為物理的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": 
"_default_template_yaml" +"task": "tmmluplus_physics" +"task_alias": "physics" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_politic_science.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_politic_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..67d2068c5d1bd046263992fe4150fc4519e1345a --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_politic_science.yaml @@ -0,0 +1,7 @@ +"dataset_name": "politic_science" +"description": "以下為政治的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_politic_science" +"task_alias": "politic science" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_real_estate.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_real_estate.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ba90b7aa565bf0102967508392d286e13c25a747 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_real_estate.yaml @@ -0,0 +1,7 @@ +"dataset_name": "real_estate" +"description": "以下為房地產的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_real_estate" +"task_alias": "real estate" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_secondary_physics.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_secondary_physics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6865167cb1f51310ba30d7b4745e62bc878c5d8f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_secondary_physics.yaml @@ -0,0 +1,7 @@ +"dataset_name": "secondary_physics" +"description": "以下為高中物理的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" 
+"task": "tmmluplus_secondary_physics" +"task_alias": "secondary physics" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_statistics_and_machine_learning.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_statistics_and_machine_learning.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9a6e68ba514f60c8c2f6760a80a09a3cd65eb1a0 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_statistics_and_machine_learning.yaml @@ -0,0 +1,7 @@ +"dataset_name": "statistics_and_machine_learning" +"description": "以下為統計與機器學習的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_statistics_and_machine_learning" +"task_alias": "statistics and machine learning" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_taiwanese_hokkien.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_taiwanese_hokkien.yaml new file mode 100644 index 0000000000000000000000000000000000000000..89297df3158681f837462d90ead8660b563ee3e0 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_taiwanese_hokkien.yaml @@ -0,0 +1,7 @@ +"dataset_name": "taiwanese_hokkien" +"description": "以下為閩南語的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_taiwanese_hokkien" +"task_alias": "taiwanese hokkien" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_taxation.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_taxation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f54520270fb40d6c91b9ef508235a938b87be190 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_taxation.yaml @@ -0,0 +1,7 @@ +"dataset_name": "taxation" +"description": "以下為稅務的單選題,請提供正確答案的選項。\n\n" 
+"group": "tmmluplus_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "tmmluplus_taxation" +"task_alias": "taxation" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_technical.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_technical.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6167a8fe0f63000a8d714ef2ed286ed950297d54 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_technical.yaml @@ -0,0 +1,7 @@ +"dataset_name": "technical" +"description": "以下為技術工相關的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_technical" +"task_alias": "technical" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_three_principles_of_people.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_three_principles_of_people.yaml new file mode 100644 index 0000000000000000000000000000000000000000..de50db700ba23b2941ece04a5f0d4eb0999ffe10 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_three_principles_of_people.yaml @@ -0,0 +1,7 @@ +"dataset_name": "three_principles_of_people" +"description": "以下為三民主義的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_three_principles_of_people" +"task_alias": "three principles of people" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_trade.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_trade.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1c42e22e034952c8e26ebff70d3b028e5aee643e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_trade.yaml @@ -0,0 +1,7 @@ +"dataset_name": "trade" +"description": "以下為貿易的單選題,請提供正確答案的選項。\n\n" 
+"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_trade" +"task_alias": "trade" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_trust_practice.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_trust_practice.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0c1e7ae3f44ea9c3fe3f11d41cb1353f787359b4 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_trust_practice.yaml @@ -0,0 +1,7 @@ +"dataset_name": "trust_practice" +"description": "以下為信託實務的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "tmmluplus_trust_practice" +"task_alias": "trust practice" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_ttqav2.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_ttqav2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2216d4d46ca7331e2eae24a24affcaf51b68225f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_ttqav2.yaml @@ -0,0 +1,7 @@ +"dataset_name": "ttqav2" +"description": "以下為台灣在地用語的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_ttqav2" +"task_alias": "ttqav2" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_chinese_language.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_chinese_language.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0c98fedd3725c1bcf7899328bb9b51a3b3ab5e02 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_chinese_language.yaml @@ -0,0 +1,7 @@ +"dataset_name": "tve_chinese_language" +"description": "以下為統測國文的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" 
+"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_tve_chinese_language" +"task_alias": "tve chinese language" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_design.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_design.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e36b1548025bb6a9a9f95cf58833ba94465aab4e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_design.yaml @@ -0,0 +1,7 @@ +"dataset_name": "tve_design" +"description": "以下為統測 設計的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_tve_design" +"task_alias": "tve design" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_mathematics.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_mathematics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b4158f17e9cf040bd5c38419493c2c3c50227ff4 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_mathematics.yaml @@ -0,0 +1,7 @@ +"dataset_name": "tve_mathematics" +"description": "以下為統測數學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_tve_mathematics" +"task_alias": "tve mathematics" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_natural_sciences.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_natural_sciences.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c2ecf4ad00c56eb7835bbdf2ce8d5abb33457d07 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_natural_sciences.yaml @@ -0,0 +1,7 @@ +"dataset_name": "tve_natural_sciences" +"description": "以下為統測自然科的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" 
+"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_tve_natural_sciences" +"task_alias": "tve natural sciences" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_veterinary_pathology.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_veterinary_pathology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c1fcb43897705ecf140fcbc34c69fb0b74f66331 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_veterinary_pathology.yaml @@ -0,0 +1,7 @@ +"dataset_name": "veterinary_pathology" +"description": "以下為獸醫病理學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_veterinary_pathology" +"task_alias": "veterinary pathology" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_veterinary_pharmacology.yaml b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_veterinary_pharmacology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..45c6553b2985013ae44ddaa401b7e2a10cfa59ee --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/tmmluplus_veterinary_pharmacology.yaml @@ -0,0 +1,7 @@ +"dataset_name": "veterinary_pharmacology" +"description": "以下為獸醫藥理學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_veterinary_pharmacology" +"task_alias": "veterinary pharmacology" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/utils.py b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e406d28293586763eaf73d4452a221ce97948041 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/tmmluplus/default/utils.py @@ -0,0 +1,16 @@ +import datasets + + +def process_docs(dataset: datasets.Dataset) -> datasets.Dataset: + def 
_helper(doc): + # modifies the contents of a single + # document in our dataset. + answer_list = ["A", "B", "C", "D"] + out_doc = { + "questions": doc["question"], + "choices": [doc["A"], doc["B"], doc["C"], doc["D"]], + "goal": answer_list.index(doc["answer"]), + } + return out_doc + + return dataset.map(_helper) # returns back a datasets.Dataset object diff --git a/lm-evaluation/build/lib/lm_eval/tasks/truthfulqa/README.md b/lm-evaluation/build/lib/lm_eval/tasks/truthfulqa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f29db6ca5406429a4a3c98a884e6154556554f64 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/truthfulqa/README.md @@ -0,0 +1,53 @@ +# TruthfulQA + +### Paper + +Title: `TruthfulQA: Measuring How Models Mimic Human Falsehoods` +Abstract: `https://arxiv.org/abs/2109.07958` + +Homepage: `https://github.com/sylinrl/TruthfulQA` + + +### Citation + +``` +@inproceedings{lin-etal-2022-truthfulqa, + title = "{T}ruthful{QA}: Measuring How Models Mimic Human Falsehoods", + author = "Lin, Stephanie and + Hilton, Jacob and + Evans, Owain", + booktitle = "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)", + month = may, + year = "2022", + address = "Dublin, Ireland", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/2022.acl-long.229", + doi = "10.18653/v1/2022.acl-long.229", + pages = "3214--3252", +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet. + +#### Tasks + +* `truthfulqa_mc1`: `Multiple-choice, single answer` +* (MISSING)`truthfulqa_mc2`: `Multiple-choice, multiple answers` +* (MISSING)`truthfulqa_gen`: `Answer generation` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? 
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/build/lib/lm_eval/tasks/truthfulqa/truthfulqa_mc2.yaml b/lm-evaluation/build/lib/lm_eval/tasks/truthfulqa/truthfulqa_mc2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0599b9d6be4c3c5baad27192b0b5601c0b3dc385 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/truthfulqa/truthfulqa_mc2.yaml @@ -0,0 +1,13 @@ +include: truthfulqa_mc1.yaml +task: truthfulqa_mc2 +doc_to_target: 0 +doc_to_choice: "{{mc2_targets.choices}}" +process_results: !function utils.process_results_mc2 +should_decontaminate: True +doc_to_decontamination_query: question +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 2.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/truthfulqa/utils.py b/lm-evaluation/build/lib/lm_eval/tasks/truthfulqa/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..399969ca5cb9d11cd2ff89880355082b495556e8 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/truthfulqa/utils.py @@ -0,0 +1,167 @@ +import datasets +import numpy as np +import sacrebleu +from rouge_score import rouge_scorer, scoring + + +def process_results_mc2(doc, results): + lls, is_greedy = zip(*results) + + # Split on the first `0` as everything before it is true (`1`). + split_idx = list(doc["mc2_targets"]["labels"]).index(0) + # Compute the normalized probability mass for the correct answer. 
+ ll_true, ll_false = lls[:split_idx], lls[split_idx:] + p_true, p_false = np.exp(np.array(ll_true)), np.exp(np.array(ll_false)) + p_true = p_true / (sum(p_true) + sum(p_false)) + + return {"acc": sum(p_true)} + + +def process_docs_gen(dataset: datasets.Dataset) -> datasets.Dataset: + return dataset.map(preprocess_function) + + +def preprocess_function(examples): + def _format_answers(answers): + formatted_answers = [] + for answer in answers: + answer = answer.strip() + if len(answer): + # Add a period after all answers. + if answer[-1] != ".": + formatted_answers.append(answer + ".") + else: + formatted_answers.append(answer) + return formatted_answers + + incorrect_answers = _format_answers(examples["incorrect_answers"]) + correct_answers = _format_answers(examples["correct_answers"]) + if "I have no comment." not in correct_answers: + correct_answers.append("I have no comment.") + return { + "question": examples["question"].strip(), + "correct_answers": correct_answers, + "incorrect_answers": incorrect_answers, + } + + +def process_results_gen(doc, results): + completion = results[0] + true_refs, false_refs = doc["correct_answers"], doc["incorrect_answers"] + all_refs = true_refs + false_refs + + # Process the sentence-level BLEURT, BLEU, and ROUGE for similarity measures. 
+ + # # BLEURT + # bleurt_scores_true = self.bleurt.compute( + # predictions=[completion] * len(true_refs), references=true_refs + # )["scores"] + # bleurt_scores_false = self.bleurt.compute( + # predictions=[completion] * len(false_refs), references=false_refs + # )["scores"] + # bleurt_correct = max(bleurt_scores_true) + # bleurt_incorrect = max(bleurt_scores_false) + # bleurt_max = bleurt_correct + # bleurt_diff = bleurt_correct - bleurt_incorrect + # bleurt_acc = int(bleurt_correct > bleurt_incorrect) + + # BLEU + bleu_scores = [bleu([[ref]], [completion]) for ref in all_refs] + bleu_correct = np.nanmax(bleu_scores[: len(true_refs)]) + bleu_incorrect = np.nanmax(bleu_scores[len(true_refs) :]) + bleu_max = bleu_correct + bleu_diff = bleu_correct - bleu_incorrect + bleu_acc = int(bleu_correct > bleu_incorrect) + + # ROUGE-N + rouge_scores = [rouge([ref], [completion]) for ref in all_refs] + # ROUGE-1 + rouge1_scores = [score["rouge1"] for score in rouge_scores] + rouge1_correct = np.nanmax(rouge1_scores[: len(true_refs)]) + rouge1_incorrect = np.nanmax(rouge1_scores[len(true_refs) :]) + rouge1_max = rouge1_correct + rouge1_diff = rouge1_correct - rouge1_incorrect + rouge1_acc = int(rouge1_correct > rouge1_incorrect) + # ROUGE-2 + rouge2_scores = [score["rouge2"] for score in rouge_scores] + rouge2_correct = np.nanmax(rouge2_scores[: len(true_refs)]) + rouge2_incorrect = np.nanmax(rouge2_scores[len(true_refs) :]) + rouge2_max = rouge2_correct + rouge2_diff = rouge2_correct - rouge2_incorrect + rouge2_acc = int(rouge2_correct > rouge2_incorrect) + # ROUGE-L + rougeL_scores = [score["rougeLsum"] for score in rouge_scores] + rougeL_correct = np.nanmax(rougeL_scores[: len(true_refs)]) + rougeL_incorrect = np.nanmax(rougeL_scores[len(true_refs) :]) + rougeL_max = rougeL_correct + rougeL_diff = rougeL_correct - rougeL_incorrect + rougeL_acc = int(rougeL_correct > rougeL_incorrect) + + return { + # "bleurt_max": bleurt_max, + # "bleurt_acc": bleurt_acc, + # 
"bleurt_diff": bleurt_diff, + "bleu_max": bleu_max, + "bleu_acc": bleu_acc, + "bleu_diff": bleu_diff, + "rouge1_max": rouge1_max, + "rouge1_acc": rouge1_acc, + "rouge1_diff": rouge1_diff, + "rouge2_max": rouge2_max, + "rouge2_acc": rouge2_acc, + "rouge2_diff": rouge2_diff, + "rougeL_max": rougeL_max, + "rougeL_acc": rougeL_acc, + "rougeL_diff": rougeL_diff, + } + + +def bleu(refs, preds): + """ + Returns `t5` style BLEU scores. See the related implementation: + https://github.com/google-research/text-to-text-transfer-transformer/blob/3d10afd51ba97ac29eb66ae701eca274488202f7/t5/evaluation/metrics.py#L41 + + :param refs: + A `list` of `list` of reference `str`s. + :param preds: + A `list` of predicted `str`s. + """ + score = sacrebleu.corpus_bleu( + preds, + refs, + smooth_method="exp", + smooth_value=0.0, + force=False, + lowercase=False, + tokenize="intl", + use_effective_order=False, + ).score + return score + + +def rouge(refs, preds): + """ + Returns `t5` style ROUGE scores. See the related implementation: + https://github.com/google-research/text-to-text-transfer-transformer/blob/3d10afd51ba97ac29eb66ae701eca274488202f7/t5/evaluation/metrics.py#L68 + + :param refs: + A `list` of reference `strs`. + :param preds: + A `list` of predicted `strs`. + """ + rouge_types = ["rouge1", "rouge2", "rougeLsum"] + scorer = rouge_scorer.RougeScorer(rouge_types) + # Add newlines between sentences to correctly compute `rougeLsum`. + + def _prepare_summary(summary): + summary = summary.replace(" . ", ".\n") + return summary + + # Accumulate confidence intervals. 
+ aggregator = scoring.BootstrapAggregator() + for ref, pred in zip(refs, preds): + ref = _prepare_summary(ref) + pred = _prepare_summary(pred) + aggregator.add_scores(scorer.score(ref, pred)) + result = aggregator.aggregate() + return {type: result[type].mid.fmeasure * 100 for type in rouge_types} diff --git a/lm-evaluation/build/lib/lm_eval/tasks/xcopa/README.md b/lm-evaluation/build/lib/lm_eval/tasks/xcopa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7f5ea3c3be4f84c0bf5c733dccce3c8d95931bda --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/xcopa/README.md @@ -0,0 +1,60 @@ +# XCOPA + +### Paper + +Title: `XCOPA: A Multilingual Dataset for Causal Commonsense Reasoning` + +Abstract: https://ducdauge.github.io/files/xcopa.pdf + +The Cross-lingual Choice of Plausible Alternatives dataset is a benchmark to evaluate the ability of machine learning models to transfer commonsense reasoning across languages. +The dataset is the translation and reannotation of the English COPA (Roemmele et al. 2011) and covers 11 languages from 11 families and several areas around the globe. +The dataset is challenging as it requires both the command of world knowledge and the ability to generalise to new languages. +All the details about the creation of XCOPA and the implementation of the baselines are available in the paper. + +Homepage: https://github.com/cambridgeltl/xcopa + +### Citation + +``` +@inproceedings{ponti2020xcopa, + title={{XCOPA: A} Multilingual Dataset for Causal Commonsense Reasoning}, + author={Edoardo M. 
Ponti, Goran Glava\v{s}, Olga Majewska, Qianchu Liu, Ivan Vuli\'{c} and Anna Korhonen}, + booktitle={Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)}, + year={2020}, + url={https://ducdauge.github.io/files/xcopa.pdf} +} +``` + +### Groups and Tasks + +#### Groups + +* `xcopa` + +#### Tasks + +* `xcopa_et`: Estonian +* `xcopa_ht`: Haitian Creole +* `xcopa_id`: Indonesian +* `xcopa_it`: Italian +* `xcopa_qu`: Cusco-Collao Quechua +* `xcopa_sw`: Kiswahili +* `xcopa_ta`: Tamil +* `xcopa_th`: Thai +* `xcopa_tr`: Turkish +* `xcopa_vi`: Vietnamese +* `xcopa_zh`: Mandarin Chinese + + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_et.yaml b/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_et.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9f2b0b73b585ab1a6e4c946237e52dba283a830a --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_et.yaml @@ -0,0 +1,14 @@ +group: xcopa +task: xcopa_et +dataset_path: xcopa +dataset_name: et +output_type: multiple_choice +validation_split: validation +test_split: test +doc_to_text: !function utils.doc_to_text_et +doc_to_target: label +doc_to_choice: !function utils.doc_to_choice +metric_list: + - metric: acc +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_ht.yaml b/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_ht.yaml new file mode 100644 index 0000000000000000000000000000000000000000..21e22e1a6ecfe560de9f8ee2f19423b182d0df39 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_ht.yaml @@ -0,0 +1,4 @@ +include: default_et.yaml +task: xcopa_ht +dataset_name: ht +doc_to_text: !function utils.doc_to_text_ht diff --git a/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_id.yaml b/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_id.yaml new file mode 100644 index 0000000000000000000000000000000000000000..08fda55c8bba30023936fc11c2efa8de6007125c --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_id.yaml @@ -0,0 +1,4 @@ +include: default_et.yaml +task: xcopa_id +dataset_name: id +doc_to_text: !function utils.doc_to_text_id diff --git a/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_sw.yaml b/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_sw.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4174cb0ef3b639ad5d2817dc45640c66bd9401c7 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_sw.yaml @@ -0,0 +1,4 @@ +include: default_et.yaml +task: xcopa_sw +dataset_name: sw +doc_to_text: !function utils.doc_to_text_sw 
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_ta.yaml b/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_ta.yaml new file mode 100644 index 0000000000000000000000000000000000000000..216cacf89bd233858e613909e32e4b909c6bb338 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_ta.yaml @@ -0,0 +1,4 @@ +include: default_et.yaml +task: xcopa_ta +dataset_name: ta +doc_to_text: !function utils.doc_to_text_ta diff --git a/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_th.yaml b/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_th.yaml new file mode 100644 index 0000000000000000000000000000000000000000..90346b8c85be2ccff6e12ffcd64f3bd9ccb1ed70 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_th.yaml @@ -0,0 +1,4 @@ +include: default_et.yaml +task: xcopa_th +dataset_name: th +doc_to_text: !function utils.doc_to_text_th diff --git a/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_tr.yaml b/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_tr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..81dac28670f00227b641fe4af46ad1542f7d173e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_tr.yaml @@ -0,0 +1,4 @@ +include: default_et.yaml +task: xcopa_tr +dataset_name: tr +doc_to_text: !function utils.doc_to_text_tr diff --git a/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_zh.yaml b/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_zh.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ad681e6a86dca8a3aae5b06af8835eb96bf1768c --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/xcopa/default_zh.yaml @@ -0,0 +1,4 @@ +include: default_et.yaml +task: xcopa_zh +dataset_name: zh +doc_to_text: !function utils.doc_to_text_zh diff --git a/lm-evaluation/build/lib/lm_eval/tasks/xcopa/utils.py b/lm-evaluation/build/lib/lm_eval/tasks/xcopa/utils.py new file mode 100644 index 
0000000000000000000000000000000000000000..fe9d85920baa7098fd20f853da6eadcbc787dedd --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/xcopa/utils.py @@ -0,0 +1,114 @@ +from functools import partial + + +def convert_choice(choice): + return choice[0].lower() + choice[1:] + + +def doc_to_text(doc, connector): + # Drop the period + conn = connector[doc["question"]] + return doc["premise"].strip()[:-1] + f" {conn}" + + +def doc_to_choice(doc): + return [convert_choice(doc["choice1"]), convert_choice(doc["choice2"])] + + +doc_to_text_et = partial( + doc_to_text, + connector={ + "cause": "sest", + "effect": "seetõttu", + }, +) + + +doc_to_text_ht = partial( + doc_to_text, + connector={ + "cause": "poukisa", + "effect": "donk sa", + }, +) + + +doc_to_text_it = partial( + doc_to_text, + connector={ + "cause": "perché", + "effect": "quindi", + }, +) + + +doc_to_text_id = partial( + doc_to_text, + connector={ + "cause": "karena", + "effect": "maka", + }, +) + + +doc_to_text_qu = partial( + doc_to_text, + connector={ + "cause": "imataq", + "effect": "chaymi", + }, +) + + +doc_to_text_sw = partial( + doc_to_text, + connector={ + "cause": "kwa sababu", + "effect": "kwa hiyo", + }, +) + + +doc_to_text_zh = partial( + doc_to_text, + connector={ + "cause": "因为", + "effect": "所以", + }, +) + + +doc_to_text_ta = partial( + doc_to_text, + connector={ + "cause": "காரணமாக", + "effect": "எனவே", + }, +) + + +doc_to_text_th = partial( + doc_to_text, + connector={ + "cause": "เพราะ", + "effect": "ดังนั้น", + }, +) + + +doc_to_text_tr = partial( + doc_to_text, + connector={ + "cause": "çünkü", + "effect": "bu yüzden", + }, +) + + +doc_to_text_vi = partial( + doc_to_text, + connector={ + "cause": "bởi vì", + "effect": "vì vậy", + }, +)