diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_ejauxiliar.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_ejauxiliar.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b6cbe975fd9e1bbd927244ded96ee3f713273df1 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_ejauxiliar.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_ejauxiliar +include: eus_exams_es +task: eus_exams_es_ejauxiliar diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_ejsubalterno.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_ejsubalterno.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0adfba26dd70eb54f0f50ac2ad2d97e83aaf57b8 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_ejsubalterno.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_ejsubalterno +include: eus_exams_es +task: eus_exams_es_ejsubalterno diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehuaux.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehuaux.yaml new file mode 100644 index 0000000000000000000000000000000000000000..88316e512a4dabff3e550f84f3401216316991a7 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehuaux.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeehuaux +include: eus_exams_es +task: eus_exams_es_opeehuaux diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakienf.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakienf.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e4749cac111d410d6b573a5365ec6778ec4645f2 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakienf.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeosakienf +include: eus_exams_es +task: eus_exams_es_opeosakienf diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza4c.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza4c.yaml new file mode 100644 index 0000000000000000000000000000000000000000..faa6c4b46c0de1d1f31fdc88f0acae96889eb080 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza4c.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_osakidetza4c +include: eus_exams_es +task: eus_exams_es_osakidetza4c diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza5c.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza5c.yaml new file mode 100644 index 0000000000000000000000000000000000000000..153ce3add3ebe5f9d9da505da1e5c5affe0d1263 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza5c.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_osakidetza5c +include: eus_exams_es +task: eus_exams_es_osakidetza5c diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_ejadministrari.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_ejadministrari.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f5630ddb05864cd3d6031ea8fed96e9715fb8990 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_ejadministrari.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_ejadministrari +include: eus_exams_eu +task: eus_exams_eu_ejadministrari diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuderechoeu.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuderechoeu.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..bef6b6524507dbae9ebf9b07bbe7d41fca978996 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuderechoeu.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_opeehuderechoeu +include: eus_exams_eu +task: eus_exams_eu_opeehuderechoeu diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opegasteizkoudala.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opegasteizkoudala.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e9211f39a162360b67e84399409b1617bc5cc1dd --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opegasteizkoudala.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_opegasteizkoudala +include: eus_exams_eu +task: eus_exams_eu_opegasteizkoudala diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakivarioseu.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakivarioseu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..11801ddec6614d95862087fa85c3f7b6314d8ddc --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakivarioseu.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_opeosakivarioseu +include: eus_exams_eu +task: eus_exams_eu_opeosakivarioseu diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza7e.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza7e.yaml new file mode 100644 index 0000000000000000000000000000000000000000..666e96a0e136045c884f81fcf62d007f41ea80b7 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza7e.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_osakidetza7e +include: eus_exams_eu +task: eus_exams_eu_osakidetza7e diff --git a/lm-evaluation/lm_eval/tasks/mmlu/default/_mmlu.yaml b/lm-evaluation/lm_eval/tasks/mmlu/default/_mmlu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..584de02993acedc37b6d508967334f9a55675f6b --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/default/_mmlu.yaml @@ -0,0 +1,6 @@ +group: mmlu +task: + - mmlu_stem + - mmlu_other + - mmlu_social_sciences + - mmlu_humanities diff --git a/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_computer_security.yaml b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_computer_security.yaml new file mode 100644 index 0000000000000000000000000000000000000000..df9c4a51b6ca2715b921f2a33ef4755ebfacbc46 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_computer_security.yaml @@ -0,0 +1,8 @@ +"dataset_name": "computer_security" +"description": "The following are multiple choice questions (with answers) about computer\ + \ security.\n\n" +"group": "mmlu_stem" +"group_alias": "stem" +"include": "_default_template_yaml" +"task": "mmlu_computer_security" +"task_alias": "computer_security" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_econometrics.yaml b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_econometrics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a974fc8462d649c27ac09192d3cf84d9b162f82d --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_econometrics.yaml @@ -0,0 +1,8 @@ +"dataset_name": "econometrics" +"description": "The following are multiple choice questions (with answers) about econometrics.\n\ + \n" +"group": "mmlu_social_sciences" +"group_alias": "social_sciences" +"include": "_default_template_yaml" +"task": "mmlu_econometrics" +"task_alias": "econometrics" diff --git 
a/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_elementary_mathematics.yaml b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_elementary_mathematics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2154ab65454c9234b57d76782315c6559b0e7e0f --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_elementary_mathematics.yaml @@ -0,0 +1,8 @@ +"dataset_name": "elementary_mathematics" +"description": "The following are multiple choice questions (with answers) about elementary\ + \ mathematics.\n\n" +"group": "mmlu_stem" +"group_alias": "stem" +"include": "_default_template_yaml" +"task": "mmlu_elementary_mathematics" +"task_alias": "elementary_mathematics" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_high_school_macroeconomics.yaml b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_high_school_macroeconomics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..988d132aad3309d086e8f8549bd6710f2bd2b817 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_high_school_macroeconomics.yaml @@ -0,0 +1,8 @@ +"dataset_name": "high_school_macroeconomics" +"description": "The following are multiple choice questions (with answers) about high\ + \ school macroeconomics.\n\n" +"group": "mmlu_social_sciences" +"group_alias": "social_sciences" +"include": "_default_template_yaml" +"task": "mmlu_high_school_macroeconomics" +"task_alias": "high_school_macroeconomics" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_high_school_statistics.yaml b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_high_school_statistics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..54d70880eff3e5aa6ab8f8b233ee45b9f30ca25c --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_high_school_statistics.yaml @@ -0,0 +1,8 @@ +"dataset_name": "high_school_statistics" +"description": "The following are multiple choice questions (with answers) about high\ + \ school statistics.\n\n" +"group": "mmlu_stem" +"group_alias": "stem" +"include": "_default_template_yaml" +"task": "mmlu_high_school_statistics" +"task_alias": "high_school_statistics" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_high_school_us_history.yaml b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_high_school_us_history.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e4432fe4459fcaa3c1ccf311c094ceb9122d0637 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_high_school_us_history.yaml @@ -0,0 +1,8 @@ +"dataset_name": "high_school_us_history" +"description": "The following are multiple choice questions (with answers) about high\ + \ school us history.\n\n" +"group": "mmlu_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "mmlu_high_school_us_history" +"task_alias": "high_school_us_history" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_high_school_world_history.yaml b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_high_school_world_history.yaml new file mode 100644 index 0000000000000000000000000000000000000000..08773a20089a5551710cb82447d6ce23d65c367d --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_high_school_world_history.yaml @@ -0,0 +1,8 @@ +"dataset_name": "high_school_world_history" +"description": "The following are multiple choice questions (with answers) about high\ + \ school world history.\n\n" +"group": "mmlu_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "mmlu_high_school_world_history" 
+"task_alias": "high_school_world_history" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_jurisprudence.yaml b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_jurisprudence.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e16de5c40b2a29cfc0cadeaad5ed11df9b7503e8 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_jurisprudence.yaml @@ -0,0 +1,8 @@ +"dataset_name": "jurisprudence" +"description": "The following are multiple choice questions (with answers) about jurisprudence.\n\ + \n" +"group": "mmlu_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "mmlu_jurisprudence" +"task_alias": "jurisprudence" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_machine_learning.yaml b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_machine_learning.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2387d680b2d57d2d409ecc1bf744f512dd4fcef3 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_machine_learning.yaml @@ -0,0 +1,8 @@ +"dataset_name": "machine_learning" +"description": "The following are multiple choice questions (with answers) about machine\ + \ learning.\n\n" +"group": "mmlu_stem" +"group_alias": "stem" +"include": "_default_template_yaml" +"task": "mmlu_machine_learning" +"task_alias": "machine_learning" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_professional_law.yaml b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_professional_law.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f3a02631e28703dc8d3a6091175c49b9eb5676eb --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_professional_law.yaml @@ -0,0 +1,8 @@ +"dataset_name": "professional_law" +"description": "The following are multiple choice questions (with answers) about professional\ + \ law.\n\n" +"group": "mmlu_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "mmlu_professional_law" +"task_alias": "professional_law" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_professional_medicine.yaml b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_professional_medicine.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e8c49b5eb084d9cb2133c333f573ccbe3b8d9824 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_professional_medicine.yaml @@ -0,0 +1,8 @@ +"dataset_name": "professional_medicine" +"description": "The following are multiple choice questions (with answers) about professional\ + \ medicine.\n\n" +"group": "mmlu_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "mmlu_professional_medicine" +"task_alias": "professional_medicine" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_sociology.yaml b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_sociology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..efcbd27b8d653f2df89f52880d47fe5a461e1246 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_sociology.yaml @@ -0,0 +1,8 @@ +"dataset_name": "sociology" +"description": "The following are multiple choice questions (with answers) about sociology.\n\ + \n" +"group": "mmlu_social_sciences" +"group_alias": "social_sciences" +"include": "_default_template_yaml" +"task": "mmlu_sociology" +"task_alias": "sociology" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_virology.yaml b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_virology.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..d935f92aa80e30f773960e6127466a4d7f768448
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/mmlu/default/mmlu_virology.yaml
@@ -0,0 +1,8 @@
+"dataset_name": "virology"
+"description": "The following are multiple choice questions (with answers) about virology.\n\
+  \n"
+"group": "mmlu_other"
+"group_alias": "other"
+"include": "_default_template_yaml"
+"task": "mmlu_virology"
+"task_alias": "virology"
diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..390425c7d248a99e385c73f19d2cfa9e6d27747c
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu.yaml
@@ -0,0 +1,6 @@
+group: mmlu_flan_cot_zeroshot
+task:
+  - mmlu_flan_cot_zeroshot_stem
+  - mmlu_flan_cot_zeroshot_other
+  - mmlu_flan_cot_zeroshot_social_sciences
+  - mmlu_flan_cot_zeroshot_humanities
diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu_flan_cot_zeroshot_template_yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu_flan_cot_zeroshot_template_yaml
new file mode 100644
index 0000000000000000000000000000000000000000..83a857b08c1d6e1f4e6b0d55993c10c2fd0b5bae
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu_flan_cot_zeroshot_template_yaml
@@ -0,0 +1,36 @@
+dataset_path: hails/mmlu_no_train # a copy of `cais/mmlu` with no auxiliary_train split
+validation_split: validation
+fewshot_split: dev
+output_type: generate_until
+doc_to_text: "Q: {{question.strip()}}\n(A) {{choices[0]}} (B) {{choices[1]}} (C) {{choices[2]}} (D) {{choices[3]}}\nA: Let's think step by step."
+doc_to_target: "{{['(A)', '(B)', '(C)', '(D)'][answer]}}"
+filter_list:
+  - name: "strict-match"
+    filter:
+      - function: "regex"
+        regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
+      - function: "take_first"
+  - name: "flexible-extract"
+    filter:
+      - function: !function utils.MultiChoiceRegexFilter
+        group_select: -1
+        ignore_case: true
+        ignore_punctuation: true
+        regex_pattern: "(\\([A-Z]\\))"
+      - function: "take_first"
+generation_kwargs:
+  until:
+    - "</s>"
+    - "Q:"
+    - "<|im_end|>"
+  do_sample: false
+  temperature: 0.0
+num_fewshot: 0
+metric_list:
+  - metric: exact_match
+    aggregation: mean
+    higher_is_better: true
+    ignore_case: true
+    ignore_punctuation: true
+metadata:
+  version: 1.0
diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_abstract_algebra.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_abstract_algebra.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8609f626a25d5f37d41ab8a312dfe226e44dbbd8
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_abstract_algebra.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "abstract_algebra"
+"description": "The following are multiple choice questions (with answers) about abstract\
+  \ algebra.\n\n"
+"group": "mmlu_flan_cot_zeroshot_stem"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_abstract_algebra"
diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_anatomy.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_anatomy.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2923349d0fb02f219844b192fb38880537cb9c3a
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_anatomy.yaml
@@ -0,0 +1,6
@@ +"dataset_name": "anatomy" +"description": "The following are multiple choice questions (with answers) about anatomy.\n\ + \n" +"group": "mmlu_flan_cot_zeroshot_stem" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_anatomy" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_astronomy.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_astronomy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e5ffd8ffe302442af98c246e9d7bac54c063d81f --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_astronomy.yaml @@ -0,0 +1,6 @@ +"dataset_name": "astronomy" +"description": "The following are multiple choice questions (with answers) about astronomy.\n\ + \n" +"group": "mmlu_flan_cot_zeroshot_stem" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_astronomy" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_business_ethics.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_business_ethics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a64285711f2f23775ddc37431b5c39f5a589f9ec --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_business_ethics.yaml @@ -0,0 +1,6 @@ +"dataset_name": "business_ethics" +"description": "The following are multiple choice questions (with answers) about business\ + \ ethics.\n\n" +"group": "mmlu_flan_cot_zeroshot_other" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_business_ethics" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_clinical_knowledge.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_clinical_knowledge.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e3655230cf9703b3d02bfa457f667d02676a3aa7 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_clinical_knowledge.yaml @@ -0,0 +1,6 @@ +"dataset_name": "clinical_knowledge" +"description": "The following are multiple choice questions (with answers) about clinical\ + \ knowledge.\n\n" +"group": "mmlu_flan_cot_zeroshot_other" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_clinical_knowledge" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_biology.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_biology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..736bb6decd83da6ac642049a9bcf3c9af0f562f2 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_biology.yaml @@ -0,0 +1,6 @@ +"dataset_name": "college_biology" +"description": "The following are multiple choice questions (with answers) about college\ + \ biology.\n\n" +"group": "mmlu_flan_cot_zeroshot_stem" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_college_biology" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_chemistry.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_chemistry.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7b7199664bb086da78f2314bd69f69177c6116a4 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_chemistry.yaml @@ -0,0 +1,6 @@ +"dataset_name": "college_chemistry" +"description": "The following are multiple choice questions (with answers) about college\ + \ chemistry.\n\n" +"group": "mmlu_flan_cot_zeroshot_stem" 
+"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_college_chemistry" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_mathematics.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_mathematics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..210eb127c831b310084a9d2df11f1dc29cb62d87 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_mathematics.yaml @@ -0,0 +1,6 @@ +"dataset_name": "college_mathematics" +"description": "The following are multiple choice questions (with answers) about college\ + \ mathematics.\n\n" +"group": "mmlu_flan_cot_zeroshot_stem" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_college_mathematics" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_medicine.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_medicine.yaml new file mode 100644 index 0000000000000000000000000000000000000000..51c8a3c09ef78f6d7d8249c4f244a5c63c249cd5 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_medicine.yaml @@ -0,0 +1,6 @@ +"dataset_name": "college_medicine" +"description": "The following are multiple choice questions (with answers) about college\ + \ medicine.\n\n" +"group": "mmlu_flan_cot_zeroshot_other" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_college_medicine" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_physics.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_physics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..319c72142454df340260c1a6dd3f59f244a57eab --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_physics.yaml @@ -0,0 +1,6 @@ +"dataset_name": "college_physics" +"description": "The following are multiple choice questions (with answers) about college\ + \ physics.\n\n" +"group": "mmlu_flan_cot_zeroshot_stem" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_college_physics" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_computer_security.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_computer_security.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ae4bda965ef4dd839b400959b391a71f1fcddcd3 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_computer_security.yaml @@ -0,0 +1,6 @@ +"dataset_name": "computer_security" +"description": "The following are multiple choice questions (with answers) about computer\ + \ security.\n\n" +"group": "mmlu_flan_cot_zeroshot_stem" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_computer_security" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_econometrics.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_econometrics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9ff25bba4657133ff33a491c641589aed6476114 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_econometrics.yaml @@ -0,0 +1,6 @@ +"dataset_name": "econometrics" +"description": "The following are multiple choice questions (with answers) about econometrics.\n\ + \n" +"group": "mmlu_flan_cot_zeroshot_social_sciences" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_econometrics" diff --git 
a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_electrical_engineering.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_electrical_engineering.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ca10a43e910d6fe090af53ffaf90e645e1ad69a1 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_electrical_engineering.yaml @@ -0,0 +1,6 @@ +"dataset_name": "electrical_engineering" +"description": "The following are multiple choice questions (with answers) about electrical\ + \ engineering.\n\n" +"group": "mmlu_flan_cot_zeroshot_stem" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_electrical_engineering" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_elementary_mathematics.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_elementary_mathematics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..065c92d200e4df5a303cc1f4269ada803524cd89 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_elementary_mathematics.yaml @@ -0,0 +1,6 @@ +"dataset_name": "elementary_mathematics" +"description": "The following are multiple choice questions (with answers) about elementary\ + \ mathematics.\n\n" +"group": "mmlu_flan_cot_zeroshot_stem" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_elementary_mathematics" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_formal_logic.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_formal_logic.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ec2d323cae468b5efd9739929a0822dfb853e233 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_formal_logic.yaml @@ -0,0 +1,6 @@ +"dataset_name": "formal_logic" +"description": "The following are multiple choice questions (with answers) about formal\ + \ logic.\n\n" +"group": "mmlu_flan_cot_zeroshot_humanities" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_formal_logic" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_global_facts.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_global_facts.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b1e29a3e1379f3e09c72a4c25e92855c8ba42bd3 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_global_facts.yaml @@ -0,0 +1,6 @@ +"dataset_name": "global_facts" +"description": "The following are multiple choice questions (with answers) about global\ + \ facts.\n\n" +"group": "mmlu_flan_cot_zeroshot_other" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_global_facts" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_biology.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_biology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0e5794db64588edef17d3c396f96ef870383cfa3 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_biology.yaml @@ -0,0 +1,6 @@ +"dataset_name": "high_school_biology" +"description": "The following are multiple choice questions (with answers) about high\ + \ school biology.\n\n" +"group": "mmlu_flan_cot_zeroshot_stem" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_high_school_biology" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_chemistry.yaml 
b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_chemistry.yaml new file mode 100644 index 0000000000000000000000000000000000000000..eba398b0393383621f3d688ea5356409eb56b215 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_chemistry.yaml @@ -0,0 +1,6 @@ +"dataset_name": "high_school_chemistry" +"description": "The following are multiple choice questions (with answers) about high\ + \ school chemistry.\n\n" +"group": "mmlu_flan_cot_zeroshot_stem" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_high_school_chemistry" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_computer_science.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_computer_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4a69dbb3b9ca30313c8d333763a10796d1692bca --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_computer_science.yaml @@ -0,0 +1,6 @@ +"dataset_name": "high_school_computer_science" +"description": "The following are multiple choice questions (with answers) about high\ + \ school computer science.\n\n" +"group": "mmlu_flan_cot_zeroshot_stem" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_high_school_computer_science" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_european_history.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_european_history.yaml new file mode 100644 index 0000000000000000000000000000000000000000..54eafb51d385f7afd35a78d2ed8098565d1c5297 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_european_history.yaml @@ -0,0 +1,6 @@ +"dataset_name": "high_school_european_history" +"description": "The following are multiple choice questions (with answers) about high\ + \ school european history.\n\n" +"group": "mmlu_flan_cot_zeroshot_humanities" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_high_school_european_history" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_geography.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_geography.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0898c87664e5250530d6998337c8fc601e1b876d --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_geography.yaml @@ -0,0 +1,6 @@ +"dataset_name": "high_school_geography" +"description": "The following are multiple choice questions (with answers) about high\ + \ school geography.\n\n" +"group": "mmlu_flan_cot_zeroshot_social_sciences" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_high_school_geography" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_government_and_politics.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_government_and_politics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d82fb6b01612fa74173989d6a7297d4f7521d3dd --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_government_and_politics.yaml @@ -0,0 +1,6 @@ +"dataset_name": "high_school_government_and_politics" +"description": "The following are multiple choice questions (with answers) about high\ + \ school government and politics.\n\n" +"group": "mmlu_flan_cot_zeroshot_social_sciences" 
+"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_high_school_government_and_politics" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_macroeconomics.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_macroeconomics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b94fc2a6e356b8099d0b7a28f2ba07395d6b8599 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_macroeconomics.yaml @@ -0,0 +1,6 @@ +"dataset_name": "high_school_macroeconomics" +"description": "The following are multiple choice questions (with answers) about high\ + \ school macroeconomics.\n\n" +"group": "mmlu_flan_cot_zeroshot_social_sciences" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_high_school_macroeconomics" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_microeconomics.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_microeconomics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..75a08c48bd9b4c7b44df1b07064bf920b7c4f8af --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_microeconomics.yaml @@ -0,0 +1,6 @@ +"dataset_name": "high_school_microeconomics" +"description": "The following are multiple choice questions (with answers) about high\ + \ school microeconomics.\n\n" +"group": "mmlu_flan_cot_zeroshot_social_sciences" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_high_school_microeconomics" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_physics.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_physics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..177d42da511dde59f1baf00f1212834b483e3426 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_physics.yaml @@ -0,0 +1,6 @@ +"dataset_name": "high_school_physics" +"description": "The following are multiple choice questions (with answers) about high\ + \ school physics.\n\n" +"group": "mmlu_flan_cot_zeroshot_stem" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_high_school_physics" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_psychology.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_psychology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d5d477233122391157ebe6ce3b817902c5a39712 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_psychology.yaml @@ -0,0 +1,6 @@ +"dataset_name": "high_school_psychology" +"description": "The following are multiple choice questions (with answers) about high\ + \ school psychology.\n\n" +"group": "mmlu_flan_cot_zeroshot_social_sciences" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_high_school_psychology" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_statistics.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_statistics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b245cf9e51337a10a12ae8c9a5df6f92e0144ec0 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_statistics.yaml @@ -0,0 +1,6 @@ +"dataset_name": "high_school_statistics" +"description": "The following are 
multiple choice questions (with answers) about high\ + \ school statistics.\n\n" +"group": "mmlu_flan_cot_zeroshot_stem" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_high_school_statistics" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_us_history.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_us_history.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2e187da2980912702558bab3098b363f3e68efcd --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_us_history.yaml @@ -0,0 +1,6 @@ +"dataset_name": "high_school_us_history" +"description": "The following are multiple choice questions (with answers) about high\ + \ school us history.\n\n" +"group": "mmlu_flan_cot_zeroshot_humanities" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_high_school_us_history" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_human_aging.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_human_aging.yaml new file mode 100644 index 0000000000000000000000000000000000000000..230781b4a5d4293f555f8fe1f6395d818de5eca9 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_human_aging.yaml @@ -0,0 +1,6 @@ +"dataset_name": "human_aging" +"description": "The following are multiple choice questions (with answers) about human\ + \ aging.\n\n" +"group": "mmlu_flan_cot_zeroshot_other" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_human_aging" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_jurisprudence.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_jurisprudence.yaml new file mode 100644 index 0000000000000000000000000000000000000000..62b86dd01f37099fa29c1bdfbba5f1beb97fb509 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_jurisprudence.yaml @@ -0,0 +1,6 @@ +"dataset_name": "jurisprudence" +"description": "The following are multiple choice questions (with answers) about jurisprudence.\n\ + \n" +"group": "mmlu_flan_cot_zeroshot_humanities" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_jurisprudence" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_logical_fallacies.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_logical_fallacies.yaml new file mode 100644 index 0000000000000000000000000000000000000000..07ae843871ca58cdce30815b0088cd7f7027e0d3 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_logical_fallacies.yaml @@ -0,0 +1,6 @@ +"dataset_name": "logical_fallacies" +"description": "The following are multiple choice questions (with answers) about logical\ + \ fallacies.\n\n" +"group": "mmlu_flan_cot_zeroshot_humanities" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_logical_fallacies" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_machine_learning.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_machine_learning.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cd4813effe182c7e3fc8270d23fd1f3e7d882836 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_machine_learning.yaml @@ -0,0 +1,6 @@ +"dataset_name": "machine_learning" +"description": "The following are multiple choice questions (with answers) about machine\ + \ learning.\n\n" +"group": 
"mmlu_flan_cot_zeroshot_stem" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_machine_learning" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_management.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_management.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b7164c1cfcb00b9359809173da72dad15383143c --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_management.yaml @@ -0,0 +1,6 @@ +"dataset_name": "management" +"description": "The following are multiple choice questions (with answers) about management.\n\ + \n" +"group": "mmlu_flan_cot_zeroshot_other" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_management" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_marketing.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_marketing.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0827f78df766331ccdf6235a4b4120a089cdd9c9 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_marketing.yaml @@ -0,0 +1,6 @@ +"dataset_name": "marketing" +"description": "The following are multiple choice questions (with answers) about marketing.\n\ + \n" +"group": "mmlu_flan_cot_zeroshot_other" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_marketing" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_medical_genetics.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_medical_genetics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1706ee5b4ac151a36e7a75b638d75e8194696f8d --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_medical_genetics.yaml @@ -0,0 +1,6 @@ +"dataset_name": "medical_genetics" +"description": "The following are multiple choice questions (with answers) about medical\ + \ genetics.\n\n" +"group": "mmlu_flan_cot_zeroshot_other" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_medical_genetics" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_miscellaneous.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_miscellaneous.yaml new file mode 100644 index 0000000000000000000000000000000000000000..295d801a470ac7671d6f75c0527b6d508a70a6f8 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_miscellaneous.yaml @@ -0,0 +1,6 @@ +"dataset_name": "miscellaneous" +"description": "The following are multiple choice questions (with answers) about miscellaneous.\n\ + \n" +"group": "mmlu_flan_cot_zeroshot_other" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_miscellaneous" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_moral_disputes.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_moral_disputes.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a4595f06991b1096e49325c897fbe6f0b3eea6c2 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_moral_disputes.yaml @@ -0,0 +1,6 @@ +"dataset_name": "moral_disputes" +"description": "The following are multiple choice questions (with answers) about moral\ + \ disputes.\n\n" +"group": "mmlu_flan_cot_zeroshot_humanities" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_moral_disputes" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_moral_scenarios.yaml 
b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_moral_scenarios.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a0e41ae4ea1346ac76c56bcae3682b86610fe04f --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_moral_scenarios.yaml @@ -0,0 +1,6 @@ +"dataset_name": "moral_scenarios" +"description": "The following are multiple choice questions (with answers) about moral\ + \ scenarios.\n\n" +"group": "mmlu_flan_cot_zeroshot_humanities" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_moral_scenarios" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_prehistory.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_prehistory.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3233ba4e3b7396b96ea2f3e4788d2e70b670abf4 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_prehistory.yaml @@ -0,0 +1,6 @@ +"dataset_name": "prehistory" +"description": "The following are multiple choice questions (with answers) about prehistory.\n\ + \n" +"group": "mmlu_flan_cot_zeroshot_humanities" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_prehistory" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_law.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_law.yaml new file mode 100644 index 0000000000000000000000000000000000000000..73d115d73c03e608117f7d3f6bf17efd688041ba --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_law.yaml @@ -0,0 +1,6 @@ +"dataset_name": "professional_law" +"description": "The following are multiple choice questions (with answers) about professional\ + \ law.\n\n" +"group": "mmlu_flan_cot_zeroshot_humanities" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_professional_law" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_medicine.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_medicine.yaml new file mode 100644 index 0000000000000000000000000000000000000000..47cf9573d08e1f911af7fbfbd984358fecd275d9 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_medicine.yaml @@ -0,0 +1,6 @@ +"dataset_name": "professional_medicine" +"description": "The following are multiple choice questions (with answers) about professional\ + \ medicine.\n\n" +"group": "mmlu_flan_cot_zeroshot_other" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_professional_medicine" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_public_relations.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_public_relations.yaml new file mode 100644 index 0000000000000000000000000000000000000000..14d02c3a3c015e78cb780c646461fb7ac70a5ce4 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_public_relations.yaml @@ -0,0 +1,6 @@ +"dataset_name": "public_relations" +"description": "The following are multiple choice questions (with answers) about public\ + \ relations.\n\n" +"group": "mmlu_flan_cot_zeroshot_social_sciences" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_public_relations" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_security_studies.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_security_studies.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..cae551e2adf6e28153c612f7059f527c116f3d10 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_security_studies.yaml @@ -0,0 +1,6 @@ +"dataset_name": "security_studies" +"description": "The following are multiple choice questions (with answers) about security\ + \ studies.\n\n" +"group": "mmlu_flan_cot_zeroshot_social_sciences" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_security_studies" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_sociology.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_sociology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..45b94193c55ac43e7ee6dc33462e128748a68c21 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_sociology.yaml @@ -0,0 +1,6 @@ +"dataset_name": "sociology" +"description": "The following are multiple choice questions (with answers) about sociology.\n\ + \n" +"group": "mmlu_flan_cot_zeroshot_social_sciences" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_sociology" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_us_foreign_policy.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_us_foreign_policy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..52e482775f7f5873ccf19628a1e4bb85baf4b2b0 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_us_foreign_policy.yaml @@ -0,0 +1,6 @@ +"dataset_name": "us_foreign_policy" +"description": "The following are multiple choice questions (with answers) about us\ + \ foreign policy.\n\n" +"group": "mmlu_flan_cot_zeroshot_social_sciences" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_us_foreign_policy" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_virology.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_virology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fda1af06360f4f753dee2f3ca6aa4f2720558965 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_virology.yaml @@ -0,0 +1,6 @@ +"dataset_name": "virology" +"description": "The following are multiple choice questions (with answers) about virology.\n\ + \n" +"group": "mmlu_flan_cot_zeroshot_other" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_virology" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_world_religions.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_world_religions.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4051828232c392e69408f8ad71d4bbbbc41a260f --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_world_religions.yaml @@ -0,0 +1,6 @@ +"dataset_name": "world_religions" +"description": "The following are multiple choice questions (with answers) about world\ + \ religions.\n\n" +"group": "mmlu_flan_cot_zeroshot_humanities" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_world_religions" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/utils.py b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..72246935de8cf0cf8b256fd1e6c87dfbbb90a2ad --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/utils.py @@ -0,0 +1,112 @@ +import re +import sys +import 
unicodedata
+
+from lm_eval.filters.extraction import RegexFilter
+
+
+class MultiChoiceRegexFilter(RegexFilter):
+    """ """
+
+    def __init__(
+        self,
+        regex_pattern: str = r"#### (\-?[0-9\.\,]+)",
+        group_select=0,
+        fallback: str = "[invalid]",
+        ignore_case=False,
+        ignore_punctuation=False,
+        regexes_to_ignore=None,
+    ) -> None:
+        """
+        regex_pattern: The basic regex pattern to use. If fails to match, we will use the customized match procedure
+                        - step 1 : We parse the choices between ([A-Z])s then try to find these choices in the response.
+                        - step 2 : We parse the choice with regex :[\s]*([A-?]), where ? varies by number of choices.
+        group_select: Selects the (group_select)th match from the findall result.
+        ignore_case: Ignores the case during step 1 matching
+        ignore_punctuation: Remove the punctuation during step 1 matching
+        regexes_to_ignore: Remove these regexes during step 1 matching
+        """
+        super().__init__(regex_pattern, group_select, fallback)
+        self.ignore_case = ignore_case
+        self.ignore_punctuation = ignore_punctuation
+        self.regexes_to_ignore = regexes_to_ignore
+
+    def apply(self, resps, docs):
+        # here, we assume we have a list, in which each element is
+        # a list of model responses for some particular input/target pair.
+        # so we process each of these (same input/target response sets)
+        # independently (and keep them a list.)
+
+        def find_match(regex, resp, convert_dict={}):
+            match = regex.findall(resp)
+            if match:
+                match = match[self.group_select]
+                if isinstance(match, tuple):
+                    match = [m for m in match if m][0]
+                match = match.strip()
+                if match and match in convert_dict:
+                    match = convert_dict[match]
+            return match
+
+        punct_tbl = dict.fromkeys(
+            i
+            for i in range(sys.maxunicode)
+            if unicodedata.category(chr(i)).startswith("P")
+        )
+
+        def filter_ignores(st):
+            if self.regexes_to_ignore is not None:
+                for s in self.regexes_to_ignore:
+                    st = re.sub(s, "", st)
+
+            if self.ignore_case:
+                st = st.lower()
+
+            if self.ignore_punctuation:
+                # https://stackoverflow.com/a/266162
+                st = st.translate(punct_tbl)
+            return st
+
+        filtered_resps = []
+
+        for r, doc in zip(resps, docs):
+            fallback_regexes = []
+            choice_to_alpha = {}
+            next_alpha = "A"
+
+            without_paren_fallback_regexes = []
+            without_paren_to_target = {}
+
+            choices = doc["choices"]
+            for c in choices:
+                m = filter_ignores(c.strip())
+                fallback_regexes.append(f"{re.escape(m)}")
+                choice_to_alpha[m] = f"({next_alpha})"
+
+                without_paren_fallback_regexes.append(next_alpha)
+                without_paren_to_target[next_alpha] = f"({next_alpha})"
+
+                next_alpha = chr(ord(next_alpha) + 1)
+            fallback_regex = re.compile("|".join(fallback_regexes))
+            without_paren_fallback_regex = "|".join(without_paren_fallback_regexes)
+            without_paren_fallback_regex = re.compile(
+                f":[\s]*({without_paren_fallback_regex})"
+            )
+
+            filtered = []
+            for resp in r:
+                match = find_match(self.regex, resp)
+                if not match:
+                    match = find_match(
+                        fallback_regex, filter_ignores(resp), choice_to_alpha
+                    )
+                    if not match:
+                        match = find_match(
+                            without_paren_fallback_regex, resp, without_paren_to_target
+                        )
+                    if not match:
+                        match = self.fallback
+                filtered.append(match)
+            filtered_resps.append(filtered)
+
+        return filtered_resps
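
Note on the answer-extraction filters added above: the "strict-match" regex in _mmlu_flan_cot_zeroshot_template_yaml and the "flexible-extract" pattern handled by utils.MultiChoiceRegexFilter are easiest to see on a concrete response. The following is a minimal standalone sketch, not part of the diff: it replays the same two regex patterns outside the harness, and the helper name extract and the sample responses are invented for illustration.

import re

# "strict-match" pattern from the template above: text after cues such as
# "The answer is ..." up to the trailing character.
STRICT = re.compile(
    r"((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)"
    r"|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
)
# "flexible-extract" pattern: any parenthesised capital letter, e.g. "(B)".
FLEXIBLE = re.compile(r"(\([A-Z]\))")


def extract(resp: str) -> str:
    """Prefer the cue-based strict match; fall back to the last "(X)" mentioned."""
    m = STRICT.findall(resp)
    if m:
        # findall yields one tuple per match (one slot per alternation group);
        # keep the first non-empty slot, mirroring the harness RegexFilter.
        groups = [g for g in m[0] if g]
        if groups:
            return groups[0].strip()
    m = FLEXIBLE.findall(resp)
    if m:
        return m[-1]  # group_select: -1 in the template -> take the last occurrence
    return "[invalid]"  # fallback value used by MultiChoiceRegexFilter


print(extract("Let's think step by step. 2 + 2 = 4. The answer is (B)."))  # (B)
print(extract("Between (A) and (C), the stronger option is (C)."))  # (C)
print(extract("No usable answer here."))  # [invalid]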