Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See raw diff.
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/_mmlu.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_anatomy.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_astronomy.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_clinical_knowledge.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_college_biology.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_college_chemistry.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_college_mathematics.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_conceptual_physics.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_econometrics.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_electrical_engineering.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_formal_logic.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_global_facts.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_biology.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_computer_science.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_geography.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_government_and_politics.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_macroeconomics.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_mathematics.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_microeconomics.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_physics.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_psychology.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_statistics.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_human_aging.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_human_sexuality.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_management.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_marketing.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_medical_genetics.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_miscellaneous.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_prehistory.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_professional_accounting.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_professional_law.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_professional_medicine.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_professional_psychology.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_public_relations.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_security_studies.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_sociology.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_us_foreign_policy.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_world_religions.yaml +8 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu_flan_cot_zeroshot_template_yaml +36 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_anatomy.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_clinical_knowledge.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_biology.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_chemistry.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_computer_science.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_mathematics.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_medicine.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_physics.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_elementary_mathematics.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_global_facts.yaml +6 -0
lm-evaluation-harness/lm_eval/tasks/mmlu/default/_mmlu.yaml
ADDED
@@ -0,0 +1,6 @@
+group: mmlu
+task:
+  - mmlu_stem
+  - mmlu_other
+  - mmlu_social_sciences
+  - mmlu_humanities
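The group file above just maps the umbrella `mmlu` tag onto the four category subgroups. As a minimal sketch of how such a group is consumed (assuming the harness's `simple_evaluate` entry point and a placeholder checkpoint name), the whole group or any one subgroup can be requested by its tag:

```python
# Minimal sketch: evaluating the aggregated `mmlu` group with
# lm-evaluation-harness. `pretrained=gpt2` is only a placeholder.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",                    # Hugging Face transformers backend
    model_args="pretrained=gpt2",  # placeholder model checkpoint
    tasks=["mmlu"],                # expands into the four subgroups listed above
)
print(sorted(results["results"]))  # per-subject and per-group metric entries
```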
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_anatomy.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "anatomy"
+"description": "The following are multiple choice questions (with answers) about anatomy.\n\
+  \n"
+"group": "mmlu_stem"
+"group_alias": "stem"
+"include": "_default_template_yaml"
+"task": "mmlu_anatomy"
+"task_alias": "anatomy"
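Each of these per-subject files carries only the subject-specific keys (`dataset_name`, the prompt `description`, group membership, task names); everything shared comes in through `include: "_default_template_yaml"`, a template that falls outside this 50-file view. Conceptually the include resolves as a dict merge in which the subject file wins on conflicts; a rough sketch under that assumption, with illustrative file paths:

```python
# Rough sketch of include resolution: overlay per-subject keys on the shared
# template. Paths are illustrative; the harness has its own loader logic.
import yaml

def load_task_config(path: str) -> dict:
    with open(path) as f:
        cfg = yaml.safe_load(f)
    template_path = cfg.pop("include", None)
    if template_path is None:
        return cfg
    with open(template_path) as f:
        merged = yaml.safe_load(f)
    merged.update(cfg)  # subject-level keys override the template's
    return merged

cfg = load_task_config("mmlu_anatomy.yaml")
print(cfg["task"], "->", cfg["group"])  # mmlu_anatomy -> mmlu_stem
```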
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_astronomy.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "astronomy"
+"description": "The following are multiple choice questions (with answers) about astronomy.\n\
+  \n"
+"group": "mmlu_stem"
+"group_alias": "stem"
+"include": "_default_template_yaml"
+"task": "mmlu_astronomy"
+"task_alias": "astronomy"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_clinical_knowledge.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "clinical_knowledge"
+"description": "The following are multiple choice questions (with answers) about clinical\
+  \ knowledge.\n\n"
+"group": "mmlu_other"
+"group_alias": "other"
+"include": "_default_template_yaml"
+"task": "mmlu_clinical_knowledge"
+"task_alias": "clinical_knowledge"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_college_biology.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "college_biology"
+"description": "The following are multiple choice questions (with answers) about college\
+  \ biology.\n\n"
+"group": "mmlu_stem"
+"group_alias": "stem"
+"include": "_default_template_yaml"
+"task": "mmlu_college_biology"
+"task_alias": "college_biology"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_college_chemistry.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "college_chemistry"
+"description": "The following are multiple choice questions (with answers) about college\
+  \ chemistry.\n\n"
+"group": "mmlu_stem"
+"group_alias": "stem"
+"include": "_default_template_yaml"
+"task": "mmlu_college_chemistry"
+"task_alias": "college_chemistry"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_college_mathematics.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "college_mathematics"
+"description": "The following are multiple choice questions (with answers) about college\
+  \ mathematics.\n\n"
+"group": "mmlu_stem"
+"group_alias": "stem"
+"include": "_default_template_yaml"
+"task": "mmlu_college_mathematics"
+"task_alias": "college_mathematics"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_conceptual_physics.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "conceptual_physics"
+"description": "The following are multiple choice questions (with answers) about conceptual\
+  \ physics.\n\n"
+"group": "mmlu_stem"
+"group_alias": "stem"
+"include": "_default_template_yaml"
+"task": "mmlu_conceptual_physics"
+"task_alias": "conceptual_physics"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_econometrics.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "econometrics"
+"description": "The following are multiple choice questions (with answers) about econometrics.\n\
+  \n"
+"group": "mmlu_social_sciences"
+"group_alias": "social_sciences"
+"include": "_default_template_yaml"
+"task": "mmlu_econometrics"
+"task_alias": "econometrics"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_electrical_engineering.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "electrical_engineering"
+"description": "The following are multiple choice questions (with answers) about electrical\
+  \ engineering.\n\n"
+"group": "mmlu_stem"
+"group_alias": "stem"
+"include": "_default_template_yaml"
+"task": "mmlu_electrical_engineering"
+"task_alias": "electrical_engineering"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_formal_logic.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "formal_logic"
+"description": "The following are multiple choice questions (with answers) about formal\
+  \ logic.\n\n"
+"group": "mmlu_humanities"
+"group_alias": "humanities"
+"include": "_default_template_yaml"
+"task": "mmlu_formal_logic"
+"task_alias": "formal_logic"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_global_facts.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "global_facts"
+"description": "The following are multiple choice questions (with answers) about global\
+  \ facts.\n\n"
+"group": "mmlu_other"
+"group_alias": "other"
+"include": "_default_template_yaml"
+"task": "mmlu_global_facts"
+"task_alias": "global_facts"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_biology.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "high_school_biology"
+"description": "The following are multiple choice questions (with answers) about high\
+  \ school biology.\n\n"
+"group": "mmlu_stem"
+"group_alias": "stem"
+"include": "_default_template_yaml"
+"task": "mmlu_high_school_biology"
+"task_alias": "high_school_biology"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_computer_science.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "high_school_computer_science"
+"description": "The following are multiple choice questions (with answers) about high\
+  \ school computer science.\n\n"
+"group": "mmlu_stem"
+"group_alias": "stem"
+"include": "_default_template_yaml"
+"task": "mmlu_high_school_computer_science"
+"task_alias": "high_school_computer_science"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_geography.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "high_school_geography"
+"description": "The following are multiple choice questions (with answers) about high\
+  \ school geography.\n\n"
+"group": "mmlu_social_sciences"
+"group_alias": "social_sciences"
+"include": "_default_template_yaml"
+"task": "mmlu_high_school_geography"
+"task_alias": "high_school_geography"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_government_and_politics.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "high_school_government_and_politics"
+"description": "The following are multiple choice questions (with answers) about high\
+  \ school government and politics.\n\n"
+"group": "mmlu_social_sciences"
+"group_alias": "social_sciences"
+"include": "_default_template_yaml"
+"task": "mmlu_high_school_government_and_politics"
+"task_alias": "high_school_government_and_politics"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_macroeconomics.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "high_school_macroeconomics"
+"description": "The following are multiple choice questions (with answers) about high\
+  \ school macroeconomics.\n\n"
+"group": "mmlu_social_sciences"
+"group_alias": "social_sciences"
+"include": "_default_template_yaml"
+"task": "mmlu_high_school_macroeconomics"
+"task_alias": "high_school_macroeconomics"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_mathematics.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "high_school_mathematics"
+"description": "The following are multiple choice questions (with answers) about high\
+  \ school mathematics.\n\n"
+"group": "mmlu_stem"
+"group_alias": "stem"
+"include": "_default_template_yaml"
+"task": "mmlu_high_school_mathematics"
+"task_alias": "high_school_mathematics"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_microeconomics.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "high_school_microeconomics"
+"description": "The following are multiple choice questions (with answers) about high\
+  \ school microeconomics.\n\n"
+"group": "mmlu_social_sciences"
+"group_alias": "social_sciences"
+"include": "_default_template_yaml"
+"task": "mmlu_high_school_microeconomics"
+"task_alias": "high_school_microeconomics"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_physics.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "high_school_physics"
+"description": "The following are multiple choice questions (with answers) about high\
+  \ school physics.\n\n"
+"group": "mmlu_stem"
+"group_alias": "stem"
+"include": "_default_template_yaml"
+"task": "mmlu_high_school_physics"
+"task_alias": "high_school_physics"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_psychology.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "high_school_psychology"
+"description": "The following are multiple choice questions (with answers) about high\
+  \ school psychology.\n\n"
+"group": "mmlu_social_sciences"
+"group_alias": "social_sciences"
+"include": "_default_template_yaml"
+"task": "mmlu_high_school_psychology"
+"task_alias": "high_school_psychology"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_statistics.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "high_school_statistics"
+"description": "The following are multiple choice questions (with answers) about high\
+  \ school statistics.\n\n"
+"group": "mmlu_stem"
+"group_alias": "stem"
+"include": "_default_template_yaml"
+"task": "mmlu_high_school_statistics"
+"task_alias": "high_school_statistics"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_human_aging.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "human_aging"
+"description": "The following are multiple choice questions (with answers) about human\
+  \ aging.\n\n"
+"group": "mmlu_other"
+"group_alias": "other"
+"include": "_default_template_yaml"
+"task": "mmlu_human_aging"
+"task_alias": "human_aging"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_human_sexuality.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "human_sexuality"
+"description": "The following are multiple choice questions (with answers) about human\
+  \ sexuality.\n\n"
+"group": "mmlu_social_sciences"
+"group_alias": "social_sciences"
+"include": "_default_template_yaml"
+"task": "mmlu_human_sexuality"
+"task_alias": "human_sexuality"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_management.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "management"
+"description": "The following are multiple choice questions (with answers) about management.\n\
+  \n"
+"group": "mmlu_other"
+"group_alias": "other"
+"include": "_default_template_yaml"
+"task": "mmlu_management"
+"task_alias": "management"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_marketing.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "marketing"
+"description": "The following are multiple choice questions (with answers) about marketing.\n\
+  \n"
+"group": "mmlu_other"
+"group_alias": "other"
+"include": "_default_template_yaml"
+"task": "mmlu_marketing"
+"task_alias": "marketing"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_medical_genetics.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "medical_genetics"
+"description": "The following are multiple choice questions (with answers) about medical\
+  \ genetics.\n\n"
+"group": "mmlu_other"
+"group_alias": "other"
+"include": "_default_template_yaml"
+"task": "mmlu_medical_genetics"
+"task_alias": "medical_genetics"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_miscellaneous.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "miscellaneous"
+"description": "The following are multiple choice questions (with answers) about miscellaneous.\n\
+  \n"
+"group": "mmlu_other"
+"group_alias": "other"
+"include": "_default_template_yaml"
+"task": "mmlu_miscellaneous"
+"task_alias": "miscellaneous"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_prehistory.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "prehistory"
+"description": "The following are multiple choice questions (with answers) about prehistory.\n\
+  \n"
+"group": "mmlu_humanities"
+"group_alias": "humanities"
+"include": "_default_template_yaml"
+"task": "mmlu_prehistory"
+"task_alias": "prehistory"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_professional_accounting.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "professional_accounting"
+"description": "The following are multiple choice questions (with answers) about professional\
+  \ accounting.\n\n"
+"group": "mmlu_other"
+"group_alias": "other"
+"include": "_default_template_yaml"
+"task": "mmlu_professional_accounting"
+"task_alias": "professional_accounting"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_professional_law.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "professional_law"
+"description": "The following are multiple choice questions (with answers) about professional\
+  \ law.\n\n"
+"group": "mmlu_humanities"
+"group_alias": "humanities"
+"include": "_default_template_yaml"
+"task": "mmlu_professional_law"
+"task_alias": "professional_law"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_professional_medicine.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "professional_medicine"
+"description": "The following are multiple choice questions (with answers) about professional\
+  \ medicine.\n\n"
+"group": "mmlu_other"
+"group_alias": "other"
+"include": "_default_template_yaml"
+"task": "mmlu_professional_medicine"
+"task_alias": "professional_medicine"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_professional_psychology.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "professional_psychology"
+"description": "The following are multiple choice questions (with answers) about professional\
+  \ psychology.\n\n"
+"group": "mmlu_social_sciences"
+"group_alias": "social_sciences"
+"include": "_default_template_yaml"
+"task": "mmlu_professional_psychology"
+"task_alias": "professional_psychology"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_public_relations.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "public_relations"
+"description": "The following are multiple choice questions (with answers) about public\
+  \ relations.\n\n"
+"group": "mmlu_social_sciences"
+"group_alias": "social_sciences"
+"include": "_default_template_yaml"
+"task": "mmlu_public_relations"
+"task_alias": "public_relations"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_security_studies.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "security_studies"
+"description": "The following are multiple choice questions (with answers) about security\
+  \ studies.\n\n"
+"group": "mmlu_social_sciences"
+"group_alias": "social_sciences"
+"include": "_default_template_yaml"
+"task": "mmlu_security_studies"
+"task_alias": "security_studies"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_sociology.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "sociology"
+"description": "The following are multiple choice questions (with answers) about sociology.\n\
+  \n"
+"group": "mmlu_social_sciences"
+"group_alias": "social_sciences"
+"include": "_default_template_yaml"
+"task": "mmlu_sociology"
+"task_alias": "sociology"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_us_foreign_policy.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "us_foreign_policy"
+"description": "The following are multiple choice questions (with answers) about us\
+  \ foreign policy.\n\n"
+"group": "mmlu_social_sciences"
+"group_alias": "social_sciences"
+"include": "_default_template_yaml"
+"task": "mmlu_us_foreign_policy"
+"task_alias": "us_foreign_policy"
lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_world_religions.yaml
ADDED
@@ -0,0 +1,8 @@
+"dataset_name": "world_religions"
+"description": "The following are multiple choice questions (with answers) about world\
+  \ religions.\n\n"
+"group": "mmlu_humanities"
+"group_alias": "humanities"
+"include": "_default_template_yaml"
+"task": "mmlu_world_religions"
+"task_alias": "world_religions"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu.yaml
ADDED
@@ -0,0 +1,6 @@
+group: mmlu_flan_cot_zeroshot
+task:
+  - mmlu_flan_cot_zeroshot_stem
+  - mmlu_flan_cot_zeroshot_other
+  - mmlu_flan_cot_zeroshot_social_sciences
+  - mmlu_flan_cot_zeroshot_humanities
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu_flan_cot_zeroshot_template_yaml
ADDED
@@ -0,0 +1,36 @@
+dataset_path: hails/mmlu_no_train # a copy of `cais/mmlu` with no auxiliary_train split
+validation_split: validation
+fewshot_split: dev
+output_type: generate_until
+doc_to_text: "Q: {{question.strip()}}\n(A) {{choices[0]}} (B) {{choices[1]}} (C) {{choices[2]}} (D) {{choices[3]}}\nA: Let's think step by step."
+doc_to_target: "{{['(A)', '(B)', '(C)', '(D)'][answer]}}"
+filter_list:
+  - name: "strict-match"
+    filter:
+      - function: "regex"
+        regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
+      - function: "take_first"
+  - name: "flexible-extract"
+    filter:
+      - function: !function utils.MultiChoiceRegexFilter
+        group_select: -1
+        ignore_case: true
+        ignore_punctuation: true
+        regex_pattern: "(\\([A-Z]\\))"
+      - function: "take_first"
+generation_kwargs:
+  until:
+    - "</s>"
+    - "Q:"
+    - "<|im_end|>"
+  do_sample: false
+  temperature: 0.0
+num_fewshot: 0
+metric_list:
+  - metric: exact_match
+    aggregation: mean
+    higher_is_better: true
+    ignore_case: true
+    ignore_punctuation: true
+metadata:
+  version: 1.0
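A note on the two filter pipelines in this template: `strict-match` captures whatever follows an explicit answer phrase (the `(?=.)` lookahead makes the greedy capture give back one trailing character, typically the final period), while `flexible-extract` takes the last parenthesized capital letter anywhere in the completion (`group_select: -1`). Both behaviors can be checked directly with the standard `re` module on a fabricated completion:

```python
# Demonstrates the strict-match and flexible-extract patterns from the
# template above on a made-up chain-of-thought completion.
import re

completion = "Let's think step by step. Ribs shield the lungs. The answer is (B)."

# strict-match: text after an answer phrase, minus one trailing character.
strict = re.search(
    r"((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)"
    r"|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))",
    completion,
)
print(strict.group(1))  # -> (B)

# flexible-extract: last "(X)" marker, mirroring "(\\([A-Z]\\))" with
# group_select: -1 (the real MultiChoiceRegexFilter adds more normalization).
print(re.findall(r"(\([A-Z]\))", completion)[-1])  # -> (B)
```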
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_anatomy.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "anatomy"
+"description": "The following are multiple choice questions (with answers) about anatomy.\n\
+  \n"
+"group": "mmlu_flan_cot_zeroshot_stem"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_anatomy"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_clinical_knowledge.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "clinical_knowledge"
+"description": "The following are multiple choice questions (with answers) about clinical\
+  \ knowledge.\n\n"
+"group": "mmlu_flan_cot_zeroshot_other"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_clinical_knowledge"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_biology.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "college_biology"
+"description": "The following are multiple choice questions (with answers) about college\
+  \ biology.\n\n"
+"group": "mmlu_flan_cot_zeroshot_stem"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_college_biology"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_chemistry.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "college_chemistry"
+"description": "The following are multiple choice questions (with answers) about college\
+  \ chemistry.\n\n"
+"group": "mmlu_flan_cot_zeroshot_stem"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_college_chemistry"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_computer_science.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "college_computer_science"
+"description": "The following are multiple choice questions (with answers) about college\
+  \ computer science.\n\n"
+"group": "mmlu_flan_cot_zeroshot_stem"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_college_computer_science"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_mathematics.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "college_mathematics"
+"description": "The following are multiple choice questions (with answers) about college\
+  \ mathematics.\n\n"
+"group": "mmlu_flan_cot_zeroshot_stem"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_college_mathematics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_medicine.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "college_medicine"
+"description": "The following are multiple choice questions (with answers) about college\
+  \ medicine.\n\n"
+"group": "mmlu_flan_cot_zeroshot_other"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_college_medicine"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_physics.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "college_physics"
+"description": "The following are multiple choice questions (with answers) about college\
+  \ physics.\n\n"
+"group": "mmlu_flan_cot_zeroshot_stem"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_college_physics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_elementary_mathematics.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "elementary_mathematics"
+"description": "The following are multiple choice questions (with answers) about elementary\
+  \ mathematics.\n\n"
+"group": "mmlu_flan_cot_zeroshot_stem"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_elementary_mathematics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_global_facts.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "global_facts"
+"description": "The following are multiple choice questions (with answers) about global\
+  \ facts.\n\n"
+"group": "mmlu_flan_cot_zeroshot_other"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_global_facts"
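For completeness, the `doc_to_text` field in the flan_cot_zeroshot template is a Jinja2 expression over the dataset columns. Rendering it against a fabricated MMLU-style record shows the exact zero-shot CoT prompt shape (the record values are invented; the harness performs this rendering internally):

```python
# Renders the doc_to_text template from _mmlu_flan_cot_zeroshot_template_yaml
# against a fabricated record to show the emitted prompt.
from jinja2 import Template

doc = {
    "question": "Which bone belongs to the axial skeleton?",
    "choices": ["Femur", "Sternum", "Radius", "Tibia"],
    "answer": 1,  # doc_to_target would map this index to "(B)"
}

doc_to_text = (
    "Q: {{question.strip()}}\n"
    "(A) {{choices[0]}} (B) {{choices[1]}} (C) {{choices[2]}} (D) {{choices[3]}}\n"
    "A: Let's think step by step."
)
print(Template(doc_to_text).render(**doc))
# Q: Which bone belongs to the axial skeleton?
# (A) Femur (B) Sternum (C) Radius (D) Tibia
# A: Let's think step by step.
```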