Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/_mmlu.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/_mmlu_flan_generative_template_yaml +30 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_abstract_algebra.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_anatomy.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_astronomy.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_business_ethics.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_clinical_knowledge.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_college_chemistry.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_college_mathematics.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_college_physics.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_computer_security.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_electrical_engineering.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_elementary_mathematics.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_formal_logic.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_biology.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_european_history.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_geography.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_government_and_politics.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_mathematics.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_physics.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_psychology.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_statistics.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_world_history.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_human_aging.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_human_sexuality.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_jurisprudence.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_logical_fallacies.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_management.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_medical_genetics.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_miscellaneous.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_moral_scenarios.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_nutrition.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_philosophy.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_prehistory.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_professional_accounting.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_professional_psychology.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_public_relations.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_security_studies.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_sociology.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_us_foreign_policy.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_virology.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_world_religions.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/utils.py +112 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/_mmlu.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/_mmlu_flan_loglikelihood_template_yaml +16 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_abstract_algebra.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_anatomy.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_biology.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_chemistry.yaml +6 -0
- lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_mathematics.yaml +6 -0
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/_mmlu.yaml
ADDED
@@ -0,0 +1,6 @@
+group: mmlu_flan_n_shot_generative
+task:
+  - mmlu_flan_n_shot_generative_stem
+  - mmlu_flan_n_shot_generative_other
+  - mmlu_flan_n_shot_generative_social_sciences
+  - mmlu_flan_n_shot_generative_humanities
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/_mmlu_flan_generative_template_yaml
ADDED
@@ -0,0 +1,30 @@
+group: mmlu_flan_n_shot_generative
+dataset_path: hails/mmlu_no_train # a copy of `cais/mmlu` with no auxiliary_train split
+test_split: test
+fewshot_split: dev
+output_type: generate_until
+doc_to_text: "Q: {{question.strip()}}\n(A) {{choices[0]}} (B) {{choices[1]}} (C) {{choices[2]}} (D) {{choices[3]}}\nA: "
+doc_to_target: "{{['(A)', '(B)', '(C)', '(D)'][answer]}}"
+filter_list:
+  - name: "strict-match"
+    filter:
+      - function: "take_first"
+  - name: "flexible-extract"
+    filter:
+      - function: !function utils.MultiChoiceRegexFilter
+        group_select: 0
+        regex_pattern: "(\\([A-Z]\\))"
+        ignore_case: true
+        ignore_punctuation: true
+      - function: "take_first"
+generation_kwargs:
+  until:
+    - "</s>"
+    - "Q:"
+    - "<|im_end|>"
+metric_list:
+  - metric: exact_match
+    aggregation: mean
+    higher_is_better: true
+metadata:
+  version: 1.0
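Note: the doc_to_text field above is a Jinja2 template. A minimal sketch of how it renders one MMLU document into the prompt the model sees; the sample question, choices, and answer below are made up for illustration, and lm-eval's own rendering is assumed to behave like a plain jinja2 render.

    from jinja2 import Template

    # Template string copied from doc_to_text above.
    DOC_TO_TEXT = (
        "Q: {{question.strip()}}\n"
        "(A) {{choices[0]}} (B) {{choices[1]}} (C) {{choices[2]}} (D) {{choices[3]}}\n"
        "A: "
    )

    # Hypothetical document in the hails/mmlu_no_train schema (question, choices, answer).
    doc = {
        "question": "What is 2 + 2?",
        "choices": ["3", "4", "5", "6"],
        "answer": 1,
    }

    prompt = Template(DOC_TO_TEXT).render(**doc)
    print(prompt)
    # Q: What is 2 + 2?
    # (A) 3 (B) 4 (C) 5 (D) 6
    # A:

    # doc_to_target maps the integer answer index to the lettered option string:
    target = ["(A)", "(B)", "(C)", "(D)"][doc["answer"]]
    print(target)  # (B)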
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_abstract_algebra.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "abstract_algebra"
+"description": "The following are multiple choice questions (with answers) about abstract\
+  \ algebra.\n\n"
+"group": "mmlu_flan_n_shot_generative_stem"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_abstract_algebra"
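Each per-subject file like the one above only sets a few keys and pulls everything else in via include. A rough sketch of the resulting merge semantics (lm-eval resolves include with its own loader; this is only an illustration of which keys win):

    # Hypothetical illustration: subject-specific keys are layered over the shared template.
    template = {
        "group": "mmlu_flan_n_shot_generative",
        "dataset_path": "hails/mmlu_no_train",
        "output_type": "generate_until",
        # ... remaining keys from _mmlu_flan_generative_template_yaml ...
    }
    subject = {
        "dataset_name": "abstract_algebra",
        "group": "mmlu_flan_n_shot_generative_stem",
        "task": "mmlu_flan_n_shot_generative_abstract_algebra",
    }
    effective = {**template, **subject}  # subject keys override the template defaults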
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_anatomy.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "anatomy"
+"description": "The following are multiple choice questions (with answers) about anatomy.\n\
+  \n"
+"group": "mmlu_flan_n_shot_generative_stem"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_anatomy"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_astronomy.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "astronomy"
+"description": "The following are multiple choice questions (with answers) about astronomy.\n\
+  \n"
+"group": "mmlu_flan_n_shot_generative_stem"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_astronomy"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_business_ethics.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "business_ethics"
+"description": "The following are multiple choice questions (with answers) about business\
+  \ ethics.\n\n"
+"group": "mmlu_flan_n_shot_generative_other"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_business_ethics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_clinical_knowledge.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "clinical_knowledge"
+"description": "The following are multiple choice questions (with answers) about clinical\
+  \ knowledge.\n\n"
+"group": "mmlu_flan_n_shot_generative_other"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_clinical_knowledge"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_college_chemistry.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "college_chemistry"
+"description": "The following are multiple choice questions (with answers) about college\
+  \ chemistry.\n\n"
+"group": "mmlu_flan_n_shot_generative_stem"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_college_chemistry"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_college_mathematics.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "college_mathematics"
+"description": "The following are multiple choice questions (with answers) about college\
+  \ mathematics.\n\n"
+"group": "mmlu_flan_n_shot_generative_stem"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_college_mathematics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_college_physics.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "college_physics"
+"description": "The following are multiple choice questions (with answers) about college\
+  \ physics.\n\n"
+"group": "mmlu_flan_n_shot_generative_stem"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_college_physics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_computer_security.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "computer_security"
+"description": "The following are multiple choice questions (with answers) about computer\
+  \ security.\n\n"
+"group": "mmlu_flan_n_shot_generative_stem"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_computer_security"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_electrical_engineering.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "electrical_engineering"
+"description": "The following are multiple choice questions (with answers) about electrical\
+  \ engineering.\n\n"
+"group": "mmlu_flan_n_shot_generative_stem"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_electrical_engineering"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_elementary_mathematics.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "elementary_mathematics"
+"description": "The following are multiple choice questions (with answers) about elementary\
+  \ mathematics.\n\n"
+"group": "mmlu_flan_n_shot_generative_stem"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_elementary_mathematics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_formal_logic.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "formal_logic"
+"description": "The following are multiple choice questions (with answers) about formal\
+  \ logic.\n\n"
+"group": "mmlu_flan_n_shot_generative_humanities"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_formal_logic"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_biology.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_biology"
+"description": "The following are multiple choice questions (with answers) about high\
+  \ school biology.\n\n"
+"group": "mmlu_flan_n_shot_generative_stem"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_high_school_biology"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_european_history.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_european_history"
+"description": "The following are multiple choice questions (with answers) about high\
+  \ school european history.\n\n"
+"group": "mmlu_flan_n_shot_generative_humanities"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_high_school_european_history"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_geography.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_geography"
+"description": "The following are multiple choice questions (with answers) about high\
+  \ school geography.\n\n"
+"group": "mmlu_flan_n_shot_generative_social_sciences"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_high_school_geography"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_government_and_politics.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_government_and_politics"
+"description": "The following are multiple choice questions (with answers) about high\
+  \ school government and politics.\n\n"
+"group": "mmlu_flan_n_shot_generative_social_sciences"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_high_school_government_and_politics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_mathematics.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_mathematics"
+"description": "The following are multiple choice questions (with answers) about high\
+  \ school mathematics.\n\n"
+"group": "mmlu_flan_n_shot_generative_stem"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_high_school_mathematics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_physics.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_physics"
+"description": "The following are multiple choice questions (with answers) about high\
+  \ school physics.\n\n"
+"group": "mmlu_flan_n_shot_generative_stem"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_high_school_physics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_psychology.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_psychology"
+"description": "The following are multiple choice questions (with answers) about high\
+  \ school psychology.\n\n"
+"group": "mmlu_flan_n_shot_generative_social_sciences"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_high_school_psychology"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_statistics.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_statistics"
+"description": "The following are multiple choice questions (with answers) about high\
+  \ school statistics.\n\n"
+"group": "mmlu_flan_n_shot_generative_stem"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_high_school_statistics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_world_history.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_world_history"
+"description": "The following are multiple choice questions (with answers) about high\
+  \ school world history.\n\n"
+"group": "mmlu_flan_n_shot_generative_humanities"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_high_school_world_history"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_human_aging.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "human_aging"
+"description": "The following are multiple choice questions (with answers) about human\
+  \ aging.\n\n"
+"group": "mmlu_flan_n_shot_generative_other"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_human_aging"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_human_sexuality.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "human_sexuality"
+"description": "The following are multiple choice questions (with answers) about human\
+  \ sexuality.\n\n"
+"group": "mmlu_flan_n_shot_generative_social_sciences"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_human_sexuality"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_jurisprudence.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "jurisprudence"
+"description": "The following are multiple choice questions (with answers) about jurisprudence.\n\
+  \n"
+"group": "mmlu_flan_n_shot_generative_humanities"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_jurisprudence"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_logical_fallacies.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "logical_fallacies"
+"description": "The following are multiple choice questions (with answers) about logical\
+  \ fallacies.\n\n"
+"group": "mmlu_flan_n_shot_generative_humanities"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_logical_fallacies"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_management.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "management"
+"description": "The following are multiple choice questions (with answers) about management.\n\
+  \n"
+"group": "mmlu_flan_n_shot_generative_other"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_management"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_medical_genetics.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "medical_genetics"
+"description": "The following are multiple choice questions (with answers) about medical\
+  \ genetics.\n\n"
+"group": "mmlu_flan_n_shot_generative_other"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_medical_genetics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_miscellaneous.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "miscellaneous"
+"description": "The following are multiple choice questions (with answers) about miscellaneous.\n\
+  \n"
+"group": "mmlu_flan_n_shot_generative_other"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_miscellaneous"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_moral_scenarios.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "moral_scenarios"
+"description": "The following are multiple choice questions (with answers) about moral\
+  \ scenarios.\n\n"
+"group": "mmlu_flan_n_shot_generative_humanities"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_moral_scenarios"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_nutrition.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "nutrition"
+"description": "The following are multiple choice questions (with answers) about nutrition.\n\
+  \n"
+"group": "mmlu_flan_n_shot_generative_other"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_nutrition"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_philosophy.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "philosophy"
+"description": "The following are multiple choice questions (with answers) about philosophy.\n\
+  \n"
+"group": "mmlu_flan_n_shot_generative_humanities"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_philosophy"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_prehistory.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "prehistory"
+"description": "The following are multiple choice questions (with answers) about prehistory.\n\
+  \n"
+"group": "mmlu_flan_n_shot_generative_humanities"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_prehistory"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_professional_accounting.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "professional_accounting"
+"description": "The following are multiple choice questions (with answers) about professional\
+  \ accounting.\n\n"
+"group": "mmlu_flan_n_shot_generative_other"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_professional_accounting"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_professional_psychology.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "professional_psychology"
+"description": "The following are multiple choice questions (with answers) about professional\
+  \ psychology.\n\n"
+"group": "mmlu_flan_n_shot_generative_social_sciences"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_professional_psychology"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_public_relations.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "public_relations"
+"description": "The following are multiple choice questions (with answers) about public\
+  \ relations.\n\n"
+"group": "mmlu_flan_n_shot_generative_social_sciences"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_public_relations"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_security_studies.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "security_studies"
+"description": "The following are multiple choice questions (with answers) about security\
+  \ studies.\n\n"
+"group": "mmlu_flan_n_shot_generative_social_sciences"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_security_studies"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_sociology.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "sociology"
+"description": "The following are multiple choice questions (with answers) about sociology.\n\
+  \n"
+"group": "mmlu_flan_n_shot_generative_social_sciences"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_sociology"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_us_foreign_policy.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "us_foreign_policy"
+"description": "The following are multiple choice questions (with answers) about us\
+  \ foreign policy.\n\n"
+"group": "mmlu_flan_n_shot_generative_social_sciences"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_us_foreign_policy"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_virology.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "virology"
+"description": "The following are multiple choice questions (with answers) about virology.\n\
+  \n"
+"group": "mmlu_flan_n_shot_generative_other"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_virology"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_world_religions.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "world_religions"
+"description": "The following are multiple choice questions (with answers) about world\
+  \ religions.\n\n"
+"group": "mmlu_flan_n_shot_generative_humanities"
+"include": "_mmlu_flan_generative_template_yaml"
+"task": "mmlu_flan_n_shot_generative_world_religions"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/generative/utils.py
ADDED
@@ -0,0 +1,112 @@
+import re
+import sys
+import unicodedata
+
+from lm_eval.filters.extraction import RegexFilter
+
+
+class MultiChoiceRegexFilter(RegexFilter):
+    """ """
+
+    def __init__(
+        self,
+        regex_pattern: str = r"#### (\-?[0-9\.\,]+)",
+        group_select=0,
+        fallback: str = "[invalid]",
+        ignore_case=False,
+        ignore_punctuation=False,
+        regexes_to_ignore=None,
+    ) -> None:
+        """
+        regex_pattern: The basic regex pattern to use. If fails to match, we will use the customized match procedure
+            - step 1 : We parse the choices between ([A-Z])s then try to find these choices in the response.
+            - step 2 : We parse the choice with regex :[\s]*([A-?]), where ? varies by number of choices.
+        group_select: Selects the (group_select)th match from the findall result.
+        ignore_case: Ignores the case during step 1 matching
+        ignore_punctuation: Remove the punctuation during step 1 matching
+        regexes_to_ignore: Remove these regexes during step 1 matching
+        """
+        super().__init__(regex_pattern, group_select, fallback)
+        self.ignore_case = ignore_case
+        self.ignore_punctuation = ignore_punctuation
+        self.regexes_to_ignore = regexes_to_ignore
+
+    def apply(self, resps, docs):
+        # here, we assume we have a list, in which each element is
+        # a list of model responses for some particular input/target pair.
+        # so we process each of these (same input/target response sets)
+        # independently (and keep them a list.)
+
+        def find_match(regex, resp, convert_dict={}):
+            match = regex.findall(resp)
+            if match:
+                match = match[self.group_select]
+                if isinstance(match, tuple):
+                    match = [m for m in match if m][0]
+                match = match.strip()
+                if match and match in convert_dict:
+                    match = convert_dict[match]
+            return match
+
+        punct_tbl = dict.fromkeys(
+            i
+            for i in range(sys.maxunicode)
+            if unicodedata.category(chr(i)).startswith("P")
+        )
+
+        def filter_ignores(st):
+            if self.regexes_to_ignore is not None:
+                for s in self.regexes_to_ignore:
+                    st = re.sub(s, "", st)
+
+            if self.ignore_case:
+                st = st.lower()
+
+            if self.ignore_punctuation:
+                # https://stackoverflow.com/a/266162
+                st = st.translate(punct_tbl)
+            return st
+
+        filtered_resps = []
+
+        for r, doc in zip(resps, docs):
+            fallback_regexes = []
+            choice_to_alpha = {}
+            next_alpha = "A"
+
+            without_paren_fallback_regexes = []
+            without_paren_to_target = {}
+
+            choices = doc["choices"]
+            for c in choices:
+                m = filter_ignores(c.strip())
+                fallback_regexes.append(f"{re.escape(m)}")
+                choice_to_alpha[m] = f"({next_alpha})"
+
+                without_paren_fallback_regexes.append(next_alpha)
+                without_paren_to_target[next_alpha] = f"({next_alpha})"
+
+                next_alpha = chr(ord(next_alpha) + 1)
+            fallback_regex = re.compile("|".join(fallback_regexes))
+            without_paren_fallback_regex = "|".join(without_paren_fallback_regexes)
+            without_paren_fallback_regex = re.compile(
+                f":[\s]*({without_paren_fallback_regex})"
+            )
+
+            filtered = []
+            for resp in r:
+                match = find_match(self.regex, resp)
+                if not match:
+                    match = find_match(
+                        fallback_regex, filter_ignores(resp), choice_to_alpha
+                    )
+                if not match:
+                    match = find_match(
+                        without_paren_fallback_regex, resp, without_paren_to_target
+                    )
+                if not match:
+                    match = self.fallback
+                filtered.append(match)
+            filtered_resps.append(filtered)
+
+        return filtered_resps
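A minimal usage sketch for the filter defined above, assuming lm-eval is installed so that RegexFilter imports cleanly and this module is importable as utils; the responses and document below are made up for illustration.

    from utils import MultiChoiceRegexFilter

    f = MultiChoiceRegexFilter(
        regex_pattern=r"(\([A-Z]\))",   # same pattern the generative template YAML passes in
        group_select=0,
        ignore_case=True,
        ignore_punctuation=True,
    )

    docs = [{"choices": ["Paris", "London", "Berlin", "Madrid"]}]
    resps = [[
        "(A) Paris",            # matched directly by the primary regex -> "(A)"
        "The answer is Paris",  # no "(X)" present; the choice-text fallback maps it to "(A)"
    ]]

    print(f.apply(resps, docs))  # expected: [["(A)", "(A)"]]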
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/_mmlu.yaml
ADDED
@@ -0,0 +1,6 @@
+group: mmlu_flan_n_shot_loglikelihood
+task:
+  - mmlu_flan_n_shot_loglikelihood_stem
+  - mmlu_flan_n_shot_loglikelihood_other
+  - mmlu_flan_n_shot_loglikelihood_social_sciences
+  - mmlu_flan_n_shot_loglikelihood_humanities
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/_mmlu_flan_loglikelihood_template_yaml
ADDED
@@ -0,0 +1,16 @@
+dataset_path: hails/mmlu_no_train # a copy of `cais/mmlu` with no auxiliary_train split
+test_split: test
+fewshot_split: dev
+output_type: multiple_choice
+doc_to_text: "Q: {{question.strip()}}\n(A) {{choices[0]}} (B) {{choices[1]}} (C) {{choices[2]}} (D) {{choices[3]}}\nA: "
+doc_to_choice: ["(A)", "(B)", "(C)", "(D)"]
+doc_to_target: answer
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
+  - metric: acc_norm
+    aggregation: mean
+    higher_is_better: true
+metadata:
+  version: 0.0
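With both variants registered, either group can be run programmatically. A minimal sketch, assuming lm-eval is installed and its simple_evaluate entry point and hf model type are available; the model name and few-shot count are illustrative, and the exact signature may differ across lm-eval versions.

    import lm_eval

    # Sketch: evaluate the generative and loglikelihood MMLU FLAN-style groups 5-shot.
    results = lm_eval.simple_evaluate(
        model="hf",
        model_args="pretrained=EleutherAI/pythia-160m",
        tasks=["mmlu_flan_n_shot_generative", "mmlu_flan_n_shot_loglikelihood"],
        num_fewshot=5,
    )
    print(results["results"])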
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_abstract_algebra.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "abstract_algebra"
+"description": "The following are multiple choice questions (with answers) about abstract\
+  \ algebra.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_stem"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_abstract_algebra"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_anatomy.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "anatomy"
+"description": "The following are multiple choice questions (with answers) about anatomy.\n\
+  \n"
+"group": "mmlu_flan_n_shot_loglikelihood_stem"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_anatomy"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_biology.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "college_biology"
+"description": "The following are multiple choice questions (with answers) about college\
+  \ biology.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_stem"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_college_biology"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_chemistry.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "college_chemistry"
+"description": "The following are multiple choice questions (with answers) about college\
+  \ chemistry.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_stem"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_college_chemistry"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_mathematics.yaml
ADDED
@@ -0,0 +1,6 @@
+"dataset_name": "college_mathematics"
+"description": "The following are multiple choice questions (with answers) about college\
+  \ mathematics.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_stem"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_college_mathematics"