applied-ai-018 committed

Commit 29b817d · verified · 1 parent: df39d72

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.
Files changed (50)
  1. ckpts/universal/global_step20/zero/6.attention.query_key_value.weight/fp32.pt +3 -0
  2. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_abstract_algebra.yaml +6 -0
  3. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_astronomy.yaml +6 -0
  4. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_business_ethics.yaml +6 -0
  5. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_computer_security.yaml +6 -0
  6. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_conceptual_physics.yaml +6 -0
  7. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_econometrics.yaml +6 -0
  8. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_electrical_engineering.yaml +6 -0
  9. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_formal_logic.yaml +6 -0
  10. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_biology.yaml +6 -0
  11. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_chemistry.yaml +6 -0
  12. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_european_history.yaml +6 -0
  13. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_geography.yaml +6 -0
  14. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_mathematics.yaml +6 -0
  15. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_psychology.yaml +6 -0
  16. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_world_history.yaml +6 -0
  17. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_international_law.yaml +6 -0
  18. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_management.yaml +6 -0
  19. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_moral_disputes.yaml +6 -0
  20. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_philosophy.yaml +6 -0
  21. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_psychology.yaml +6 -0
  22. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_public_relations.yaml +6 -0
  23. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_sociology.yaml +6 -0
  24. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/utils.py +112 -0
  25. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_astronomy.yaml +6 -0
  26. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_business_ethics.yaml +6 -0
  27. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_clinical_knowledge.yaml +6 -0
  28. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_computer_science.yaml +6 -0
  29. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_medicine.yaml +6 -0
  30. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_physics.yaml +6 -0
  31. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_computer_security.yaml +6 -0
  32. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_econometrics.yaml +6 -0
  33. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_electrical_engineering.yaml +6 -0
  34. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_elementary_mathematics.yaml +6 -0
  35. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_formal_logic.yaml +6 -0
  36. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_global_facts.yaml +6 -0
  37. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_biology.yaml +6 -0
  38. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_chemistry.yaml +6 -0
  39. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_computer_science.yaml +6 -0
  40. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_geography.yaml +6 -0
  41. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_government_and_politics.yaml +6 -0
  42. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_mathematics.yaml +6 -0
  43. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_world_history.yaml +6 -0
  44. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_human_aging.yaml +6 -0
  45. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_jurisprudence.yaml +6 -0
  46. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_machine_learning.yaml +6 -0
  47. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_marketing.yaml +6 -0
  48. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_medical_genetics.yaml +6 -0
  49. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_moral_disputes.yaml +6 -0
  50. lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_philosophy.yaml +6 -0
ckpts/universal/global_step20/zero/6.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bfa5af5f97e2261795962c3795a71294432a85c195b619cab660207907a6dfb6
+ size 50332749
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_abstract_algebra.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "abstract_algebra"
+ "description": "The following are multiple choice questions (with answers) about abstract\
+ \ algebra.\n\n"
+ "group": "mmlu_flan_cot_zeroshot_stem"
+ "include": "_mmlu_flan_cot_zeroshot_template_yaml"
+ "task": "mmlu_flan_cot_zeroshot_abstract_algebra"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_astronomy.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "astronomy"
+ "description": "The following are multiple choice questions (with answers) about astronomy.\n\
+ \n"
+ "group": "mmlu_flan_cot_zeroshot_stem"
+ "include": "_mmlu_flan_cot_zeroshot_template_yaml"
+ "task": "mmlu_flan_cot_zeroshot_astronomy"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_business_ethics.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "business_ethics"
+ "description": "The following are multiple choice questions (with answers) about business\
+ \ ethics.\n\n"
+ "group": "mmlu_flan_cot_zeroshot_other"
+ "include": "_mmlu_flan_cot_zeroshot_template_yaml"
+ "task": "mmlu_flan_cot_zeroshot_business_ethics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_computer_security.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "computer_security"
+ "description": "The following are multiple choice questions (with answers) about computer\
+ \ security.\n\n"
+ "group": "mmlu_flan_cot_zeroshot_stem"
+ "include": "_mmlu_flan_cot_zeroshot_template_yaml"
+ "task": "mmlu_flan_cot_zeroshot_computer_security"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_conceptual_physics.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "conceptual_physics"
+ "description": "The following are multiple choice questions (with answers) about conceptual\
+ \ physics.\n\n"
+ "group": "mmlu_flan_cot_zeroshot_stem"
+ "include": "_mmlu_flan_cot_zeroshot_template_yaml"
+ "task": "mmlu_flan_cot_zeroshot_conceptual_physics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_econometrics.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "econometrics"
+ "description": "The following are multiple choice questions (with answers) about econometrics.\n\
+ \n"
+ "group": "mmlu_flan_cot_zeroshot_social_sciences"
+ "include": "_mmlu_flan_cot_zeroshot_template_yaml"
+ "task": "mmlu_flan_cot_zeroshot_econometrics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_electrical_engineering.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "electrical_engineering"
+ "description": "The following are multiple choice questions (with answers) about electrical\
+ \ engineering.\n\n"
+ "group": "mmlu_flan_cot_zeroshot_stem"
+ "include": "_mmlu_flan_cot_zeroshot_template_yaml"
+ "task": "mmlu_flan_cot_zeroshot_electrical_engineering"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_formal_logic.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "formal_logic"
+ "description": "The following are multiple choice questions (with answers) about formal\
+ \ logic.\n\n"
+ "group": "mmlu_flan_cot_zeroshot_humanities"
+ "include": "_mmlu_flan_cot_zeroshot_template_yaml"
+ "task": "mmlu_flan_cot_zeroshot_formal_logic"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_biology.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "high_school_biology"
+ "description": "The following are multiple choice questions (with answers) about high\
+ \ school biology.\n\n"
+ "group": "mmlu_flan_cot_zeroshot_stem"
+ "include": "_mmlu_flan_cot_zeroshot_template_yaml"
+ "task": "mmlu_flan_cot_zeroshot_high_school_biology"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_chemistry.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "high_school_chemistry"
+ "description": "The following are multiple choice questions (with answers) about high\
+ \ school chemistry.\n\n"
+ "group": "mmlu_flan_cot_zeroshot_stem"
+ "include": "_mmlu_flan_cot_zeroshot_template_yaml"
+ "task": "mmlu_flan_cot_zeroshot_high_school_chemistry"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_european_history.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "high_school_european_history"
+ "description": "The following are multiple choice questions (with answers) about high\
+ \ school european history.\n\n"
+ "group": "mmlu_flan_cot_zeroshot_humanities"
+ "include": "_mmlu_flan_cot_zeroshot_template_yaml"
+ "task": "mmlu_flan_cot_zeroshot_high_school_european_history"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_geography.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "high_school_geography"
+ "description": "The following are multiple choice questions (with answers) about high\
+ \ school geography.\n\n"
+ "group": "mmlu_flan_cot_zeroshot_social_sciences"
+ "include": "_mmlu_flan_cot_zeroshot_template_yaml"
+ "task": "mmlu_flan_cot_zeroshot_high_school_geography"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_mathematics.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "high_school_mathematics"
+ "description": "The following are multiple choice questions (with answers) about high\
+ \ school mathematics.\n\n"
+ "group": "mmlu_flan_cot_zeroshot_stem"
+ "include": "_mmlu_flan_cot_zeroshot_template_yaml"
+ "task": "mmlu_flan_cot_zeroshot_high_school_mathematics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_psychology.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "high_school_psychology"
+ "description": "The following are multiple choice questions (with answers) about high\
+ \ school psychology.\n\n"
+ "group": "mmlu_flan_cot_zeroshot_social_sciences"
+ "include": "_mmlu_flan_cot_zeroshot_template_yaml"
+ "task": "mmlu_flan_cot_zeroshot_high_school_psychology"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_world_history.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "high_school_world_history"
+ "description": "The following are multiple choice questions (with answers) about high\
+ \ school world history.\n\n"
+ "group": "mmlu_flan_cot_zeroshot_humanities"
+ "include": "_mmlu_flan_cot_zeroshot_template_yaml"
+ "task": "mmlu_flan_cot_zeroshot_high_school_world_history"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_international_law.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "international_law"
+ "description": "The following are multiple choice questions (with answers) about international\
+ \ law.\n\n"
+ "group": "mmlu_flan_cot_zeroshot_humanities"
+ "include": "_mmlu_flan_cot_zeroshot_template_yaml"
+ "task": "mmlu_flan_cot_zeroshot_international_law"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_management.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "management"
+ "description": "The following are multiple choice questions (with answers) about management.\n\
+ \n"
+ "group": "mmlu_flan_cot_zeroshot_other"
+ "include": "_mmlu_flan_cot_zeroshot_template_yaml"
+ "task": "mmlu_flan_cot_zeroshot_management"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_moral_disputes.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "moral_disputes"
+ "description": "The following are multiple choice questions (with answers) about moral\
+ \ disputes.\n\n"
+ "group": "mmlu_flan_cot_zeroshot_humanities"
+ "include": "_mmlu_flan_cot_zeroshot_template_yaml"
+ "task": "mmlu_flan_cot_zeroshot_moral_disputes"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_philosophy.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "philosophy"
+ "description": "The following are multiple choice questions (with answers) about philosophy.\n\
+ \n"
+ "group": "mmlu_flan_cot_zeroshot_humanities"
+ "include": "_mmlu_flan_cot_zeroshot_template_yaml"
+ "task": "mmlu_flan_cot_zeroshot_philosophy"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_psychology.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "professional_psychology"
+ "description": "The following are multiple choice questions (with answers) about professional\
+ \ psychology.\n\n"
+ "group": "mmlu_flan_cot_zeroshot_social_sciences"
+ "include": "_mmlu_flan_cot_zeroshot_template_yaml"
+ "task": "mmlu_flan_cot_zeroshot_professional_psychology"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_public_relations.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "public_relations"
+ "description": "The following are multiple choice questions (with answers) about public\
+ \ relations.\n\n"
+ "group": "mmlu_flan_cot_zeroshot_social_sciences"
+ "include": "_mmlu_flan_cot_zeroshot_template_yaml"
+ "task": "mmlu_flan_cot_zeroshot_public_relations"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_sociology.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "sociology"
+ "description": "The following are multiple choice questions (with answers) about sociology.\n\
+ \n"
+ "group": "mmlu_flan_cot_zeroshot_social_sciences"
+ "include": "_mmlu_flan_cot_zeroshot_template_yaml"
+ "task": "mmlu_flan_cot_zeroshot_sociology"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/utils.py ADDED
@@ -0,0 +1,112 @@
+ import re
+ import sys
+ import unicodedata
+
+ from lm_eval.filters.extraction import RegexFilter
+
+
+ class MultiChoiceRegexFilter(RegexFilter):
+     """ """
+
+     def __init__(
+         self,
+         regex_pattern: str = r"#### (\-?[0-9\.\,]+)",
+         group_select=0,
+         fallback: str = "[invalid]",
+         ignore_case=False,
+         ignore_punctuation=False,
+         regexes_to_ignore=None,
+     ) -> None:
+         """
+         regex_pattern: The basic regex pattern to use. If fails to match, we will use the customized match procedure
+             - step 1 : We parse the choices between ([A-Z])s then try to find these choices in the response.
+             - step 2 : We parse the choice with regex :[\s]*([A-?]), where ? varies by number of choices.
+         group_select: Selects the (group_select)th match from the findall result.
+         ignore_case: Ignores the case during step 1 matching
+         ignore_punctuation: Remove the punctuation during step 1 matching
+         regexes_to_ignore: Remove these regexes during step 1 matching
+         """
+         super().__init__(regex_pattern, group_select, fallback)
+         self.ignore_case = ignore_case
+         self.ignore_punctuation = ignore_punctuation
+         self.regexes_to_ignore = regexes_to_ignore
+
+     def apply(self, resps, docs):
+         # here, we assume we have a list, in which each element is
+         # a list of model responses for some particular input/target pair.
+         # so we process each of these (same input/target response sets)
+         # independently (and keep them a list.)
+
+         def find_match(regex, resp, convert_dict={}):
+             match = regex.findall(resp)
+             if match:
+                 match = match[self.group_select]
+                 if isinstance(match, tuple):
+                     match = [m for m in match if m][0]
+                 match = match.strip()
+                 if match and match in convert_dict:
+                     match = convert_dict[match]
+             return match
+
+         punct_tbl = dict.fromkeys(
+             i
+             for i in range(sys.maxunicode)
+             if unicodedata.category(chr(i)).startswith("P")
+         )
+
+         def filter_ignores(st):
+             if self.regexes_to_ignore is not None:
+                 for s in self.regexes_to_ignore:
+                     st = re.sub(s, "", st)
+
+             if self.ignore_case:
+                 st = st.lower()
+
+             if self.ignore_punctuation:
+                 # https://stackoverflow.com/a/266162
+                 st = st.translate(punct_tbl)
+             return st
+
+         filtered_resps = []
+
+         for r, doc in zip(resps, docs):
+             fallback_regexes = []
+             choice_to_alpha = {}
+             next_alpha = "A"
+
+             without_paren_fallback_regexes = []
+             without_paren_to_target = {}
+
+             choices = doc["choices"]
+             for c in choices:
+                 m = filter_ignores(c.strip())
+                 fallback_regexes.append(f"{re.escape(m)}")
+                 choice_to_alpha[m] = f"({next_alpha})"
+
+                 without_paren_fallback_regexes.append(next_alpha)
+                 without_paren_to_target[next_alpha] = f"({next_alpha})"
+
+                 next_alpha = chr(ord(next_alpha) + 1)
+             fallback_regex = re.compile("|".join(fallback_regexes))
+             without_paren_fallback_regex = "|".join(without_paren_fallback_regexes)
+             without_paren_fallback_regex = re.compile(
+                 f":[\s]*({without_paren_fallback_regex})"
+             )
+
+             filtered = []
+             for resp in r:
+                 match = find_match(self.regex, resp)
+                 if not match:
+                     match = find_match(
+                         fallback_regex, filter_ignores(resp), choice_to_alpha
+                     )
+                     if not match:
+                         match = find_match(
+                             without_paren_fallback_regex, resp, without_paren_to_target
+                         )
+                         if not match:
+                             match = self.fallback
+                 filtered.append(match)
+             filtered_resps.append(filtered)
+
+         return filtered_resps
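For context, a minimal usage sketch of the MultiChoiceRegexFilter added above (not part of the commit). The sample docs/resps values are invented for illustration; it assumes the parent RegexFilter compiles regex_pattern into self.regex, which apply() relies on, and that the module is importable as lm_eval.tasks.mmlu.flan_cot_zeroshot.utils.

# Hypothetical usage sketch, not one of the uploaded files.
from lm_eval.tasks.mmlu.flan_cot_zeroshot.utils import MultiChoiceRegexFilter

# One doc (with its answer choices) and one list of model responses for that doc.
docs = [{"choices": ["Paris", "London", "Rome", "Madrid"]}]
resps = [["Let's think step by step. The capital of France is Paris. The answer is (A)."]]

# The primary pattern looks for a parenthesized letter; if it finds nothing,
# apply() falls back to matching the choice strings themselves.
flt = MultiChoiceRegexFilter(
    regex_pattern=r"(\([A-D]\))",
    ignore_case=True,
    ignore_punctuation=True,
)
print(flt.apply(resps, docs))  # expected: [['(A)']]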
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_astronomy.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "astronomy"
+ "description": "The following are multiple choice questions (with answers) about astronomy.\n\
+ \n"
+ "group": "mmlu_flan_n_shot_loglikelihood_stem"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_astronomy"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_business_ethics.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "business_ethics"
+ "description": "The following are multiple choice questions (with answers) about business\
+ \ ethics.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_other"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_business_ethics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_clinical_knowledge.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "clinical_knowledge"
+ "description": "The following are multiple choice questions (with answers) about clinical\
+ \ knowledge.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_other"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_clinical_knowledge"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_computer_science.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "college_computer_science"
+ "description": "The following are multiple choice questions (with answers) about college\
+ \ computer science.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_stem"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_college_computer_science"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_medicine.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "college_medicine"
+ "description": "The following are multiple choice questions (with answers) about college\
+ \ medicine.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_other"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_college_medicine"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_physics.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "college_physics"
+ "description": "The following are multiple choice questions (with answers) about college\
+ \ physics.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_stem"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_college_physics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_computer_security.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "computer_security"
+ "description": "The following are multiple choice questions (with answers) about computer\
+ \ security.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_stem"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_computer_security"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_econometrics.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "econometrics"
+ "description": "The following are multiple choice questions (with answers) about econometrics.\n\
+ \n"
+ "group": "mmlu_flan_n_shot_loglikelihood_social_sciences"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_econometrics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_electrical_engineering.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "electrical_engineering"
+ "description": "The following are multiple choice questions (with answers) about electrical\
+ \ engineering.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_stem"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_electrical_engineering"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_elementary_mathematics.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "elementary_mathematics"
+ "description": "The following are multiple choice questions (with answers) about elementary\
+ \ mathematics.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_stem"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_elementary_mathematics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_formal_logic.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "formal_logic"
+ "description": "The following are multiple choice questions (with answers) about formal\
+ \ logic.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_humanities"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_formal_logic"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_global_facts.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "global_facts"
+ "description": "The following are multiple choice questions (with answers) about global\
+ \ facts.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_other"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_global_facts"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_biology.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "high_school_biology"
+ "description": "The following are multiple choice questions (with answers) about high\
+ \ school biology.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_stem"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_high_school_biology"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_chemistry.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "high_school_chemistry"
+ "description": "The following are multiple choice questions (with answers) about high\
+ \ school chemistry.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_stem"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_high_school_chemistry"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_computer_science.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "high_school_computer_science"
+ "description": "The following are multiple choice questions (with answers) about high\
+ \ school computer science.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_stem"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_high_school_computer_science"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_geography.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "high_school_geography"
+ "description": "The following are multiple choice questions (with answers) about high\
+ \ school geography.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_social_sciences"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_high_school_geography"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_government_and_politics.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "high_school_government_and_politics"
+ "description": "The following are multiple choice questions (with answers) about high\
+ \ school government and politics.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_social_sciences"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_high_school_government_and_politics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_mathematics.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "high_school_mathematics"
+ "description": "The following are multiple choice questions (with answers) about high\
+ \ school mathematics.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_stem"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_high_school_mathematics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_world_history.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "high_school_world_history"
+ "description": "The following are multiple choice questions (with answers) about high\
+ \ school world history.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_humanities"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_high_school_world_history"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_human_aging.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "human_aging"
+ "description": "The following are multiple choice questions (with answers) about human\
+ \ aging.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_other"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_human_aging"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_jurisprudence.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "jurisprudence"
+ "description": "The following are multiple choice questions (with answers) about jurisprudence.\n\
+ \n"
+ "group": "mmlu_flan_n_shot_loglikelihood_humanities"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_jurisprudence"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_machine_learning.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "machine_learning"
+ "description": "The following are multiple choice questions (with answers) about machine\
+ \ learning.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_stem"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_machine_learning"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_marketing.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "marketing"
+ "description": "The following are multiple choice questions (with answers) about marketing.\n\
+ \n"
+ "group": "mmlu_flan_n_shot_loglikelihood_other"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_marketing"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_medical_genetics.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "medical_genetics"
+ "description": "The following are multiple choice questions (with answers) about medical\
+ \ genetics.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_other"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_medical_genetics"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_moral_disputes.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "moral_disputes"
+ "description": "The following are multiple choice questions (with answers) about moral\
+ \ disputes.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_humanities"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_moral_disputes"
lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_philosophy.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "philosophy"
+ "description": "The following are multiple choice questions (with answers) about philosophy.\n\
+ \n"
+ "group": "mmlu_flan_n_shot_loglikelihood_humanities"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_philosophy"