Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See raw diff
- lm-evaluation/lm_eval/tasks/ammlu/_default_template_yaml +19 -0
- lm-evaluation/lm_eval/tasks/ammlu/_generate_configs.py +119 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_anatomy.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_astronomy.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_business_ethics.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_college_biology.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_college_chemistry.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_college_computer_science.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_college_medicine.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_college_physics.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_computer_security.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_conceptual_physics.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_econometrics.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_elementary_mathematics.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_formal_logic.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_global_facts.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_high_school_biology.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_high_school_chemistry.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_high_school_computer_science.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_high_school_european_history.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_high_school_government_and_politics.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_high_school_macroeconomics.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_high_school_mathematics.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_high_school_microeconomics.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_high_school_physics.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_high_school_psychology.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_high_school_statistics.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_human_sexuality.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_international_law.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_jurisprudence.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_logical_fallacies.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_management.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_marketing.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_medical_genetics.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_miscellaneous.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_moral_disputes.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_nutrition.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_professional_accounting.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_professional_psychology.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_sociology.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_us_foreign_policy.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_virology.yaml +4 -0
- lm-evaluation/lm_eval/tasks/ammlu/ammlu_world_religions.yaml +4 -0
- lm-evaluation/lm_eval/tasks/benchmarks/flan/flan_held_out.yaml +13 -0
- lm-evaluation/lm_eval/tasks/benchmarks/minerva_math.yaml +9 -0
- lm-evaluation/lm_eval/tasks/benchmarks/openllm.yaml +18 -0
- lm-evaluation/lm_eval/tasks/benchmarks/pythia.yaml +12 -0
- lm-evaluation/lm_eval/tasks/benchmarks/t0_eval.yaml +127 -0
- lm-evaluation/lm_eval/tasks/code_x_glue/code-text/go.yaml +21 -0
- lm-evaluation/lm_eval/tasks/code_x_glue/code-text/java.yaml +21 -0
lm-evaluation/lm_eval/tasks/ammlu/_default_template_yaml
ADDED
@@ -0,0 +1,19 @@
group: ammlu
dataset_path: Hennara/ammlu
test_split: test
fewshot_split: dev
fewshot_config:
  sampler: first_n
output_type: multiple_choice
doc_to_text: "{{Question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nالجواب:"
doc_to_choice: ["A", "B", "C", "D"]
doc_to_target: "{{['A', 'B', 'C', 'D'].index(Answer)}}"
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 0.0
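The template above drives every ammlu_* task below: `doc_to_text` builds the Arabic multiple-choice prompt, `doc_to_choice` fixes the option letters, and `doc_to_target` converts the dataset's letter answer into an index into that choice list. A minimal Python sketch of what those Jinja expressions compute for one row (the field names `Question`, `A`–`D`, `Answer` come from the template; the example row itself is made up):

    # One made-up dataset row with the field names assumed by the template.
    row = {"Question": " سؤال تجريبي؟ ", "A": "1", "B": "2", "C": "3", "D": "4", "Answer": "C"}

    choices = ["A", "B", "C", "D"]                      # doc_to_choice
    prompt = (
        f"{row['Question'].strip()}\n"
        f"A. {row['A']}\nB. {row['B']}\nC. {row['C']}\nD. {row['D']}\n"
        "الجواب:"
    )                                                   # doc_to_text
    target_index = choices.index(row["Answer"])         # doc_to_target -> 2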
lm-evaluation/lm_eval/tasks/ammlu/_generate_configs.py
ADDED
@@ -0,0 +1,119 @@
"""
Take in a YAML, and output all other splits with this YAML
"""
import argparse
import os

import yaml
from tqdm import tqdm


SUBJECTS = {
    "abstract_algebra": "ألعلوم وتقنية المعلومات و الرياضيات",
    "anatomy": "ألعلوم وتقنية المعلومات و الرياضيات",
    "astronomy": "ألعلوم وتقنية المعلومات و الرياضيات",
    "business_ethics": "علوم أخرى",
    "clinical_knowledge": "علوم أخرى",
    "college_biology": "ألعلوم وتقنية المعلومات و الرياضيات",
    "college_chemistry": "ألعلوم وتقنية المعلومات و الرياضيات",
    "college_computer_science": "ألعلوم وتقنية المعلومات و الرياضيات",
    "college_mathematics": "ألعلوم وتقنية المعلومات و الرياضيات",
    "college_medicine": "علوم أخرى",
    "college_physics": "ألعلوم وتقنية المعلومات و الرياضيات",
    "computer_security": "ألعلوم وتقنية المعلومات و الرياضيات",
    "conceptual_physics": "ألعلوم وتقنية المعلومات و الرياضيات",
    "econometrics": "العلوم الإجتماعية",
    "electrical_engineering": "ألعلوم وتقنية المعلومات و الرياضيات",
    "elementary_mathematics": "ألعلوم وتقنية المعلومات و الرياضيات",
    "formal_logic": "العلوم الانسانية",
    "global_facts": "علوم أخرى",
    "high_school_biology": "ألعلوم وتقنية المعلومات و الرياضيات",
    "high_school_chemistry": "ألعلوم وتقنية المعلومات و الرياضيات",
    "high_school_computer_science": "ألعلوم وتقنية المعلومات و الرياضيات",
    "high_school_european_history": "العلوم الانسانية",
    "high_school_geography": "العلوم الإجتماعية",
    "high_school_government_and_politics": "العلوم الإجتماعية",
    "high_school_macroeconomics": "العلوم الإجتماعية",
    "high_school_mathematics": "ألعلوم وتقنية المعلومات و الرياضيات",
    "high_school_microeconomics": "العلوم الإجتماعية",
    "high_school_physics": "ألعلوم وتقنية المعلومات و الرياضيات",
    "high_school_psychology": "العلوم الإجتماعية",
    "high_school_statistics": "ألعلوم وتقنية المعلومات و الرياضيات",
    "high_school_us_history": "العلوم الانسانية",
    "high_school_world_history": "العلوم الانسانية",
    "human_aging": "علوم أخرى",
    "human_sexuality": "العلوم الإجتماعية",
    "international_law": "العلوم الانسانية",
    "jurisprudence": "العلوم الانسانية",
    "logical_fallacies": "العلوم الانسانية",
    "machine_learning": "ألعلوم وتقنية المعلومات و الرياضيات",
    "management": "علوم أخرى",
    "marketing": "علوم أخرى",
    "medical_genetics": "علوم أخرى",
    "miscellaneous": "علوم أخرى",
    "moral_disputes": "العلوم الانسانية",
    "moral_scenarios": "العلوم الانسانية",
    "nutrition": "علوم أخرى",
    "philosophy": "العلوم الانسانية",
    "prehistory": "العلوم الانسانية",
    "professional_accounting": "علوم أخرى",
    "professional_law": "العلوم الانسانية",
    "professional_medicine": "علوم أخرى",
    "professional_psychology": "العلوم الإجتماعية",
    "public_relations": "العلوم الإجتماعية",
    "security_studies": "العلوم الإجتماعية",
    "sociology": "العلوم الإجتماعية",
    "us_foreign_policy": "العلوم الإجتماعية",
    "virology": "علوم أخرى",
    "world_religions": "العلوم الانسانية",
}


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base_yaml_path", required=True)
    parser.add_argument("--save_prefix_path", default="ammlu")
    parser.add_argument("--cot_prompt_path", default=None)
    parser.add_argument("--task_prefix", default="")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()

    # get filename of base_yaml so we can `"include": ` it in our other YAMLs.
    base_yaml_name = os.path.split(args.base_yaml_path)[-1]
    with open(args.base_yaml_path, encoding="utf-8") as f:
        base_yaml = yaml.full_load(f)

    if args.cot_prompt_path is not None:
        import json

        with open(args.cot_prompt_path, encoding="utf-8") as f:
            cot_file = json.load(f)

    for subject_eng, category in tqdm(SUBJECTS.items()):
        if args.cot_prompt_path is not None:
            description = cot_file[subject_eng]
        else:
            description = f"فم بعملية التقييم في مجال {category} \n\n"

        yaml_dict = {
            "include": base_yaml_name,
            "task": f"ammlu_{args.task_prefix}_{subject_eng}"
            if args.task_prefix != ""
            else f"ammlu_{subject_eng}",
            "dataset_name": subject_eng,
            "description": description,
        }

        file_save_path = args.save_prefix_path + f"_{subject_eng}.yaml"
        print(f"Saving yaml for subset {subject_eng} to {file_save_path}")
        with open(file_save_path, "w", encoding="utf-8") as yaml_file:
            yaml.dump(
                yaml_dict,
                yaml_file,
                width=float("inf"),
                allow_unicode=True,
                default_style='"',
            )
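The per-subject YAMLs that follow were emitted by this script. A usage sketch, assuming it is run from `lm_eval/tasks/ammlu/` so that the generated `include: _default_template_yaml` resolves relative to the new files (flags as defined in `parse_args` above):

    # Sketch: regenerate the per-subject configs next to the template.
    import subprocess

    subprocess.run(
        [
            "python", "_generate_configs.py",
            "--base_yaml_path", "_default_template_yaml",
            "--save_prefix_path", "ammlu",
        ],
        check=True,
    )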
lm-evaluation/lm_eval/tasks/ammlu/ammlu_anatomy.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "anatomy"
"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n"
"include": "_default_template_yaml"
"task": "ammlu_anatomy"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_astronomy.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "astronomy"
"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n"
"include": "_default_template_yaml"
"task": "ammlu_astronomy"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_business_ethics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "business_ethics"
"description": "فم بعملية التقييم في مجال علوم أخرى \n\n"
"include": "_default_template_yaml"
"task": "ammlu_business_ethics"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_college_biology.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "college_biology"
"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n"
"include": "_default_template_yaml"
"task": "ammlu_college_biology"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_college_chemistry.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "college_chemistry"
"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n"
"include": "_default_template_yaml"
"task": "ammlu_college_chemistry"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_college_computer_science.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "college_computer_science"
"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n"
"include": "_default_template_yaml"
"task": "ammlu_college_computer_science"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_college_medicine.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "college_medicine"
"description": "فم بعملية التقييم في مجال علوم أخرى \n\n"
"include": "_default_template_yaml"
"task": "ammlu_college_medicine"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_college_physics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "college_physics"
"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n"
"include": "_default_template_yaml"
"task": "ammlu_college_physics"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_computer_security.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "computer_security"
"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n"
"include": "_default_template_yaml"
"task": "ammlu_computer_security"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_conceptual_physics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "conceptual_physics"
"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n"
"include": "_default_template_yaml"
"task": "ammlu_conceptual_physics"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_econometrics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "econometrics"
"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n"
"include": "_default_template_yaml"
"task": "ammlu_econometrics"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_elementary_mathematics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "elementary_mathematics"
"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n"
"include": "_default_template_yaml"
"task": "ammlu_elementary_mathematics"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_formal_logic.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "formal_logic"
"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n"
"include": "_default_template_yaml"
"task": "ammlu_formal_logic"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_global_facts.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "global_facts"
"description": "فم بعملية التقييم في مجال علوم أخرى \n\n"
"include": "_default_template_yaml"
"task": "ammlu_global_facts"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_high_school_biology.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "high_school_biology"
"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n"
"include": "_default_template_yaml"
"task": "ammlu_high_school_biology"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_high_school_chemistry.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "high_school_chemistry"
"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n"
"include": "_default_template_yaml"
"task": "ammlu_high_school_chemistry"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_high_school_computer_science.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "high_school_computer_science"
"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n"
"include": "_default_template_yaml"
"task": "ammlu_high_school_computer_science"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_high_school_european_history.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "high_school_european_history"
"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n"
"include": "_default_template_yaml"
"task": "ammlu_high_school_european_history"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_high_school_government_and_politics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "high_school_government_and_politics"
"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n"
"include": "_default_template_yaml"
"task": "ammlu_high_school_government_and_politics"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_high_school_macroeconomics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "high_school_macroeconomics"
"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n"
"include": "_default_template_yaml"
"task": "ammlu_high_school_macroeconomics"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_high_school_mathematics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "high_school_mathematics"
"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n"
"include": "_default_template_yaml"
"task": "ammlu_high_school_mathematics"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_high_school_microeconomics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "high_school_microeconomics"
"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n"
"include": "_default_template_yaml"
"task": "ammlu_high_school_microeconomics"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_high_school_physics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "high_school_physics"
"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n"
"include": "_default_template_yaml"
"task": "ammlu_high_school_physics"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_high_school_psychology.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "high_school_psychology"
"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n"
"include": "_default_template_yaml"
"task": "ammlu_high_school_psychology"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_high_school_statistics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "high_school_statistics"
"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n"
"include": "_default_template_yaml"
"task": "ammlu_high_school_statistics"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_human_sexuality.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "human_sexuality"
"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n"
"include": "_default_template_yaml"
"task": "ammlu_human_sexuality"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_international_law.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "international_law"
"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n"
"include": "_default_template_yaml"
"task": "ammlu_international_law"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_jurisprudence.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "jurisprudence"
"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n"
"include": "_default_template_yaml"
"task": "ammlu_jurisprudence"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_logical_fallacies.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "logical_fallacies"
"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n"
"include": "_default_template_yaml"
"task": "ammlu_logical_fallacies"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_management.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "management"
"description": "فم بعملية التقييم في مجال علوم أخرى \n\n"
"include": "_default_template_yaml"
"task": "ammlu_management"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_marketing.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "marketing"
"description": "فم بعملية التقييم في مجال علوم أخرى \n\n"
"include": "_default_template_yaml"
"task": "ammlu_marketing"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_medical_genetics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "medical_genetics"
"description": "فم بعملية التقييم في مجال علوم أخرى \n\n"
"include": "_default_template_yaml"
"task": "ammlu_medical_genetics"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_miscellaneous.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "miscellaneous"
"description": "فم بعملية التقييم في مجال علوم أخرى \n\n"
"include": "_default_template_yaml"
"task": "ammlu_miscellaneous"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_moral_disputes.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "moral_disputes"
"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n"
"include": "_default_template_yaml"
"task": "ammlu_moral_disputes"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_nutrition.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "nutrition"
"description": "فم بعملية التقييم في مجال علوم أخرى \n\n"
"include": "_default_template_yaml"
"task": "ammlu_nutrition"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_professional_accounting.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "professional_accounting"
"description": "فم بعملية التقييم في مجال علوم أخرى \n\n"
"include": "_default_template_yaml"
"task": "ammlu_professional_accounting"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_professional_psychology.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "professional_psychology"
"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n"
"include": "_default_template_yaml"
"task": "ammlu_professional_psychology"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_sociology.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "sociology"
"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n"
"include": "_default_template_yaml"
"task": "ammlu_sociology"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_us_foreign_policy.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "us_foreign_policy"
"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n"
"include": "_default_template_yaml"
"task": "ammlu_us_foreign_policy"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_virology.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "virology"
"description": "فم بعملية التقييم في مجال علوم أخرى \n\n"
"include": "_default_template_yaml"
"task": "ammlu_virology"
lm-evaluation/lm_eval/tasks/ammlu/ammlu_world_religions.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "world_religions"
"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n"
"include": "_default_template_yaml"
"task": "ammlu_world_religions"
lm-evaluation/lm_eval/tasks/benchmarks/flan/flan_held_out.yaml
ADDED
@@ -0,0 +1,13 @@
group: flan_held_out
task:
  # BBH
  - bbh_zeroshot
  - bbh_fewshot
  - bbh_cot_fewshot
  - bbh_cot_zeroshot
  # MMLU
  - mmlu
  - mmlu_flan_n_shot_generative
  - mmlu_flan_n_shot_loglikelihood
  - mmlu_flan_cot_zeroshot
  - mmlu_flan_cot_fewshot
lm-evaluation/lm_eval/tasks/benchmarks/minerva_math.yaml
ADDED
@@ -0,0 +1,9 @@
group: minerva_math
task:
  - minerva_math_algebra
  - minerva_math_counting_and_prob
  - minerva_math_geometry
  - minerva_math_intermediate_algebra
  - minerva_math_num_theory
  - minerva_math_prealgebra
  - minerva_math_precalc
lm-evaluation/lm_eval/tasks/benchmarks/openllm.yaml
ADDED
@@ -0,0 +1,18 @@
group: openllm
group_alias: Open LLM Leaderboard
task:
  - task: arc_challenge
    fewshot_split: validation
    num_fewshot: 25
  - task: hellaswag
    fewshot_split: train
    num_fewshot: 10
  - task: truthfulqa
    num_fewshot: 0
  - task: mmlu
    num_fewshot: 5
  - task: winogrande
    fewshot_split: train
    num_fewshot: 5
  - task: gsm8k
    num_fewshot: 5
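For reference, a hedged sketch of running this group through the harness's Python entry point; `simple_evaluate` and its keyword names are assumed from recent lm-evaluation-harness releases and may differ by version, and the model name is a placeholder:

    # Hedged sketch: evaluate the "openllm" group defined above.
    # The per-task num_fewshot values come from the group YAML, so only the
    # model needs to be specified here. "gpt2" is a placeholder checkpoint.
    import lm_eval

    results = lm_eval.simple_evaluate(
        model="hf",
        model_args="pretrained=gpt2",
        tasks=["openllm"],
    )
    print(results["results"])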
lm-evaluation/lm_eval/tasks/benchmarks/pythia.yaml
ADDED
@@ -0,0 +1,12 @@
group: pythia
task:
  - lambada_openai
  - logiqa
  - piqa
  - sciq
  - wikitext
  - winogrande
  - wsc
  - ai2_arc
  - blimp
  - mmlu
lm-evaluation/lm_eval/tasks/benchmarks/t0_eval.yaml
ADDED
@@ -0,0 +1,127 @@
group: t0_eval
task:
  # Coreference Resolution
  - dataset_path: super_glue
    dataset_name: wsc.fixed
    use_prompt: promptsource:*
    training_split: train
    validation_split: validation
    output_type: generate_until
    metric_list:
      - metric: exact_match
        aggregation: mean
        higher_is_better: true
        ignore_case: true
        ignore_punctuation: true
  # Coreference Resolution
  - dataset_path: winogrande
    dataset_name: winogrande_xl
    use_prompt: promptsource:*
    training_split: train
    validation_split: validation
    output_type: generate_until
    metric_list:
      - metric: exact_match
        aggregation: mean
        higher_is_better: true
        ignore_case: true
        ignore_punctuation: true
  # Natural Language Inference
  - dataset_path: super_glue
    dataset_name: cb
    use_prompt: promptsource:*
    training_split: train
    validation_split: validation
    output_type: generate_until
    metric_list:
      - metric: exact_match
        aggregation: mean
        higher_is_better: true
        ignore_case: true
        ignore_punctuation: true
  - dataset_path: super_glue
    dataset_name: rte
    use_prompt: promptsource:*
    training_split: train
    validation_split: validation
    output_type: generate_until
    metric_list:
      - metric: exact_match
        aggregation: mean
        higher_is_better: true
        ignore_case: true
        ignore_punctuation: true
  - task: anli_r1
    dataset_path: anli
    use_prompt: promptsource:*
    training_split: train_r1
    validation_split: dev_r1
    output_type: generate_until
    metric_list:
      - metric: exact_match
        aggregation: mean
        higher_is_better: true
        ignore_case: true
        ignore_punctuation: true
  - task: anli_r2
    dataset_path: anli
    use_prompt: promptsource:*
    training_split: train_r2
    validation_split: dev_r2
    output_type: generate_until
    metric_list:
      - metric: exact_match
        aggregation: mean
        higher_is_better: true
        ignore_case: true
        ignore_punctuation: true
  - task: anli_r3
    dataset_path: anli
    use_prompt: promptsource:*
    training_split: train_r3
    validation_split: dev_r3
    output_type: generate_until
    metric_list:
      - metric: exact_match
        aggregation: mean
        higher_is_better: true
        ignore_case: true
        ignore_punctuation: true
  # Sentence Completion
  - dataset_path: super_glue
    dataset_name: copa
    use_prompt: promptsource:*
    training_split: train
    validation_split: validation
    output_type: generate_until
    metric_list:
      - metric: exact_match
        aggregation: mean
        higher_is_better: true
        ignore_case: true
        ignore_punctuation: true
  # Natural Language Inference
  - dataset_path: hellaswag
    use_prompt: promptsource:*
    training_split: train
    validation_split: validation
    output_type: generate_until
    metric_list:
      - metric: exact_match
        aggregation: mean
        higher_is_better: true
        ignore_case: true
        ignore_punctuation: true
  # Word Sense Disambiguation
  - dataset_path: super_glue
    dataset_name: wic
    use_prompt: promptsource:*
    training_split: train
    validation_split: validation
    output_type: generate_until
    metric_list:
      - metric: exact_match
        aggregation: mean
        higher_is_better: true
        ignore_case: true
        ignore_punctuation: true
lm-evaluation/lm_eval/tasks/code_x_glue/code-text/go.yaml
ADDED
@@ -0,0 +1,21 @@
group:
  - codexglue_code2text
task: code2text_go
dataset_path: CM/codexglue_code2text_go
training_split: train
validation_split: validation
test_split: test
output_type: generate_until
generation_kwargs:
  num_beams: 10
  max_gen_toks: 128
  until:
    - "</s>"
doc_to_text: !function utils.doc_to_text
doc_to_target: !function utils.doc_to_target
metric_list:
  - metric: !function bleu.smoothed_bleu_4
    aggregation: mean
    higher_is_better: True
metadata:
  version: 1.0
lm-evaluation/lm_eval/tasks/code_x_glue/code-text/java.yaml
ADDED
@@ -0,0 +1,21 @@
group:
  - codexglue_code2text
task: code2text_java
dataset_path: CM/codexglue_code2text_java
training_split: train
validation_split: validation
test_split: test
output_type: generate_until
generation_kwargs:
  num_beams: 10
  max_gen_toks: 128
  until:
    - "</s>"
doc_to_text: !function utils.doc_to_text
doc_to_target: !function utils.doc_to_target
metric_list:
  - metric: !function bleu.smoothed_bleu_4
    aggregation: mean
    higher_is_better: True
metadata:
  version: 1.0
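Both code-to-text configs rely on the harness's `!function` YAML tag, which loads a plain Python callable from a module sitting next to the YAML (here `utils.py` and `bleu.py`). A purely hypothetical sketch of the shape such helpers take; the `code` and `docstring` field names are assumptions for illustration, not the repository's actual implementation:

    # Hypothetical shape of the helpers referenced by `!function utils.doc_to_text`
    # and `!function utils.doc_to_target`: each receives one dataset row as a dict.
    # The field names below are assumed, not taken from the actual utils.py.
    def doc_to_text(doc: dict) -> str:
        return doc["code"]

    def doc_to_target(doc: dict) -> str:
        return doc["docstring"]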