applied-ai-018 committed
Commit e33b413 · verified · 1 Parent(s): aa77b50

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. lm-evaluation/lm_eval/tasks/agieval/lsat-ar.yaml +7 -0
  2. lm-evaluation/lm_eval/tasks/ceval/README.md +127 -0
  3. lm-evaluation/lm_eval/tasks/ceval/_generate_configs.py +118 -0
  4. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_accountant.yaml +4 -0
  5. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_advanced_mathematics.yaml +4 -0
  6. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_art_studies.yaml +4 -0
  7. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_basic_medicine.yaml +4 -0
  8. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_civil_servant.yaml +4 -0
  9. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_clinical_medicine.yaml +4 -0
  10. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_college_physics.yaml +4 -0
  11. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_college_programming.yaml +4 -0
  12. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_computer_network.yaml +4 -0
  13. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_discrete_mathematics.yaml +4 -0
  14. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_education_science.yaml +4 -0
  15. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_electrical_engineer.yaml +4 -0
  16. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_environmental_impact_assessment_engineer.yaml +4 -0
  17. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_high_school_chemistry.yaml +4 -0
  18. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_high_school_chinese.yaml +4 -0
  19. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_high_school_geography.yaml +4 -0
  20. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_high_school_mathematics.yaml +4 -0
  21. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_high_school_politics.yaml +4 -0
  22. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_ideological_and_moral_cultivation.yaml +4 -0
  23. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_law.yaml +4 -0
  24. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_logic.yaml +4 -0
  25. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_mao_zedong_thought.yaml +4 -0
  26. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_marxism.yaml +4 -0
  27. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_metrology_engineer.yaml +4 -0
  28. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_middle_school_chemistry.yaml +4 -0
  29. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_middle_school_geography.yaml +4 -0
  30. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_middle_school_history.yaml +4 -0
  31. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_middle_school_mathematics.yaml +4 -0
  32. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_middle_school_politics.yaml +4 -0
  33. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_modern_chinese_history.yaml +4 -0
  34. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_physician.yaml +4 -0
  35. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_plant_protection.yaml +4 -0
  36. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_probability_and_statistics.yaml +4 -0
  37. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_professional_tour_guide.yaml +4 -0
  38. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_tax_accountant.yaml +4 -0
  39. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_teacher_qualification.yaml +4 -0
  40. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_urban_and_rural_planner.yaml +4 -0
  41. lm-evaluation/lm_eval/tasks/csatqa/_default_csatqa_yaml +17 -0
  42. lm-evaluation/lm_eval/tasks/csatqa/_generate_configs.py +50 -0
  43. lm-evaluation/lm_eval/tasks/csatqa/csatqa_gr.yaml +3 -0
  44. lm-evaluation/lm_eval/tasks/csatqa/csatqa_li.yaml +3 -0
  45. lm-evaluation/lm_eval/tasks/csatqa/csatqa_rch.yaml +3 -0
  46. lm-evaluation/lm_eval/tasks/csatqa/csatqa_rcs.yaml +3 -0
  47. lm-evaluation/lm_eval/tasks/csatqa/csatqa_rcss.yaml +3 -0
  48. lm-evaluation/lm_eval/tasks/csatqa/csatqa_wr.yaml +3 -0
  49. lm-evaluation/lm_eval/tasks/csatqa/utils.py +20 -0
  50. lm-evaluation/lm_eval/tasks/glue/qnli/default.yaml +14 -0
lm-evaluation/lm_eval/tasks/agieval/lsat-ar.yaml ADDED
@@ -0,0 +1,7 @@
+ include: aqua-rat.yaml
+ group:
+ - agieval
+ - agieval_nous
+ - agieval_en
+ task: agieval_lsat_ar
+ dataset_path: hails/agieval-lsat-ar
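The YAML above inherits everything from `aqua-rat.yaml` and overrides only the group list, task name, and dataset path. A minimal sketch of that `include:`-and-override pattern, assuming child keys simply win over the included base (lm-evaluation-harness's own loader may merge differently):

```python
import os

import yaml


def load_task_config(path: str) -> dict:
    """Resolve an `include:`-style task YAML: load the base, let child keys win."""
    with open(path, encoding="utf-8") as f:
        cfg = yaml.safe_load(f)
    base_name = cfg.pop("include", None)
    if base_name is None:
        return cfg
    # Includes are written relative to the including file.
    base = load_task_config(os.path.join(os.path.dirname(path), base_name))
    base.update(cfg)  # e.g. `task` and `dataset_path` override aqua-rat.yaml's
    return base
```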
lm-evaluation/lm_eval/tasks/ceval/README.md ADDED
@@ -0,0 +1,127 @@
+ # C-Eval (Validation)
+
+ ### Paper
+ C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models
+ https://arxiv.org/pdf/2305.08322.pdf
+
+ C-Eval is a comprehensive Chinese evaluation suite for foundation models.
+ It consists of 13948 multi-choice questions spanning 52 diverse disciplines
+ and four difficulty levels.
+
+ Homepage: https://cevalbenchmark.com/
+
+ ### Citation
+
+ ```bibtex
+ @article{huang2023ceval,
+     title={C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models},
+     author={Huang, Yuzhen and Bai, Yuzhuo and Zhu, Zhihao and Zhang, Junlei and Zhang, Jinghan and Su, Tangjun and Liu, Junteng and Lv, Chuancheng and Zhang, Yikai and Lei, Jiayi and Fu, Yao and Sun, Maosong and He, Junxian},
+     journal={arXiv preprint arXiv:2305.08322},
+     year={2023}
+ }
+ ```
+
+
+ SUBJECTS = {
+ "computer_network":"计算机网络",
+ "operating_system":"操作系统",
+ "computer_architecture":"计算机组成",
+ "college_programming":"大学编程",
+ "college_physics":"大学物理",
+ "college_chemistry":"大学化学",
+ "advanced_mathematics":"高等数学",
+ "probability_and_statistics":"概率统计",
+ "discrete_mathematics":"离散数学",
+ "electrical_engineer":"注册电气工程师",
+ "metrology_engineer":"注册计量师",
+ "high_school_mathematics":"高中数学",
+ "high_school_physics":"高中物理",
+ "high_school_chemistry":"高中化学",
+ "high_school_biology":"高中生物",
+ "middle_school_mathematics":"初中数学",
+ "middle_school_biology":"初中生物",
+ "middle_school_physics":"初中物理",
+ "middle_school_chemistry":"初中化学",
+ "veterinary_medicine":"兽医学",
+ "college_economics":"大学经济学",
+ "business_administration":"工商管理",
+ "marxism":"马克思主义基本原理",
+ "mao_zedong_thought":"毛泽东思想和中国特色社会主义理论体系概论",
+ "education_science":"教育学",
+ "teacher_qualification":"教师资格",
+ "high_school_politics":"高中政治",
+ "high_school_geography":"高中地理",
+ "middle_school_politics":"初中政治",
+ "middle_school_geography":"初中地理",
+ "modern_chinese_history":"近代史纲要",
+ "ideological_and_moral_cultivation":"思想道德修养与法律基础",
+ "logic":"逻辑学",
+ "law":"法学",
+ "chinese_language_and_literature":"中国语言文学",
+ "art_studies":"艺术学",
+ "professional_tour_guide":"导游资格",
+ "legal_professional":"法律职业资格",
+ "high_school_chinese":"高中语文",
+ "high_school_history":"高中历史",
+ "middle_school_history":"初中历史",
+ "civil_servant":"公务员",
+ "sports_science":"体育学",
+ "plant_protection":"植物保护",
+ "basic_medicine":"基础医学",
+ "clinical_medicine":"临床医学",
+ "urban_and_rural_planner":"注册城乡规划师",
+ "accountant":"注册会计师",
+ "fire_engineer":"注册消防工程师",
+ "environmental_impact_assessment_engineer":"环境影响评价工程师",
+ "tax_accountant":"税务师",
+ "physician":"医师资格"
+ }
+
+
+ # CMMLU
+
+ ### Paper
+
+ CMMLU: Measuring massive multitask language understanding in Chinese
+ https://arxiv.org/abs/2306.09212
+
+ CMMLU is a comprehensive evaluation benchmark specifically designed to evaluate the knowledge and reasoning abilities of LLMs within the context of Chinese language and culture.
+ CMMLU covers a wide range of subjects, comprising 67 topics that span from elementary to advanced professional levels.
+
+ Homepage: https://github.com/haonan-li/CMMLU
+
+ ### Citation
+
+ ```bibtex
+ @misc{li2023cmmlu,
+     title={CMMLU: Measuring massive multitask language understanding in Chinese},
+     author={Haonan Li and Yixuan Zhang and Fajri Koto and Yifei Yang and Hai Zhao and Yeyun Gong and Nan Duan and Timothy Baldwin},
+     year={2023},
+     eprint={2306.09212},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ - `ceval-valid`: All 52 subjects of the C-Eval dataset, evaluated following the methodology in MMLU's original implementation. This implementation consists solely of the validation set of C-Eval, as the test set requires submission of model predictions to an external site.
+
+ #### Tasks
+
+
+ The following tasks evaluate subjects in the C-Eval dataset using loglikelihood-based multiple-choice scoring:
+ - `ceval-valid_{subject_english}`
+
+ ### Checklist
+
+ * [x] Is the task an existing benchmark in the literature?
+ * [x] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation?
+
+ If other tasks on this dataset are already supported:
+ * [x] Is the "Main" variant of this task clearly denoted?
+ * [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
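The "Tasks" section above refers to loglikelihood-based multiple-choice scoring. A rough sketch of that scheme with a generic causal LM, purely illustrative (model name, prompt, and options are placeholders; the harness's real implementation additionally handles batching, caching, and tokenizer boundary cases):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2").eval()


def option_loglikelihood(prompt: str, option: str) -> float:
    """Summed log-probability of `option`'s tokens, conditioned on `prompt`."""
    n_prompt = tok(prompt, return_tensors="pt").input_ids.shape[1]
    full_ids = tok(prompt + option, return_tensors="pt").input_ids
    with torch.no_grad():
        logits = model(full_ids).logits
    # Position i of `log_probs` scores token i+1 of the input sequence.
    log_probs = torch.log_softmax(logits[0, :-1], dim=-1)
    cont = full_ids[0, n_prompt:]  # assumes the prompt tokenizes the same alone
    return log_probs[n_prompt - 1 :].gather(1, cont.unsqueeze(1)).sum().item()


prompt = "Question: ...\nAnswer:"
options = [" A", " B", " C", " D"]
scores = [option_loglikelihood(prompt, o) for o in options]
print(options[scores.index(max(scores))])  # highest-loglikelihood option wins
```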
lm-evaluation/lm_eval/tasks/ceval/_generate_configs.py ADDED
@@ -0,0 +1,118 @@
+ """
+ Take in a YAML, and output all other splits with this YAML
+ """
+ import argparse
+ import os
+
+ import yaml
+ from tqdm import tqdm
+
+ from lm_eval.logger import eval_logger
+
+
+ SUBJECTS = {
+     "computer_network": "计算机网络",
+     "operating_system": "操作系统",
+     "computer_architecture": "计算机组成",
+     "college_programming": "大学编程",
+     "college_physics": "大学物理",
+     "college_chemistry": "大学化学",
+     "advanced_mathematics": "高等数学",
+     "probability_and_statistics": "概率统计",
+     "discrete_mathematics": "离散数学",
+     "electrical_engineer": "注册电气工程师",
+     "metrology_engineer": "注册计量师",
+     "high_school_mathematics": "高中数学",
+     "high_school_physics": "高中物理",
+     "high_school_chemistry": "高中化学",
+     "high_school_biology": "高中生物",
+     "middle_school_mathematics": "初中数学",
+     "middle_school_biology": "初中生物",
+     "middle_school_physics": "初中物理",
+     "middle_school_chemistry": "初中化学",
+     "veterinary_medicine": "兽医学",
+     "college_economics": "大学经济学",
+     "business_administration": "工商管理",
+     "marxism": "马克思主义基本原理",
+     "mao_zedong_thought": "毛泽东思想和中国特色社会主义理论体系概论",
+     "education_science": "教育学",
+     "teacher_qualification": "教师资格",
+     "high_school_politics": "高中政治",
+     "high_school_geography": "高中地理",
+     "middle_school_politics": "初中政治",
+     "middle_school_geography": "初中地理",
+     "modern_chinese_history": "近代史纲要",
+     "ideological_and_moral_cultivation": "思想道德修养与法律基础",
+     "logic": "逻辑学",
+     "law": "法学",
+     "chinese_language_and_literature": "中国语言文学",
+     "art_studies": "艺术学",
+     "professional_tour_guide": "导游资格",
+     "legal_professional": "法律职业资格",
+     "high_school_chinese": "高中语文",
+     "high_school_history": "高中历史",
+     "middle_school_history": "初中历史",
+     "civil_servant": "公务员",
+     "sports_science": "体育学",
+     "plant_protection": "植物保护",
+     "basic_medicine": "基础医学",
+     "clinical_medicine": "临床医学",
+     "urban_and_rural_planner": "注册城乡规划师",
+     "accountant": "注册会计师",
+     "fire_engineer": "注册消防工程师",
+     "environmental_impact_assessment_engineer": "环境影响评价工程师",
+     "tax_accountant": "税务师",
+     "physician": "医师资格",
+ }
+
+
+ def parse_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--base_yaml_path", required=True)
+     parser.add_argument("--save_prefix_path", default="ceval-valid")
+     parser.add_argument("--cot_prompt_path", default=None)
+     parser.add_argument("--task_prefix", default="")
+     return parser.parse_args()
+
+
+ if __name__ == "__main__":
+     args = parse_args()
+
+     # get filename of base_yaml so we can `"include": ` it in our other YAMLs.
+     base_yaml_name = os.path.split(args.base_yaml_path)[-1]
+     with open(args.base_yaml_path, encoding="utf-8") as f:
+         base_yaml = yaml.full_load(f)
+
+     if args.cot_prompt_path is not None:
+         import json
+
+         with open(args.cot_prompt_path, encoding="utf-8") as f:
+             cot_file = json.load(f)
+
+     for subject_eng, subject_zh in tqdm(SUBJECTS.items()):
+         if args.cot_prompt_path is not None:
+             description = cot_file[subject_eng]
+         else:
+             description = (
+                 f"以下是中国关于{subject_zh}的单项选择题，请选出其中的正确答案。\n\n"
+             )
+
+         yaml_dict = {
+             "include": base_yaml_name,
+             "task": f"ceval-valid_{args.task_prefix}_{subject_eng}"
+             if args.task_prefix != ""
+             else f"ceval-valid_{subject_eng}",
+             "dataset_name": subject_eng,
+             "description": description,
+         }
+
+         file_save_path = args.save_prefix_path + f"_{subject_eng}.yaml"
+         eval_logger.info(f"Saving yaml for subset {subject_eng} to {file_save_path}")
+         with open(file_save_path, "w", encoding="utf-8") as yaml_file:
+             yaml.dump(
+                 yaml_dict,
+                 yaml_file,
+                 width=float("inf"),
+                 allow_unicode=True,
+                 default_style='"',
+             )
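For reference, the script above is pointed at the shared base config (the `_default_ceval_yaml` file the per-subject YAMLs below include) and writes one YAML per subject. A self-contained sketch of what the loop emits for a single subject:

```python
import yaml

# What the generator writes for the "accountant" subject; compare with
# ceval-valid_accountant.yaml below. The invocation would be something like:
#   python _generate_configs.py --base_yaml_path _default_ceval_yaml
yaml_dict = {
    "include": "_default_ceval_yaml",
    "task": "ceval-valid_accountant",
    "dataset_name": "accountant",
    "description": "以下是中国关于注册会计师的单项选择题，请选出其中的正确答案。\n\n",
}
print(yaml.dump(yaml_dict, width=float("inf"), allow_unicode=True, default_style='"'))
```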
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_accountant.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "accountant"
+ "description": "以下是中国关于注册会计师的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_accountant"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_advanced_mathematics.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "advanced_mathematics"
+ "description": "以下是中国关于高等数学的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_advanced_mathematics"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_art_studies.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "art_studies"
+ "description": "以下是中国关于艺术学的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_art_studies"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_basic_medicine.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "basic_medicine"
+ "description": "以下是中国关于基础医学的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_basic_medicine"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_civil_servant.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "civil_servant"
+ "description": "以下是中国关于公务员的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_civil_servant"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_clinical_medicine.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "clinical_medicine"
+ "description": "以下是中国关于临床医学的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_clinical_medicine"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_college_physics.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "college_physics"
+ "description": "以下是中国关于大学物理的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_college_physics"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_college_programming.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "college_programming"
+ "description": "以下是中国关于大学编程的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_college_programming"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_computer_network.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "computer_network"
+ "description": "以下是中国关于计算机网络的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_computer_network"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_discrete_mathematics.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "discrete_mathematics"
+ "description": "以下是中国关于离散数学的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_discrete_mathematics"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_education_science.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "education_science"
+ "description": "以下是中国关于教育学的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_education_science"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_electrical_engineer.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "electrical_engineer"
+ "description": "以下是中国关于注册电气工程师的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_electrical_engineer"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_environmental_impact_assessment_engineer.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "environmental_impact_assessment_engineer"
+ "description": "以下是中国关于环境影响评价工程师的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_environmental_impact_assessment_engineer"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_high_school_chemistry.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "high_school_chemistry"
+ "description": "以下是中国关于高中化学的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_high_school_chemistry"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_high_school_chinese.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "high_school_chinese"
+ "description": "以下是中国关于高中语文的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_high_school_chinese"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_high_school_geography.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "high_school_geography"
+ "description": "以下是中国关于高中地理的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_high_school_geography"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_high_school_mathematics.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "high_school_mathematics"
+ "description": "以下是中国关于高中数学的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_high_school_mathematics"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_high_school_politics.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "high_school_politics"
+ "description": "以下是中国关于高中政治的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_high_school_politics"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_ideological_and_moral_cultivation.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "ideological_and_moral_cultivation"
+ "description": "以下是中国关于思想道德修养与法律基础的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_ideological_and_moral_cultivation"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_law.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "law"
+ "description": "以下是中国关于法学的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_law"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_logic.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "logic"
+ "description": "以下是中国关于逻辑学的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_logic"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_mao_zedong_thought.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "mao_zedong_thought"
+ "description": "以下是中国关于毛泽东思想和中国特色社会主义理论体系概论的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_mao_zedong_thought"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_marxism.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "marxism"
+ "description": "以下是中国关于马克思主义基本原理的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_marxism"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_metrology_engineer.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "metrology_engineer"
+ "description": "以下是中国关于注册计量师的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_metrology_engineer"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_middle_school_chemistry.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "middle_school_chemistry"
+ "description": "以下是中国关于初中化学的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_middle_school_chemistry"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_middle_school_geography.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "middle_school_geography"
+ "description": "以下是中国关于初中地理的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_middle_school_geography"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_middle_school_history.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "middle_school_history"
+ "description": "以下是中国关于初中历史的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_middle_school_history"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_middle_school_mathematics.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "middle_school_mathematics"
+ "description": "以下是中国关于初中数学的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_middle_school_mathematics"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_middle_school_politics.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "middle_school_politics"
+ "description": "以下是中国关于初中政治的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_middle_school_politics"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_modern_chinese_history.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "modern_chinese_history"
+ "description": "以下是中国关于近代史纲要的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_modern_chinese_history"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_physician.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "physician"
+ "description": "以下是中国关于医师资格的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_physician"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_plant_protection.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "plant_protection"
+ "description": "以下是中国关于植物保护的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_plant_protection"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_probability_and_statistics.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "probability_and_statistics"
+ "description": "以下是中国关于概率统计的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_probability_and_statistics"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_professional_tour_guide.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "professional_tour_guide"
+ "description": "以下是中国关于导游资格的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_professional_tour_guide"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_tax_accountant.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "tax_accountant"
+ "description": "以下是中国关于税务师的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_tax_accountant"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_teacher_qualification.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "teacher_qualification"
+ "description": "以下是中国关于教师资格的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_teacher_qualification"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_urban_and_rural_planner.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "urban_and_rural_planner"
+ "description": "以下是中国关于注册城乡规划师的单项选择题，请选出其中的正确答案。\n\n"
+ "include": "_default_ceval_yaml"
+ "task": "ceval-valid_urban_and_rural_planner"
lm-evaluation/lm_eval/tasks/csatqa/_default_csatqa_yaml ADDED
@@ -0,0 +1,17 @@
+ group: csatqa
+ dataset_path: EleutherAI/csatqa
+ test_split: test
+ output_type: multiple_choice
+ process_docs: !function utils.process_docs
+ doc_to_text: "{{question}}"
+ doc_to_choice: "{{choices}}"
+ doc_to_target: "{{gold}}"
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 0.0
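The config above reports both `acc` and `acc_norm`. As a sketch of the difference, assuming the harness's usual convention for multiple-choice tasks (`acc` takes the raw maximum-loglikelihood option; `acc_norm` first divides each option's loglikelihood by its byte length):

```python
def pick(loglikelihoods: list[float], options: list[str]) -> tuple[int, int]:
    # acc: argmax of the raw loglikelihood
    acc = max(range(len(options)), key=lambda i: loglikelihoods[i])
    # acc_norm: argmax of loglikelihood per byte of the option text
    acc_norm = max(
        range(len(options)),
        key=lambda i: loglikelihoods[i] / len(options[i].encode("utf-8")),
    )
    return acc, acc_norm


# For fixed-length choices like "(1)"..."(5)" the two picks coincide;
# they can diverge when options have different lengths.
print(pick([-12.3, -9.8, -10.1], ["(1)", "(2)", "(3)"]))  # (1, 1)
```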
lm-evaluation/lm_eval/tasks/csatqa/_generate_configs.py ADDED
@@ -0,0 +1,50 @@
+ """
+ Take in a YAML, and output all other splits with this YAML
+ """
+ import argparse
+ import os
+
+ import yaml
+ from tqdm import tqdm
+
+ from lm_eval.logger import eval_logger
+
+
+ SUBSETS = ["WR", "GR", "RCS", "RCSS", "RCH", "LI"]
+
+
+ def parse_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--base_yaml_path", required=True)
+     parser.add_argument("--save_prefix_path", default="csatqa")
+     parser.add_argument("--task_prefix", default="")
+     return parser.parse_args()
+
+
+ if __name__ == "__main__":
+     args = parse_args()
+
+     # get filename of base_yaml so we can `"include": ` it in our other YAMLs.
+     base_yaml_name = os.path.split(args.base_yaml_path)[-1]
+     with open(args.base_yaml_path, encoding="utf-8") as f:
+         base_yaml = yaml.full_load(f)
+
+     for name in tqdm(SUBSETS):
+         yaml_dict = {
+             "include": base_yaml_name,
+             "task": f"csatqa_{args.task_prefix}_{name}"
+             if args.task_prefix != ""
+             else f"csatqa_{name.lower()}",
+             "dataset_name": name,
+         }
+
+         file_save_path = args.save_prefix_path + f"_{name.lower()}.yaml"
+         eval_logger.info(f"Saving yaml for subset {name} to {file_save_path}")
+         with open(file_save_path, "w", encoding="utf-8") as yaml_file:
+             yaml.dump(
+                 yaml_dict,
+                 yaml_file,
+                 width=float("inf"),
+                 allow_unicode=True,
+                 default_style='"',
+             )
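One detail of the naming branch above: with a non-empty `--task_prefix` the subset keeps its original casing, while the default path lower-cases it (matching the `csatqa_gr`-style task names in the YAMLs below):

```python
def task_name(task_prefix: str, name: str) -> str:
    # Same conditional expression as in the generator's yaml_dict.
    return (
        f"csatqa_{task_prefix}_{name}"
        if task_prefix != ""
        else f"csatqa_{name.lower()}"
    )


print(task_name("", "WR"))    # csatqa_wr
print(task_name("v1", "WR"))  # csatqa_v1_WR
```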
lm-evaluation/lm_eval/tasks/csatqa/csatqa_gr.yaml ADDED
@@ -0,0 +1,3 @@
+ "dataset_name": "GR"
+ "include": "_default_csatqa_yaml"
+ "task": "csatqa_gr"
lm-evaluation/lm_eval/tasks/csatqa/csatqa_li.yaml ADDED
@@ -0,0 +1,3 @@
+ "dataset_name": "LI"
+ "include": "_default_csatqa_yaml"
+ "task": "csatqa_li"
lm-evaluation/lm_eval/tasks/csatqa/csatqa_rch.yaml ADDED
@@ -0,0 +1,3 @@
+ "dataset_name": "RCH"
+ "include": "_default_csatqa_yaml"
+ "task": "csatqa_rch"
lm-evaluation/lm_eval/tasks/csatqa/csatqa_rcs.yaml ADDED
@@ -0,0 +1,3 @@
+ "dataset_name": "RCS"
+ "include": "_default_csatqa_yaml"
+ "task": "csatqa_rcs"
lm-evaluation/lm_eval/tasks/csatqa/csatqa_rcss.yaml ADDED
@@ -0,0 +1,3 @@
+ "dataset_name": "RCSS"
+ "include": "_default_csatqa_yaml"
+ "task": "csatqa_rcss"
lm-evaluation/lm_eval/tasks/csatqa/csatqa_wr.yaml ADDED
@@ -0,0 +1,3 @@
+ "dataset_name": "WR"
+ "include": "_default_csatqa_yaml"
+ "task": "csatqa_wr"
lm-evaluation/lm_eval/tasks/csatqa/utils.py ADDED
@@ -0,0 +1,20 @@
+ import datasets
+
+
+ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
+     def _process_doc(doc):
+         instruction = f"""다음을 읽고 정답으로 알맞은 것을 고르시요.
+ ### Context: {doc["context"]}
+ ### Question: {doc["question"]}
+ ### Options:
+ (1) {doc['option#1']}\n(2) {doc["option#2"]}\n(3) {doc["option#3"]}\n(4) {doc['option#4']}\n(5) {doc['option#5']}
+ ### Answer: 주어진 문제의 정답은"""
+
+         out_doc = {
+             "question": instruction,
+             "choices": ["(1)", "(2)", "(3)", "(4)", "(5)"],
+             "gold": int(doc["gold"]) - 1,
+         }
+         return out_doc
+
+     return dataset.map(_process_doc)
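A quick toy run of `process_docs`, with an invented row in the shape the code expects (real rows come from the `EleutherAI/csatqa` dataset); it shows the `question`/`choices`/`gold` fields that the `_default_csatqa_yaml` templates consume:

```python
import datasets

from utils import process_docs  # the module shown above

ds = datasets.Dataset.from_list([{
    "context": "지문 ...",
    "question": "질문 ...",
    "option#1": "하나", "option#2": "둘", "option#3": "셋",
    "option#4": "넷", "option#5": "다섯",
    "gold": 3,
}])
row = process_docs(ds)[0]
print(row["question"])              # full instruction prompt built by _process_doc
print(row["choices"], row["gold"])  # ['(1)', ..., '(5)'] 2  (gold is 0-indexed)
```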
lm-evaluation/lm_eval/tasks/glue/qnli/default.yaml ADDED
@@ -0,0 +1,14 @@
+ group: glue
+ task: qnli
+ dataset_path: glue
+ dataset_name: qnli
+ output_type: multiple_choice
+ training_split: train
+ validation_split: validation
+ doc_to_text: "{{question}}\n{{sentence}}\nQuestion: Does this response answer the question?\nAnswer:"
+ doc_to_target: label
+ doc_to_choice: ["yes", "no"]
+ metric_list:
+   - metric: acc
+ metadata:
+   version: 1.0
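To make the config concrete: a GLUE QNLI row has a `question`, a `sentence`, and an integer `label` (0 = entailment, i.e. the sentence answers the question). A small illustration of how the templates above resolve, with an invented row:

```python
# Invented QNLI-style row; real rows come from the `glue`/`qnli` dataset.
doc = {
    "question": "When was the telescope launched?",
    "sentence": "The telescope was launched in 1990.",
    "label": 0,
}

# doc_to_text fills {{question}} and {{sentence}} into the fixed prompt.
text = (
    f"{doc['question']}\n{doc['sentence']}\n"
    "Question: Does this response answer the question?\nAnswer:"
)

choices = ["yes", "no"]          # doc_to_choice
target = choices[doc["label"]]   # doc_to_target's label indexes into the choices
print(text)
print("target:", target)
```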