applied-ai-018 committed
Commit 55fbf53 · verified · 1 Parent(s): db09549

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.

Files changed (50):
  1. lm-evaluation/lm_eval/tasks/__pycache__/__init__.cpython-310.pyc +0 -0
  2. lm-evaluation/lm_eval/tasks/aclue/_default_template_yaml +19 -0
  3. lm-evaluation/lm_eval/tasks/aclue/_generate_configs.py +81 -0
  4. lm-evaluation/lm_eval/tasks/aclue/aclue_ancient_chinese_culture.yaml +4 -0
  5. lm-evaluation/lm_eval/tasks/aclue/aclue_ancient_literature.yaml +4 -0
  6. lm-evaluation/lm_eval/tasks/aclue/aclue_ancient_phonetics.yaml +4 -0
  7. lm-evaluation/lm_eval/tasks/aclue/aclue_basic_ancient_chinese.yaml +4 -0
  8. lm-evaluation/lm_eval/tasks/aclue/aclue_couplet_prediction.yaml +4 -0
  9. lm-evaluation/lm_eval/tasks/aclue/aclue_homographic_character_resolution.yaml +4 -0
  10. lm-evaluation/lm_eval/tasks/aclue/aclue_named_entity_recognition.yaml +4 -0
  11. lm-evaluation/lm_eval/tasks/aclue/aclue_poetry_appreciate.yaml +4 -0
  12. lm-evaluation/lm_eval/tasks/aclue/aclue_poetry_context_prediction.yaml +4 -0
  13. lm-evaluation/lm_eval/tasks/aclue/aclue_poetry_quality_assessment.yaml +4 -0
  14. lm-evaluation/lm_eval/tasks/aclue/aclue_poetry_sentiment_analysis.yaml +4 -0
  15. lm-evaluation/lm_eval/tasks/aclue/aclue_reading_comprehension.yaml +4 -0
  16. lm-evaluation/lm_eval/tasks/blimp/animate_subject_trans.yaml +4 -0
  17. lm-evaluation/lm_eval/tasks/blimp/complex_NP_island.yaml +4 -0
  18. lm-evaluation/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_irregular_1.yaml +4 -0
  19. lm-evaluation/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_irregular_2.yaml +4 -0
  20. lm-evaluation/lm_eval/tasks/blimp/distractor_agreement_relational_noun.yaml +4 -0
  21. lm-evaluation/lm_eval/tasks/blimp/existential_there_subject_raising.yaml +4 -0
  22. lm-evaluation/lm_eval/tasks/blimp/irregular_past_participle_verbs.yaml +4 -0
  23. lm-evaluation/lm_eval/tasks/blimp/irregular_plural_subject_verb_agreement_2.yaml +4 -0
  24. lm-evaluation/lm_eval/tasks/blimp/only_npi_licensor_present.yaml +4 -0
  25. lm-evaluation/lm_eval/tasks/blimp/principle_A_case_1.yaml +4 -0
  26. lm-evaluation/lm_eval/tasks/blimp/principle_A_domain_2.yaml +4 -0
  27. lm-evaluation/lm_eval/tasks/blimp/regular_plural_subject_verb_agreement_1.yaml +4 -0
  28. lm-evaluation/lm_eval/tasks/blimp/sentential_negation_npi_licensor_present.yaml +4 -0
  29. lm-evaluation/lm_eval/tasks/blimp/superlative_quantifiers_1.yaml +4 -0
  30. lm-evaluation/lm_eval/tasks/blimp/wh_island.yaml +4 -0
  31. lm-evaluation/lm_eval/tasks/blimp/wh_questions_object_gap.yaml +4 -0
  32. lm-evaluation/lm_eval/tasks/blimp/wh_questions_subject_gap.yaml +4 -0
  33. lm-evaluation/lm_eval/tasks/blimp/wh_questions_subject_gap_long_distance.yaml +4 -0
  34. lm-evaluation/lm_eval/tasks/blimp/wh_vs_that_no_gap_long_distance.yaml +4 -0
  35. lm-evaluation/lm_eval/tasks/blimp/wh_vs_that_with_gap.yaml +4 -0
  36. lm-evaluation/lm_eval/tasks/cmmlu/README.md +48 -0
  37. lm-evaluation/lm_eval/tasks/cmmlu/_default_template_yaml +19 -0
  38. lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_agronomy.yaml +4 -0
  39. lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_ancient_chinese.yaml +4 -0
  40. lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_driving_rule.yaml +4 -0
  41. lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_food_culture.yaml +4 -0
  42. lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_foreign_policy.yaml +4 -0
  43. lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_history.yaml +4 -0
  44. lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_teacher_qualification.yaml +4 -0
  45. lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_clinical_knowledge.yaml +4 -0
  46. lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_education.yaml +4 -0
  47. lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_engineering_hydrology.yaml +4 -0
  48. lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_medicine.yaml +4 -0
  49. lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_computer_security.yaml +4 -0
  50. lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_construction_project_management.yaml +4 -0
lm-evaluation/lm_eval/tasks/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (11.6 kB).
 
lm-evaluation/lm_eval/tasks/aclue/_default_template_yaml ADDED
@@ -0,0 +1,19 @@
+ group: aclue
+ dataset_path: tyouisen/aclue
+ test_split: test
+ fewshot_split: dev
+ fewshot_config:
+   sampler: first_n
+ output_type: multiple_choice
+ doc_to_text: "{{Question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\n答案:"
+ doc_to_choice: ["A", "B", "C", "D"]
+ doc_to_target: "{{['A', 'B', 'C', 'D'].index(Answer)}}"
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 0.0
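The template above turns each ACLUE record into a four-option multiple-choice prompt and maps the gold answer letter to a choice index. A minimal Python sketch of the equivalent rendering (the example record is hypothetical; the harness itself evaluates the Jinja expressions in `doc_to_text` and `doc_to_target`):

```python
# Illustrative only: mirrors what doc_to_text / doc_to_target compute for one record.
doc = {
    "Question": "下列哪项属于先秦诸子散文?",  # hypothetical example record
    "A": "《史记》",
    "B": "《孟子》",
    "C": "《红楼梦》",
    "D": "《聊斋志异》",
    "Answer": "B",
}

# doc_to_text: the question, the four options, then the "答案:" cue
prompt = (
    f"{doc['Question'].strip()}\n"
    f"A. {doc['A']}\nB. {doc['B']}\nC. {doc['C']}\nD. {doc['D']}\n答案:"
)

# doc_to_target: index of the gold letter within doc_to_choice ["A", "B", "C", "D"]
target = ["A", "B", "C", "D"].index(doc["Answer"])  # -> 1
```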
lm-evaluation/lm_eval/tasks/aclue/_generate_configs.py ADDED
@@ -0,0 +1,81 @@
+ """
+ Take in a YAML, and output all other splits with this YAML
+ """
+ import argparse
+ import os
+
+ import yaml
+ from tqdm import tqdm
+
+ from lm_eval.utils import eval_logger
+
+
+ SUBJECTS = {
+     "古文单字多义": "polysemy_resolution",
+     "诗词情感分类": "poetry_sentiment_analysis",
+     "古汉语命名体识别": "named_entity_recognition",
+     "古汉语知识": "basic_ancient_chinese",
+     "古诗词上下句预测": "poetry_context_prediction",
+     "古文断句": "sentence_segmentation",
+     "对联": "couplet_prediction",
+     "古诗词曲鉴赏": "poetry_appreciate",
+     "国学常识": "ancient_chinese_culture",
+     "古音学": "ancient_phonetics",
+     "通假字": "homographic_character_resolution",
+     "古代文学知识": "ancient_literature",
+     "医古文": "ancient_medical",
+     "古诗词质量评估": "poetry_quality_assessment",
+     "古文阅读理解": "reading_comprehension",
+ }
+
+
+ def parse_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--base_yaml_path", required=True)
+     parser.add_argument("--save_prefix_path", default="aclue")
+     parser.add_argument("--cot_prompt_path", default=None)
+     parser.add_argument("--task_prefix", default="")
+     return parser.parse_args()
+
+
+ if __name__ == "__main__":
+     args = parse_args()
+
+     # get filename of base_yaml so we can `"include": ` it in our other YAMLs.
+     base_yaml_name = os.path.split(args.base_yaml_path)[-1]
+     with open(args.base_yaml_path, encoding="utf-8") as f:
+         base_yaml = yaml.full_load(f)
+
+     if args.cot_prompt_path is not None:
+         import json
+
+         with open(args.cot_prompt_path, encoding="utf-8") as f:
+             cot_file = json.load(f)
+
+     for subject_zh, subject_eng in tqdm(SUBJECTS.items()):
+         if args.cot_prompt_path is not None:
+             description = cot_file[subject_eng]
+         else:
+             description = (
+                 f"以下是关于{subject_zh}的单项选择题,请直接给出正确答案的选项。\n\n"
+             )
+
+         yaml_dict = {
+             "include": base_yaml_name,
+             "task": f"aclue_{args.task_prefix}_{subject_eng}"
+             if args.task_prefix != ""
+             else f"aclue_{subject_eng}",
+             "dataset_name": subject_eng,
+             "description": description,
+         }
+
+         file_save_path = args.save_prefix_path + f"_{subject_eng}.yaml"
+         eval_logger.info(f"Saving yaml for subset {subject_eng} to {file_save_path}")
+         with open(file_save_path, "w", encoding="utf-8") as yaml_file:
+             yaml.dump(
+                 yaml_dict,
+                 yaml_file,
+                 width=float("inf"),
+                 allow_unicode=True,
+                 default_style='"',
+             )
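The script writes one `aclue_<subject>.yaml` per entry in `SUBJECTS`, each of which `include`s the shared template above. A hedged sketch of how the per-subject files might be regenerated (paths are illustrative and assume the command runs from the repository root; flags are taken from `parse_args` above):

```python
# Hypothetical regeneration of the per-subject ACLUE YAMLs via subprocess.
import subprocess

subprocess.run(
    [
        "python",
        "lm-evaluation/lm_eval/tasks/aclue/_generate_configs.py",
        "--base_yaml_path", "lm-evaluation/lm_eval/tasks/aclue/_default_template_yaml",
        "--save_prefix_path", "lm-evaluation/lm_eval/tasks/aclue/aclue",
    ],
    check=True,
)
```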
lm-evaluation/lm_eval/tasks/aclue/aclue_ancient_chinese_culture.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "ancient_chinese_culture"
+ "description": "以下是关于国学常识的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_ancient_chinese_culture"
lm-evaluation/lm_eval/tasks/aclue/aclue_ancient_literature.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "ancient_literature"
+ "description": "以下是关于古代文学知识的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_ancient_literature"
lm-evaluation/lm_eval/tasks/aclue/aclue_ancient_phonetics.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "ancient_phonetics"
+ "description": "以下是关于古音学的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_ancient_phonetics"
lm-evaluation/lm_eval/tasks/aclue/aclue_basic_ancient_chinese.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "basic_ancient_chinese"
+ "description": "以下是关于古汉语知识的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_basic_ancient_chinese"
lm-evaluation/lm_eval/tasks/aclue/aclue_couplet_prediction.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "couplet_prediction"
+ "description": "以下是关于对联的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_couplet_prediction"
lm-evaluation/lm_eval/tasks/aclue/aclue_homographic_character_resolution.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "homographic_character_resolution"
+ "description": "以下是关于通假字的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_homographic_character_resolution"
lm-evaluation/lm_eval/tasks/aclue/aclue_named_entity_recognition.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "named_entity_recognition"
+ "description": "以下是关于古汉语命名体识别的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_named_entity_recognition"
lm-evaluation/lm_eval/tasks/aclue/aclue_poetry_appreciate.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "poetry_appreciate"
+ "description": "以下是关于古诗词曲鉴赏的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_poetry_appreciate"
lm-evaluation/lm_eval/tasks/aclue/aclue_poetry_context_prediction.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "poetry_context_prediction"
+ "description": "以下是关于古诗词上下句预测的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_poetry_context_prediction"
lm-evaluation/lm_eval/tasks/aclue/aclue_poetry_quality_assessment.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "poetry_quality_assessment"
+ "description": "以下是关于古诗词质量评估的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_poetry_quality_assessment"
lm-evaluation/lm_eval/tasks/aclue/aclue_poetry_sentiment_analysis.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "poetry_sentiment_analysis"
+ "description": "以下是关于诗词情感分类的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_poetry_sentiment_analysis"
lm-evaluation/lm_eval/tasks/aclue/aclue_reading_comprehension.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "reading_comprehension"
+ "description": "以下是关于古文阅读理解的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_reading_comprehension"
lm-evaluation/lm_eval/tasks/blimp/animate_subject_trans.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: animate_subject_trans
+ include: _template_yaml
+ task: blimp_animate_subject_trans
lm-evaluation/lm_eval/tasks/blimp/complex_NP_island.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: complex_NP_island
+ include: _template_yaml
+ task: blimp_complex_NP_island
lm-evaluation/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_irregular_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: determiner_noun_agreement_with_adj_irregular_1
+ include: _template_yaml
+ task: blimp_determiner_noun_agreement_with_adj_irregular_1
lm-evaluation/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_irregular_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: determiner_noun_agreement_with_adj_irregular_2
+ include: _template_yaml
+ task: blimp_determiner_noun_agreement_with_adj_irregular_2
lm-evaluation/lm_eval/tasks/blimp/distractor_agreement_relational_noun.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: distractor_agreement_relational_noun
+ include: _template_yaml
+ task: blimp_distractor_agreement_relational_noun
lm-evaluation/lm_eval/tasks/blimp/existential_there_subject_raising.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: existential_there_subject_raising
+ include: _template_yaml
+ task: blimp_existential_there_subject_raising
lm-evaluation/lm_eval/tasks/blimp/irregular_past_participle_verbs.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: irregular_past_participle_verbs
+ include: _template_yaml
+ task: blimp_irregular_past_participle_verbs
lm-evaluation/lm_eval/tasks/blimp/irregular_plural_subject_verb_agreement_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: irregular_plural_subject_verb_agreement_2
+ include: _template_yaml
+ task: blimp_irregular_plural_subject_verb_agreement_2
lm-evaluation/lm_eval/tasks/blimp/only_npi_licensor_present.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: only_npi_licensor_present
+ include: _template_yaml
+ task: blimp_only_npi_licensor_present
lm-evaluation/lm_eval/tasks/blimp/principle_A_case_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: principle_A_case_1
+ include: _template_yaml
+ task: blimp_principle_A_case_1
lm-evaluation/lm_eval/tasks/blimp/principle_A_domain_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: principle_A_domain_2
+ include: _template_yaml
+ task: blimp_principle_A_domain_2
lm-evaluation/lm_eval/tasks/blimp/regular_plural_subject_verb_agreement_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: regular_plural_subject_verb_agreement_1
+ include: _template_yaml
+ task: blimp_regular_plural_subject_verb_agreement_1
lm-evaluation/lm_eval/tasks/blimp/sentential_negation_npi_licensor_present.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: sentential_negation_npi_licensor_present
+ include: _template_yaml
+ task: blimp_sentential_negation_npi_licensor_present
lm-evaluation/lm_eval/tasks/blimp/superlative_quantifiers_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: superlative_quantifiers_1
+ include: _template_yaml
+ task: blimp_superlative_quantifiers_1
lm-evaluation/lm_eval/tasks/blimp/wh_island.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: wh_island
+ include: _template_yaml
+ task: blimp_wh_island
lm-evaluation/lm_eval/tasks/blimp/wh_questions_object_gap.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: wh_questions_object_gap
+ include: _template_yaml
+ task: blimp_wh_questions_object_gap
lm-evaluation/lm_eval/tasks/blimp/wh_questions_subject_gap.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: wh_questions_subject_gap
+ include: _template_yaml
+ task: blimp_wh_questions_subject_gap
lm-evaluation/lm_eval/tasks/blimp/wh_questions_subject_gap_long_distance.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: wh_questions_subject_gap_long_distance
+ include: _template_yaml
+ task: blimp_wh_questions_subject_gap_long_distance
lm-evaluation/lm_eval/tasks/blimp/wh_vs_that_no_gap_long_distance.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: wh_vs_that_no_gap_long_distance
+ include: _template_yaml
+ task: blimp_wh_vs_that_no_gap_long_distance
lm-evaluation/lm_eval/tasks/blimp/wh_vs_that_with_gap.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: wh_vs_that_with_gap
+ include: _template_yaml
+ task: blimp_wh_vs_that_with_gap
lm-evaluation/lm_eval/tasks/cmmlu/README.md ADDED
@@ -0,0 +1,48 @@
+ # CMMLU
+
+ ### Paper
+
+ CMMLU: Measuring massive multitask language understanding in Chinese
+ https://arxiv.org/abs/2306.09212
+
+ CMMLU is a comprehensive evaluation benchmark specifically designed to evaluate the knowledge and reasoning abilities of LLMs within the context of Chinese language and culture.
+ CMMLU covers a wide range of subjects, comprising 67 topics that span from elementary to advanced professional levels.
+
+ Homepage: https://github.com/haonan-li/CMMLU
+
+ ### Citation
+
+ ```bibtex
+ @misc{li2023cmmlu,
+       title={CMMLU: Measuring massive multitask language understanding in Chinese},
+       author={Haonan Li and Yixuan Zhang and Fajri Koto and Yifei Yang and Hai Zhao and Yeyun Gong and Nan Duan and Timothy Baldwin},
+       year={2023},
+       eprint={2306.09212},
+       archivePrefix={arXiv},
+       primaryClass={cs.CL}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ - `cmmlu`: All 67 subjects of the CMMLU dataset, evaluated following the methodology in MMLU's original implementation.
+
+ #### Tasks
+
+
+ The following tasks evaluate subjects in the CMMLU dataset using loglikelihood-based multiple-choice scoring:
+ - `cmmlu_{subject_english}`
+
+ ### Checklist
+
+ * [x] Is the task an existing benchmark in the literature?
+   * [x] Have you referenced the original paper that introduced the task?
+   * [x] If yes, does the original paper provide a reference implementation?
+     * [x] Yes, original implementation contributed by author of the benchmark
+
+ If other tasks on this dataset are already supported:
+ * [x] Is the "Main" variant of this task clearly denoted?
+ * [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [x] Have you noted which, if any, published evaluation setups are matched by this variant?
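The `cmmlu_*` tasks described in the README are scored by log-likelihood: each option letter is scored as a continuation of the rendered prompt and the highest-scoring option is taken as the prediction. A toy Python sketch of that selection rule, with a stand-in scoring function rather than the harness's model interface:

```python
# Toy illustration of loglikelihood-based multiple-choice selection (not the
# harness implementation): score each option letter as a continuation of the
# prompt and predict the highest-scoring one. acc_norm additionally divides
# each score by the continuation's byte length before taking the argmax.
def loglikelihood(prompt: str, continuation: str) -> float:
    """Stand-in for a real LM call returning summed token log-probs."""
    return -0.5 * len(continuation)  # dummy value for illustration

choices = ["A", "B", "C", "D"]
prompt = "以下是关于农学的单项选择题,请直接给出正确答案的选项。\n\n...\n答案:"

scores = [loglikelihood(prompt, f" {c}") for c in choices]
pred = choices[max(range(len(choices)), key=scores.__getitem__)]
```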
lm-evaluation/lm_eval/tasks/cmmlu/_default_template_yaml ADDED
@@ -0,0 +1,19 @@
+ group: cmmlu
+ dataset_path: haonan-li/cmmlu
+ test_split: test
+ fewshot_split: dev
+ fewshot_config:
+   sampler: first_n
+ output_type: multiple_choice
+ doc_to_text: "{{Question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\n答案:"
+ doc_to_choice: ["A", "B", "C", "D"]
+ doc_to_target: "{{['A', 'B', 'C', 'D'].index(Answer)}}"
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 0.0
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_agronomy.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "agronomy"
+ "description": "以下是关于农学的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_agronomy"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_ancient_chinese.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "ancient_chinese"
+ "description": "以下是关于古汉语的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_ancient_chinese"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_driving_rule.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "chinese_driving_rule"
+ "description": "以下是关于中国驾驶规则的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_chinese_driving_rule"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_food_culture.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "chinese_food_culture"
+ "description": "以下是关于中国饮食文化的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_chinese_food_culture"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_foreign_policy.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "chinese_foreign_policy"
+ "description": "以下是关于中国外交政策的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_chinese_foreign_policy"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_history.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "chinese_history"
+ "description": "以下是关于中国历史的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_chinese_history"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_teacher_qualification.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "chinese_teacher_qualification"
+ "description": "以下是关于中国教师资格的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_chinese_teacher_qualification"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_clinical_knowledge.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "clinical_knowledge"
+ "description": "以下是关于临床知识的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_clinical_knowledge"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_education.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "college_education"
+ "description": "以下是关于大学教育学的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_college_education"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_engineering_hydrology.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "college_engineering_hydrology"
+ "description": "以下是关于大学工程水文学的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_college_engineering_hydrology"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_medicine.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "college_medicine"
+ "description": "以下是关于大学医学的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_college_medicine"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_computer_security.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "computer_security"
+ "description": "以下是关于计算机安全的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_computer_security"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_construction_project_management.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "construction_project_management"
+ "description": "以下是关于建设工程管理的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_construction_project_management"