diff --git a/lm-evaluation/lm_eval/tasks/aclue/_default_template_yaml b/lm-evaluation/lm_eval/tasks/aclue/_default_template_yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d93ce382d57e8a449868deb79ab544551f7e605b
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/aclue/_default_template_yaml
@@ -0,0 +1,19 @@
+group: aclue
+dataset_path: tyouisen/aclue
+test_split: test
+fewshot_split: dev
+fewshot_config:
+  sampler: first_n
+output_type: multiple_choice
+doc_to_text: "{{Question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\n答案:"
+doc_to_choice: ["A", "B", "C", "D"]
+doc_to_target: "{{['A', 'B', 'C', 'D'].index(Answer)}}"
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
+  - metric: acc_norm
+    aggregation: mean
+    higher_is_better: true
+metadata:
+  version: 0.0
diff --git a/lm-evaluation/lm_eval/tasks/aclue/_generate_configs.py b/lm-evaluation/lm_eval/tasks/aclue/_generate_configs.py
new file mode 100644
index 0000000000000000000000000000000000000000..182ac51b98f17181a35afccbc887338304216d33
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/aclue/_generate_configs.py
@@ -0,0 +1,81 @@
+"""
+Take in a base YAML and generate a task config for each ACLUE subject that includes it.
+"""
+import argparse
+import os
+
+import yaml
+from tqdm import tqdm
+
+from lm_eval.utils import eval_logger
+
+
+SUBJECTS = {
+    "古文单字多义": "polysemy_resolution",
+    "诗词情感分类": "poetry_sentiment_analysis",
+    "古汉语命名体识别": "named_entity_recognition",
+    "古汉语知识": "basic_ancient_chinese",
+    "古诗词上下句预测": "poetry_context_prediction",
+    "古文断句": "sentence_segmentation",
+    "对联": "couplet_prediction",
+    "古诗词曲鉴赏": "poetry_appreciate",
+    "国学常识": "ancient_chinese_culture",
+    "古音学": "ancient_phonetics",
+    "通假字": "homographic_character_resolution",
+    "古代文学知识": "ancient_literature",
+    "医古文": "ancient_medical",
+    "古诗词质量评估": "poetry_quality_assessment",
+    "古文阅读理解": "reading_comprehension",
+}
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--base_yaml_path", required=True)
+    parser.add_argument("--save_prefix_path", default="aclue")
+    parser.add_argument("--cot_prompt_path", default=None)
+    parser.add_argument("--task_prefix", default="")
+    return parser.parse_args()
+
+
+if __name__ == "__main__":
+    args = parse_args()
+
+    # Get the filename of the base YAML so the generated configs can "include" it.
+    base_yaml_name = os.path.split(args.base_yaml_path)[-1]
+    with open(args.base_yaml_path, encoding="utf-8") as f:
+        base_yaml = yaml.full_load(f)
+
+    if args.cot_prompt_path is not None:
+        import json
+
+        with open(args.cot_prompt_path, encoding="utf-8") as f:
+            cot_file = json.load(f)
+
+    for subject_zh, subject_eng in tqdm(SUBJECTS.items()):
+        if args.cot_prompt_path is not None:
+            description = cot_file[subject_eng]
+        else:
+            description = (
+                f"以下是关于{subject_zh}的单项选择题,请直接给出正确答案的选项。\n\n"
+            )
+
+        yaml_dict = {
+            "include": base_yaml_name,
+            "task": f"aclue_{args.task_prefix}_{subject_eng}"
+            if args.task_prefix != ""
+            else f"aclue_{subject_eng}",
+            "dataset_name": subject_eng,
+            "description": description,
+        }
+
+        file_save_path = args.save_prefix_path + f"_{subject_eng}.yaml"
+        eval_logger.info(f"Saving yaml for subset {subject_eng} to {file_save_path}")
+        with open(file_save_path, "w", encoding="utf-8") as yaml_file:
+            yaml.dump(
+                yaml_dict,
+                yaml_file,
+                width=float("inf"),
+                allow_unicode=True,
+                default_style='"',
+            )
diff --git a/lm-evaluation/lm_eval/tasks/aclue/aclue_ancient_chinese_culture.yaml b/lm-evaluation/lm_eval/tasks/aclue/aclue_ancient_chinese_culture.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c9f52077dedd24ce500247a4b606eea83fac6320
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/aclue/aclue_ancient_chinese_culture.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "ancient_chinese_culture"
+"description": "以下是关于国学常识的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "aclue_ancient_chinese_culture"
diff --git a/lm-evaluation/lm_eval/tasks/aclue/aclue_ancient_literature.yaml b/lm-evaluation/lm_eval/tasks/aclue/aclue_ancient_literature.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..641befa3aa1920d8dca1c7007a4fe8cd24ab8e77
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/aclue/aclue_ancient_literature.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "ancient_literature"
+"description": "以下是关于古代文学知识的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "aclue_ancient_literature"
diff --git a/lm-evaluation/lm_eval/tasks/aclue/aclue_ancient_phonetics.yaml b/lm-evaluation/lm_eval/tasks/aclue/aclue_ancient_phonetics.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2fe908e531a07466a66f58f2f5009d5111d5a02d
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/aclue/aclue_ancient_phonetics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "ancient_phonetics"
+"description": "以下是关于古音学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "aclue_ancient_phonetics"
diff --git a/lm-evaluation/lm_eval/tasks/aclue/aclue_basic_ancient_chinese.yaml b/lm-evaluation/lm_eval/tasks/aclue/aclue_basic_ancient_chinese.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5afb88be88b8778fde06cff3a2084bce14397174
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/aclue/aclue_basic_ancient_chinese.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "basic_ancient_chinese"
+"description": "以下是关于古汉语知识的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "aclue_basic_ancient_chinese"
diff --git a/lm-evaluation/lm_eval/tasks/aclue/aclue_couplet_prediction.yaml b/lm-evaluation/lm_eval/tasks/aclue/aclue_couplet_prediction.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..63124eed8eb2c2987e7145ee4633e010407641be
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/aclue/aclue_couplet_prediction.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "couplet_prediction"
+"description": "以下是关于对联的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "aclue_couplet_prediction"
diff --git a/lm-evaluation/lm_eval/tasks/aclue/aclue_homographic_character_resolution.yaml b/lm-evaluation/lm_eval/tasks/aclue/aclue_homographic_character_resolution.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7d50e35d5f31badfc13b1815fffa487b3fc64c82
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/aclue/aclue_homographic_character_resolution.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "homographic_character_resolution"
+"description": "以下是关于通假字的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "aclue_homographic_character_resolution"
diff --git a/lm-evaluation/lm_eval/tasks/aclue/aclue_named_entity_recognition.yaml b/lm-evaluation/lm_eval/tasks/aclue/aclue_named_entity_recognition.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..566e93019b994528bb003f46fb458ed725ef8af1
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/aclue/aclue_named_entity_recognition.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "named_entity_recognition"
+"description": "以下是关于古汉语命名体识别的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "aclue_named_entity_recognition"
diff --git a/lm-evaluation/lm_eval/tasks/aclue/aclue_poetry_appreciate.yaml b/lm-evaluation/lm_eval/tasks/aclue/aclue_poetry_appreciate.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4642992674a1f159fe101859dead4509df6c8166
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/aclue/aclue_poetry_appreciate.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "poetry_appreciate"
+"description": "以下是关于古诗词曲鉴赏的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "aclue_poetry_appreciate"
diff --git a/lm-evaluation/lm_eval/tasks/aclue/aclue_poetry_context_prediction.yaml b/lm-evaluation/lm_eval/tasks/aclue/aclue_poetry_context_prediction.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1b408b659657b4677e056f93c59f2a59ef60cb95
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/aclue/aclue_poetry_context_prediction.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "poetry_context_prediction"
+"description": "以下是关于古诗词上下句预测的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "aclue_poetry_context_prediction"
diff --git a/lm-evaluation/lm_eval/tasks/aclue/aclue_poetry_quality_assessment.yaml b/lm-evaluation/lm_eval/tasks/aclue/aclue_poetry_quality_assessment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a7a7bee2c4ca59e0dc7b2f3fdc08371a9a585d42
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/aclue/aclue_poetry_quality_assessment.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "poetry_quality_assessment"
+"description": "以下是关于古诗词质量评估的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "aclue_poetry_quality_assessment"
diff --git a/lm-evaluation/lm_eval/tasks/aclue/aclue_poetry_sentiment_analysis.yaml b/lm-evaluation/lm_eval/tasks/aclue/aclue_poetry_sentiment_analysis.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6e1367f8043d7e1e9ebcd01dfbaacfbdeb0f9fec
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/aclue/aclue_poetry_sentiment_analysis.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "poetry_sentiment_analysis"
+"description": "以下是关于诗词情感分类的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "aclue_poetry_sentiment_analysis"
diff --git a/lm-evaluation/lm_eval/tasks/aclue/aclue_reading_comprehension.yaml b/lm-evaluation/lm_eval/tasks/aclue/aclue_reading_comprehension.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..92f2455d8089bcc3b7d1ff8b99c03144b5b7d61d
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/aclue/aclue_reading_comprehension.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "reading_comprehension"
+"description": "以下是关于古文阅读理解的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "aclue_reading_comprehension"
diff --git a/lm-evaluation/lm_eval/tasks/blimp/animate_subject_trans.yaml b/lm-evaluation/lm_eval/tasks/blimp/animate_subject_trans.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d15eb2c77d454ae8e2791cac85601a803f4bd785
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/blimp/animate_subject_trans.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: animate_subject_trans
+include: _template_yaml
+task: blimp_animate_subject_trans
diff --git a/lm-evaluation/lm_eval/tasks/blimp/complex_NP_island.yaml b/lm-evaluation/lm_eval/tasks/blimp/complex_NP_island.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f4ccfe41fa0e6e5d3b8d5b46d6f2edaac60606f9
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/blimp/complex_NP_island.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: complex_NP_island
+include: _template_yaml
+task: blimp_complex_NP_island
diff --git a/lm-evaluation/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_irregular_1.yaml b/lm-evaluation/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_irregular_1.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..57f12ecade63b595378cb2c9aadf710725e9d4b0
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_irregular_1.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: determiner_noun_agreement_with_adj_irregular_1
+include: _template_yaml
+task: blimp_determiner_noun_agreement_with_adj_irregular_1
diff --git a/lm-evaluation/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_irregular_2.yaml b/lm-evaluation/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_irregular_2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6df0e7d52df67c979fb74a440a113addb0c434bf
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_irregular_2.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: determiner_noun_agreement_with_adj_irregular_2
+include: _template_yaml
+task: blimp_determiner_noun_agreement_with_adj_irregular_2
diff --git a/lm-evaluation/lm_eval/tasks/blimp/distractor_agreement_relational_noun.yaml b/lm-evaluation/lm_eval/tasks/blimp/distractor_agreement_relational_noun.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..16e3c0217ee09d554edbe8210ff6c78375d267a4
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/blimp/distractor_agreement_relational_noun.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: distractor_agreement_relational_noun
+include: _template_yaml
+task: blimp_distractor_agreement_relational_noun
diff --git a/lm-evaluation/lm_eval/tasks/blimp/existential_there_subject_raising.yaml b/lm-evaluation/lm_eval/tasks/blimp/existential_there_subject_raising.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..45e18aebb660ed759099230686c0e1ae24ea3f86
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/blimp/existential_there_subject_raising.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: existential_there_subject_raising
+include: _template_yaml
+task: blimp_existential_there_subject_raising
diff --git a/lm-evaluation/lm_eval/tasks/blimp/irregular_past_participle_verbs.yaml b/lm-evaluation/lm_eval/tasks/blimp/irregular_past_participle_verbs.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..906fb347710e46c3159aaee05def45730b30929f
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/blimp/irregular_past_participle_verbs.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: irregular_past_participle_verbs
+include: _template_yaml
+task: blimp_irregular_past_participle_verbs
diff --git a/lm-evaluation/lm_eval/tasks/blimp/irregular_plural_subject_verb_agreement_2.yaml b/lm-evaluation/lm_eval/tasks/blimp/irregular_plural_subject_verb_agreement_2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5d3b84fceab0e3907ab6b1bd3e44a0e6c9445416
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/blimp/irregular_plural_subject_verb_agreement_2.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: irregular_plural_subject_verb_agreement_2
+include: _template_yaml
+task: blimp_irregular_plural_subject_verb_agreement_2
diff --git a/lm-evaluation/lm_eval/tasks/blimp/only_npi_licensor_present.yaml b/lm-evaluation/lm_eval/tasks/blimp/only_npi_licensor_present.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8dbce62337d39d44aed2f0f14cfd51dec367a42c
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/blimp/only_npi_licensor_present.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: only_npi_licensor_present
+include: _template_yaml
+task: blimp_only_npi_licensor_present
diff --git a/lm-evaluation/lm_eval/tasks/blimp/principle_A_case_1.yaml b/lm-evaluation/lm_eval/tasks/blimp/principle_A_case_1.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..552f8a1e2423a6a4b7c1ea6a57b10f15fdbdbd1d
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/blimp/principle_A_case_1.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: principle_A_case_1
+include: _template_yaml
+task: blimp_principle_A_case_1
diff --git a/lm-evaluation/lm_eval/tasks/blimp/principle_A_domain_2.yaml b/lm-evaluation/lm_eval/tasks/blimp/principle_A_domain_2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ec3be9a64d0bb5a408a905ed1b72c0b3eaf603c9
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/blimp/principle_A_domain_2.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: principle_A_domain_2
+include: _template_yaml
+task: blimp_principle_A_domain_2
diff --git a/lm-evaluation/lm_eval/tasks/blimp/regular_plural_subject_verb_agreement_1.yaml b/lm-evaluation/lm_eval/tasks/blimp/regular_plural_subject_verb_agreement_1.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2d4df1f7216513f772006c5742917f692e827d59
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/blimp/regular_plural_subject_verb_agreement_1.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: regular_plural_subject_verb_agreement_1
+include: _template_yaml
+task: blimp_regular_plural_subject_verb_agreement_1
diff --git a/lm-evaluation/lm_eval/tasks/blimp/sentential_negation_npi_licensor_present.yaml b/lm-evaluation/lm_eval/tasks/blimp/sentential_negation_npi_licensor_present.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..df607e5c79e02ef8b284ce2b458ba5371951fc89
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/blimp/sentential_negation_npi_licensor_present.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: sentential_negation_npi_licensor_present
+include: _template_yaml
+task: blimp_sentential_negation_npi_licensor_present
diff --git a/lm-evaluation/lm_eval/tasks/blimp/superlative_quantifiers_1.yaml b/lm-evaluation/lm_eval/tasks/blimp/superlative_quantifiers_1.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c3cf8bfc238feb272c290621c9d55772cb6f5dc4
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/blimp/superlative_quantifiers_1.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: superlative_quantifiers_1
+include: _template_yaml
+task: blimp_superlative_quantifiers_1
diff --git a/lm-evaluation/lm_eval/tasks/blimp/wh_island.yaml b/lm-evaluation/lm_eval/tasks/blimp/wh_island.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4b665096a09297695eb40f791faeb81b7d9b7f56
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/blimp/wh_island.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: wh_island
+include: _template_yaml
+task: blimp_wh_island
diff --git a/lm-evaluation/lm_eval/tasks/blimp/wh_questions_object_gap.yaml b/lm-evaluation/lm_eval/tasks/blimp/wh_questions_object_gap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cb78e7b917573f4c8be60508f454a9ddd6e2b668
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/blimp/wh_questions_object_gap.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: wh_questions_object_gap
+include: _template_yaml
+task: blimp_wh_questions_object_gap
diff --git a/lm-evaluation/lm_eval/tasks/blimp/wh_questions_subject_gap.yaml b/lm-evaluation/lm_eval/tasks/blimp/wh_questions_subject_gap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b956919c455893a0282a7d3842fc57eefe624114
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/blimp/wh_questions_subject_gap.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: wh_questions_subject_gap
+include: _template_yaml
+task: blimp_wh_questions_subject_gap
diff --git a/lm-evaluation/lm_eval/tasks/blimp/wh_questions_subject_gap_long_distance.yaml b/lm-evaluation/lm_eval/tasks/blimp/wh_questions_subject_gap_long_distance.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..34c3e5cf7f141db947d42b945262de6849700d3c
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/blimp/wh_questions_subject_gap_long_distance.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: wh_questions_subject_gap_long_distance
+include: _template_yaml
+task: blimp_wh_questions_subject_gap_long_distance
diff --git a/lm-evaluation/lm_eval/tasks/blimp/wh_vs_that_no_gap_long_distance.yaml b/lm-evaluation/lm_eval/tasks/blimp/wh_vs_that_no_gap_long_distance.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4804f67ae82cb4a5af702d80eeded6bd6aacd54f
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/blimp/wh_vs_that_no_gap_long_distance.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: wh_vs_that_no_gap_long_distance
+include: _template_yaml
+task: blimp_wh_vs_that_no_gap_long_distance
diff --git a/lm-evaluation/lm_eval/tasks/blimp/wh_vs_that_with_gap.yaml b/lm-evaluation/lm_eval/tasks/blimp/wh_vs_that_with_gap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ca5af7a576a5ad6f15544cb748f857a549d90295
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/blimp/wh_vs_that_with_gap.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: wh_vs_that_with_gap
+include: _template_yaml
+task: blimp_wh_vs_that_with_gap
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/README.md b/lm-evaluation/lm_eval/tasks/cmmlu/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7690c205c45e0c425acb025940097f10ad181c73
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/README.md
@@ -0,0 +1,48 @@
+# CMMLU
+
+### Paper
+
+CMMLU: Measuring massive multitask language understanding in Chinese
+https://arxiv.org/abs/2306.09212
+
+CMMLU is a comprehensive evaluation benchmark specifically designed to evaluate the knowledge and reasoning abilities of LLMs within the context of Chinese language and culture.
+CMMLU covers a wide range of subjects, comprising 67 topics that span from elementary to advanced professional levels.
+
+Homepage: https://github.com/haonan-li/CMMLU
+
+### Citation
+
+```bibtex
+@misc{li2023cmmlu,
+    title={CMMLU: Measuring massive multitask language understanding in Chinese},
+    author={Haonan Li and Yixuan Zhang and Fajri Koto and Yifei Yang and Hai Zhao and Yeyun Gong and Nan Duan and Timothy Baldwin},
+    year={2023},
+    eprint={2306.09212},
+    archivePrefix={arXiv},
+    primaryClass={cs.CL}
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+- `cmmlu`: All 67 subjects of the CMMLU dataset, evaluated following the methodology in MMLU's original implementation.
+
+#### Tasks
+
+
+The following tasks evaluate subjects in the CMMLU dataset using loglikelihood-based multiple-choice scoring:
+- `cmmlu_{subject_english}`
+
+### Checklist
+
+* [x] Is the task an existing benchmark in the literature?
+  * [x] Have you referenced the original paper that introduced the task?
+  * [x] If yes, does the original paper provide a reference implementation?
+    * [x] Yes, original implementation contributed by author of the benchmark
+
+If other tasks on this dataset are already supported:
+* [x] Is the "Main" variant of this task clearly denoted?
+* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [x] Have you noted which, if any, published evaluation setups are matched by this variant?
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/_default_template_yaml b/lm-evaluation/lm_eval/tasks/cmmlu/_default_template_yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d2e0a8876c6963ff17f8da6bef14063d3b0d92a4
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/_default_template_yaml
@@ -0,0 +1,19 @@
+group: cmmlu
+dataset_path: haonan-li/cmmlu
+test_split: test
+fewshot_split: dev
+fewshot_config:
+  sampler: first_n
+output_type: multiple_choice
+doc_to_text: "{{Question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\n答案:"
+doc_to_choice: ["A", "B", "C", "D"]
+doc_to_target: "{{['A', 'B', 'C', 'D'].index(Answer)}}"
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
+  - metric: acc_norm
+    aggregation: mean
+    higher_is_better: true
+metadata:
+  version: 0.0
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_agronomy.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_agronomy.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..034ce661d6e72e35fdef2b7cddb94d00d7aec0ef
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_agronomy.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "agronomy"
+"description": "以下是关于农学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_agronomy"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_ancient_chinese.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_ancient_chinese.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2099e0a12de41328eed39ebafb5ec940ee4aac5a
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_ancient_chinese.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "ancient_chinese"
+"description": "以下是关于古汉语的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_ancient_chinese"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_driving_rule.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_driving_rule.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2dce17c0f0c1f9a99aff32ee633eab90026e823f
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_driving_rule.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "chinese_driving_rule"
+"description": "以下是关于中国驾驶规则的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_chinese_driving_rule"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_food_culture.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_food_culture.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..52400c56bc4b6e39af23137c179f53102b7009a6
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_food_culture.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "chinese_food_culture"
+"description": "以下是关于中国饮食文化的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_chinese_food_culture"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_foreign_policy.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_foreign_policy.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bbb34a96a36cf9db8f68fe7047b2c81260afdd6f
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_foreign_policy.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "chinese_foreign_policy"
+"description": "以下是关于中国外交政策的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_chinese_foreign_policy"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_history.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_history.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..26fe1db5ac3039018f02bfa55e118cbba1db4ed8
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_history.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "chinese_history"
+"description": "以下是关于中国历史的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_chinese_history"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_teacher_qualification.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_teacher_qualification.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..226e98a92e435abefc82c34fad8755c80ea42448
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_teacher_qualification.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "chinese_teacher_qualification"
+"description": "以下是关于中国教师资格的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_chinese_teacher_qualification"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_clinical_knowledge.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_clinical_knowledge.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6ed36425f2c3b866e62e0ac9b38dd0aeab118916
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_clinical_knowledge.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "clinical_knowledge"
+"description": "以下是关于临床知识的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_clinical_knowledge"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_education.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_education.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..952f351cb005d300becc2f5e3b7d5b8579b979a5
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_education.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "college_education"
+"description": "以下是关于大学教育学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_college_education"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_engineering_hydrology.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_engineering_hydrology.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d52288a4d96e3eee909a7f33c845ba2fa9590aba
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_engineering_hydrology.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "college_engineering_hydrology"
+"description": "以下是关于大学工程水文学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_college_engineering_hydrology"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_medicine.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_medicine.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dff0d7860fee5dbd289ffaf2d61215fc1c79707b
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_medicine.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "college_medicine"
+"description": "以下是关于大学医学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_college_medicine"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_computer_security.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_computer_security.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9471546184de5dde5edeb8031a64e588c7594f8f
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_computer_security.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "computer_security"
+"description": "以下是关于计算机安全的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_computer_security"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_construction_project_management.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_construction_project_management.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..86265b0804a30e1d2352ff79bcaaa8de3c15316f
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_construction_project_management.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "construction_project_management"
+"description": "以下是关于建设工程管理的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_construction_project_management"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_economics.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_economics.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4602efb430d49e3a876b7243c4cfffe506094b34
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_economics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "economics"
+"description": "以下是关于经济学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_economics"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_electrical_engineering.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_electrical_engineering.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2bb920b53ab8856d717fea8e07e87077ec3b3f71
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_electrical_engineering.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "electrical_engineering"
+"description": "以下是关于电气工程的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_electrical_engineering"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_elementary_mathematics.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_elementary_mathematics.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f702312ca07c2b882d17c88d30dbe87a837ce5c6
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_elementary_mathematics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "elementary_mathematics"
+"description": "以下是关于初等数学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_elementary_mathematics"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_genetics.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_genetics.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..be57628b6f0d3dd2bc6719e08f9aaddb45ac7fa2
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_genetics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "genetics"
+"description": "以下是关于遗传学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_genetics"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_high_school_chemistry.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_high_school_chemistry.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..34e99ea0f47b7017206bd6e9078ca7a5c2b25f0e
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_high_school_chemistry.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_chemistry"
+"description": "以下是关于高中化学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_high_school_chemistry"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_high_school_geography.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_high_school_geography.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c73ebe9171df9e9f0fbdf2fecddb251e56884702
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_high_school_geography.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_geography"
+"description": "以下是关于高中地理的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_high_school_geography"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_high_school_mathematics.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_high_school_mathematics.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3598501c1763d5f1c19444e1b18bb242149fdd34
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_high_school_mathematics.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_mathematics"
+"description": "以下是关于高中数学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_high_school_mathematics"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_international_law.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_international_law.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..32112d3c8b6ee26ee786439053c2d1f1da5b04c2
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_international_law.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "international_law"
+"description": "以下是关于国际法学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_international_law"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_jurisprudence.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_jurisprudence.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ab40da40bafeb56459ae462b795be8c8584fb02a
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_jurisprudence.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "jurisprudence"
+"description": "以下是关于法理学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_jurisprudence"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_marketing.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_marketing.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a729641f9059060ec9abadeac611cf3e74528165
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_marketing.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "marketing"
+"description": "以下是关于市场营销的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_marketing"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_marxist_theory.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_marxist_theory.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f99fa17514a10e8bf587b50ae9dd997b80c00225
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_marxist_theory.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "marxist_theory"
+"description": "以下是关于马克思主义理论的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_marxist_theory"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_nutrition.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_nutrition.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..23d52c45e07134b2ff4f7c1a8e55ba19acfbcfd9
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_nutrition.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "nutrition"
+"description": "以下是关于营养学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_nutrition"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_philosophy.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_philosophy.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..17340fa490f0350e6e532b2c67f8c81fa63bfb3a
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_philosophy.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "philosophy"
+"description": "以下是关于哲学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_philosophy"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_professional_accounting.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_professional_accounting.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bed3485d787d921fb25bbbfbad7671118acfc42b
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_professional_accounting.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "professional_accounting"
+"description": "以下是关于专业会计的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_professional_accounting"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_professional_law.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_professional_law.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dec4c6cf1d7b095fab8fb293b9cf7600765f24db
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_professional_law.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "professional_law"
+"description": "以下是关于专业法学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_professional_law"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_professional_psychology.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_professional_psychology.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..83f0255591a17711d6ac99cf164a29ffe2a69866
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_professional_psychology.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "professional_psychology"
+"description": "以下是关于专业心理学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_professional_psychology"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_public_relations.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_public_relations.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a1c3711ef7734df27852065cf894f9c9cff9d776
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_public_relations.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "public_relations"
+"description": "以下是关于公共关系的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_public_relations"
diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_traditional_chinese_medicine.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_traditional_chinese_medicine.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ed4627deefd6a9a1737cc700604b940b31635cf8
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_traditional_chinese_medicine.yaml
@@ -0,0 +1,4 @@
+"dataset_name": "traditional_chinese_medicine"
+"description": "以下是关于中医中药的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_traditional_chinese_medicine"
diff --git a/lm-evaluation/lm_eval/tasks/drop/README.md b/lm-evaluation/lm_eval/tasks/drop/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..6b7fc47b7165034bd74c524048f5f54ea8d041cf
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/drop/README.md
@@ -0,0 +1,53 @@
+# DROP
+
+### Paper
+
+Title: `DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs`
+
+Abstract: https://aclanthology.org/attachments/N19-1246.Supplementary.pdf
+
+DROP is a QA dataset which tests comprehensive understanding of paragraphs. In
+this crowdsourced, adversarially-created, 96k question-answering benchmark, a
+system must resolve multiple references in a question, map them onto a paragraph,
+and perform discrete operations over them (such as addition, counting, or sorting).
+
+Homepage: https://allenai.org/data/drop
+
+Acknowledgement: This implementation is based on the official evaluation for `DROP`:
+https://github.com/allenai/allennlp-reading-comprehension/blob/master/allennlp_rc/eval/drop_eval.py
+
+### Citation
+
+```
+@misc{dua2019drop,
+    title={DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs},
+    author={Dheeru Dua and Yizhong Wang and Pradeep Dasigi and Gabriel Stanovsky and Sameer Singh and Matt Gardner},
+    year={2019},
+    eprint={1903.00161},
+    archivePrefix={arXiv},
+    primaryClass={cs.CL}
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+* Not part of a group yet.
+
+#### Tasks
+
+* `drop`
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [ ] Is the task an existing benchmark in the literature?
+  * [ ] Have you referenced the original paper that introduced the task?
+  * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
diff --git a/lm-evaluation/lm_eval/tasks/kobest/kobest_sentineg.yaml b/lm-evaluation/lm_eval/tasks/kobest/kobest_sentineg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..64319dca39c520c7a8f9c4f20f0ae2a9e44b7230
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/kobest/kobest_sentineg.yaml
@@ -0,0 +1,25 @@
+group:
+  - kobest
+task: kobest_sentineg
+dataset_path: skt/kobest_v1
+dataset_name: sentineg
+output_type: multiple_choice
+training_split: train
+validation_split: validation
+test_split: test
+doc_to_text: !function utils.sentineg_doc_to_text
+doc_to_target: "{{label}}"
+doc_to_choice: ["부정", "긍정"]
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: True
+  - metric: f1
+    aggregation: !function utils.macro_f1_score
+    average: macro
+    hf_evaluate: true
+    higher_is_better: True
+metadata:
+  version: 1.0
+dataset_kwargs:
+  trust_remote_code: true
diff --git a/lm-evaluation/lm_eval/tasks/logiqa/README.md b/lm-evaluation/lm_eval/tasks/logiqa/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e9dff3c546f2cf690f1079130a8b9b61db492d78
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/logiqa/README.md
@@ -0,0 +1,52 @@
+# LogiQA
+
+### Paper
+
+Title: `LogiQA: A Challenge Dataset for Machine Reading Comprehension with Logical Reasoning`
+
+Abstract: https://arxiv.org/abs/2007.08124
+
+LogiQA is a dataset for testing human logical reasoning. It consists of 8,678 QA
+instances, covering multiple types of deductive reasoning. Results show that state-
+of-the-art neural models perform far worse than the human ceiling. The dataset can
+also serve as a benchmark for reinvestigating logical AI under the deep learning
+NLP setting.
+
+Homepage: https://github.com/lgw863/LogiQA-dataset
+
+
+### Citation
+
+```
+@misc{liu2020logiqa,
+    title={LogiQA: A Challenge Dataset for Machine Reading Comprehension with Logical Reasoning},
+    author={Jian Liu and Leyang Cui and Hanmeng Liu and Dandan Huang and Yile Wang and Yue Zhang},
+    year={2020},
+    eprint={2007.08124},
+    archivePrefix={arXiv},
+    primaryClass={cs.CL}
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+* Not part of a group yet
+
+#### Tasks
+
+* `logiqa`
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [ ] Is the task an existing benchmark in the literature?
+  * [ ] Have you referenced the original paper that introduced the task?
+  * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
diff --git a/lm-evaluation/lm_eval/tasks/logiqa/logiqa.yaml b/lm-evaluation/lm_eval/tasks/logiqa/logiqa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3e318b7d16053ff28069b91edddea095c2f0ca36
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/logiqa/logiqa.yaml
@@ -0,0 +1,23 @@
+task: logiqa
+dataset_path: EleutherAI/logiqa
+dataset_name: logiqa
+output_type: multiple_choice
+training_split: train
+validation_split: validation
+test_split: test
+doc_to_choice: "{{options}}"
+doc_to_text: !function utils_logiqa.doc_to_text
+doc_to_target: !function utils_logiqa.doc_to_target
+doc_to_decontamination_query: "{{context}}"
+should_decontaminate: true
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
+  - metric: acc_norm
+    aggregation: mean
+    higher_is_better: true
+metadata:
+  version: 1.0
+dataset_kwargs:
+  trust_remote_code: true
diff --git a/lm-evaluation/lm_eval/tasks/logiqa/utils_logiqa.py b/lm-evaluation/lm_eval/tasks/logiqa/utils_logiqa.py
new file mode 100644
index 0000000000000000000000000000000000000000..c462db9115f8747097e3af4f4e7a3c5a1165a1bd
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/logiqa/utils_logiqa.py
@@ -0,0 +1,24 @@
+# Copied from Master
+def doc_to_text(doc) -> str:
+    """
+    Passage:
+    Question:
+    Choices:
+    A.
+    B.
+    C.
+    D.
+    Answer:
+    """
+    choices = ["a", "b", "c", "d"]
+    prompt = "Passage: " + doc["context"] + "\n"
+    prompt += "Question: " + doc["question"] + "\nChoices:\n"
+    for choice, option in zip(choices, doc["options"]):
+        prompt += f"{choice.upper()}. {option}\n"
+    prompt += "Answer:"
+    return prompt
+
+
+def doc_to_target(doc) -> int:
+    choices = ["a", "b", "c", "d"]
+    return choices.index(doc["label"].strip())
diff --git a/lm-evaluation/lm_eval/tasks/openbookqa/README.md b/lm-evaluation/lm_eval/tasks/openbookqa/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..34849ac628176dc9fe48bf6239c77a494b97ac3d
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/openbookqa/README.md
@@ -0,0 +1,54 @@
+# OpenBookQA
+
+### Paper
+
+Title: `Can a Suit of Armor Conduct Electricity? A New Dataset for Open Book Question Answering`
+
+Abstract: https://arxiv.org/abs/1809.02789
+
+OpenBookQA is a question-answering dataset modeled after open book exams for
+assessing human understanding of a subject. It consists of 5,957 multiple-choice
+elementary-level science questions (4,957 train, 500 dev, 500 test), which probe
+the understanding of a small “book” of 1,326 core science facts and the application
+of these facts to novel situations. For training, the dataset includes a mapping
+from each question to the core science fact it was designed to probe. Answering
+OpenBookQA questions requires additional broad common knowledge, not contained
+in the book. The questions, by design, are answered incorrectly by both a retrieval-
+based algorithm and a word co-occurrence algorithm.
+
+Homepage: https://allenai.org/data/open-book-qa
+
+
+### Citation
+
+```
+@inproceedings{OpenBookQA2018,
+    title={Can a Suit of Armor Conduct Electricity? A New Dataset for Open Book Question Answering},
+    author={Todor Mihaylov and Peter Clark and Tushar Khot and Ashish Sabharwal},
+    booktitle={EMNLP},
+    year={2018}
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+* Not part of a group yet
+
+#### Tasks
+
+* `openbookqa`
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [ ] Is the task an existing benchmark in the literature?
+  * [ ] Have you referenced the original paper that introduced the task?
+  * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
diff --git a/lm-evaluation/lm_eval/tasks/openbookqa/openbookqa.yaml b/lm-evaluation/lm_eval/tasks/openbookqa/openbookqa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bdfcd19635a0d06d6b4190c27d59ce93de0aef80
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/openbookqa/openbookqa.yaml
@@ -0,0 +1,21 @@
+task: openbookqa
+dataset_path: openbookqa
+dataset_name: main
+output_type: multiple_choice
+training_split: train
+validation_split: validation
+test_split: test
+doc_to_text: question_stem
+doc_to_target: "{{choices.label.index(answerKey.lstrip())}}"
+doc_to_choice: "{{choices.text}}"
+should_decontaminate: true
+doc_to_decontamination_query: question_stem
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
+  - metric: acc_norm
+    aggregation: mean
+    higher_is_better: true
+metadata:
+  version: 1.0
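Note: the per-subject ACLUE YAMLs in this diff are produced by `_generate_configs.py` rather than written by hand. A minimal sketch of how one might regenerate them, assuming the script is run from the `lm-evaluation/lm_eval/tasks/aclue/` directory (the flags match the script's `parse_args` above; everything else here is illustrative):

```python
# Hypothetical regeneration of the ACLUE per-subject configs.
# Assumes the current working directory is lm-evaluation/lm_eval/tasks/aclue/
# and that "python" resolves to the interpreter with lm_eval installed.
import subprocess

subprocess.run(
    [
        "python",
        "_generate_configs.py",
        "--base_yaml_path", "_default_template_yaml",  # template each config "include"s
        "--save_prefix_path", "aclue",  # files written as aclue_<subject>.yaml
    ],
    check=True,
)
```

The CMMLU configs follow the same pattern (a shared `_default_template_yaml` plus one four-line YAML per subject), so a near-identical generator invocation would apply there.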