diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/README.md b/lm-evaluation-harness/lm_eval/tasks/ceval/README.md new file mode 100644 index 0000000000000000000000000000000000000000..091b8bb6e26e6584a1ec19afd02331b36f604ad9 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/README.md @@ -0,0 +1,127 @@ +# C-Eval (Validation) + +### Paper +C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models +https://arxiv.org/pdf/2305.08322.pdf + +C-Eval is a comprehensive Chinese evaluation suite for foundation models. +It consists of 13948 multi-choice questions spanning 52 diverse disciplines +and four difficulty levels. + +Homepage: https://cevalbenchmark.com/ + +### Citation + +```bibtex +@article{huang2023ceval, + title={C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models}, + author={Huang, Yuzhen and Bai, Yuzhuo and Zhu, Zhihao and Zhang, Junlei and Zhang, Jinghan and Su, Tangjun and Liu, Junteng and Lv, Chuancheng and Zhang, Yikai and Lei, Jiayi and Fu, Yao and Sun, Maosong and He, Junxian}, + journal={arXiv preprint arXiv:2305.08322}, + year={2023} +} +``` + + +SUBJECTS = { + "computer_network":"计算机网络", + "operating_system":"操作系统", + "computer_architecture":"计算机组成", + "college_programming":"大学编程", + "college_physics":"大学物理", + "college_chemistry":"大学化学", + "advanced_mathematics":"高等数学", + "probability_and_statistics":"概率统计", + "discrete_mathematics":"离散数学", + "electrical_engineer":"注册电气工程师", + "metrology_engineer":"注册计量师", + "high_school_mathematics":"高中数学", + "high_school_physics":"高中物理", + "high_school_chemistry":"高中化学", + "high_school_biology":"高中生物", + "middle_school_mathematics":"初中数学", + "middle_school_biology":"初中生物", + "middle_school_physics":"初中物理", + "middle_school_chemistry":"初中化学", + "veterinary_medicine":"兽医学", + "college_economics":"大学经济学", + "business_administration":"工商管理", + "marxism":"马克思主义基本原理", + "mao_zedong_thought":"毛泽东思想和中国特色社会主义理论体系概论", + "education_science":"教育学", + "teacher_qualification":"教师资格", + "high_school_politics":"高中政治", + "high_school_geography":"高中地理", + "middle_school_politics":"初中政治", + "middle_school_geography":"初中地理", + "modern_chinese_history":"近代史纲要", + "ideological_and_moral_cultivation":"思想道德修养与法律基础", + "logic":"逻辑学", + "law":"法学", + "chinese_language_and_literature":"中国语言文学", + "art_studies":"艺术学", + "professional_tour_guide":"导游资格", + "legal_professional":"法律职业资格", + "high_school_chinese":"高中语文", + "high_school_history":"高中历史", + "middle_school_history":"初中历史", + "civil_servant":"公务员", + "sports_science":"体育学", + "plant_protection":"植物保护", + "basic_medicine":"基础医学", + "clinical_medicine":"临床医学", + "urban_and_rural_planner":"注册城乡规划师", + "accountant":"注册会计师", + "fire_engineer":"注册消防工程师", + "environmental_impact_assessment_engineer":"环境影响评价工程师", + "tax_accountant":"税务师", + "physician":"医师资格" +} + + +# CMMLU + +### Paper + +CMMLU: Measuring massive multitask language understanding in Chinese +https://arxiv.org/abs/2306.09212 + +CMMLU is a comprehensive evaluation benchmark specifically designed to evaluate the knowledge and reasoning abilities of LLMs within the context of Chinese language and culture. +CMMLU covers a wide range of subjects, comprising 67 topics that span from elementary to advanced professional levels. 
+ +Homepage: https://github.com/haonan-li/CMMLU + +### Citation + +```bibtex +@misc{li2023cmmlu, + title={CMMLU: Measuring massive multitask language understanding in Chinese}, + author={Haonan Li and Yixuan Zhang and Fajri Koto and Yifei Yang and Hai Zhao and Yeyun Gong and Nan Duan and Timothy Baldwin}, + year={2023}, + eprint={2306.09212}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Groups and Tasks + +#### Groups + +- `ceval-valid`: All 52 subjects of the C-Eval dataset, evaluated following the methodology in MMLU's original implementation. This implementation consists solely of the validation set of C-Eval, as the test set requires submission of model predictions to an external site. + +#### Tasks + + +The following tasks evaluate subjects in the C-Eval dataset using loglikelihood-based multiple-choice scoring: +- `ceval-valid_{subject_english}` + +### Checklist + +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/_default_ceval_yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/_default_ceval_yaml new file mode 100644 index 0000000000000000000000000000000000000000..a94d87cb54f8b98bf5f418149b9b54c9b78c2695 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/_default_ceval_yaml @@ -0,0 +1,19 @@ +group: ceval-valid +dataset_path: ceval/ceval-exam +validation_split: val +fewshot_split: dev +fewshot_config: + sampler: first_n +output_type: multiple_choice +doc_to_text: "{{question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. 
{{D}}\n答案:" +doc_to_choice: ["A", "B", "C", "D"] +doc_to_target: "{{['A', 'B', 'C', 'D'].index(answer)}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_accountant.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_accountant.yaml new file mode 100644 index 0000000000000000000000000000000000000000..04f669eda4c5b27bc8efb719820667040da8ae8c --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_accountant.yaml @@ -0,0 +1,4 @@ +"dataset_name": "accountant" +"description": "以下是中国关于注册会计师的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_accountant" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_advanced_mathematics.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_advanced_mathematics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9778347b0c6fbc67b7d65b33aba7d9fdb1487a54 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_advanced_mathematics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "advanced_mathematics" +"description": "以下是中国关于高等数学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_advanced_mathematics" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_art_studies.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_art_studies.yaml new file mode 100644 index 0000000000000000000000000000000000000000..627226260a6388a13d0f1759b54d8251339eb194 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_art_studies.yaml @@ -0,0 +1,4 @@ +"dataset_name": "art_studies" +"description": "以下是中国关于艺术学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_art_studies" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_basic_medicine.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_basic_medicine.yaml new file mode 100644 index 0000000000000000000000000000000000000000..907bf8eb361548775047a5cbfe03befb89041dba --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_basic_medicine.yaml @@ -0,0 +1,4 @@ +"dataset_name": "basic_medicine" +"description": "以下是中国关于基础医学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_basic_medicine" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_business_administration.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_business_administration.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b1f96f334259ecadf0504d7bc107c96ef2049a9e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_business_administration.yaml @@ -0,0 +1,4 @@ +"dataset_name": "business_administration" +"description": "以下是中国关于工商管理的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_business_administration" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_chinese_language_and_literature.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_chinese_language_and_literature.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e285b59d0992148421c5a10094c8ff94e97a87c9 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_chinese_language_and_literature.yaml @@ -0,0 +1,4 @@ +"dataset_name": "chinese_language_and_literature" +"description": 
"以下是中国关于中国语言文学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_chinese_language_and_literature" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_civil_servant.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_civil_servant.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0aad21b99c3e10fadd916a5a091d7499af718729 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_civil_servant.yaml @@ -0,0 +1,4 @@ +"dataset_name": "civil_servant" +"description": "以下是中国关于公务员的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_civil_servant" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_clinical_medicine.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_clinical_medicine.yaml new file mode 100644 index 0000000000000000000000000000000000000000..97c08d06266727d43161cea97f8a914024a20ca8 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_clinical_medicine.yaml @@ -0,0 +1,4 @@ +"dataset_name": "clinical_medicine" +"description": "以下是中国关于临床医学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_clinical_medicine" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_college_chemistry.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_college_chemistry.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9ba89714e621bb9e693a2b738a02027ec70169ef --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_college_chemistry.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_chemistry" +"description": "以下是中国关于大学化学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_college_chemistry" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_college_economics.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_college_economics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..10b89f8fd6fc0938dce44f27ad3cc67c83f60178 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_college_economics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_economics" +"description": "以下是中国关于大学经济学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_college_economics" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_college_physics.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_college_physics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..50e7f01c18e66ad47b6bac6db1f5ce4bc9cfec9a --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_college_physics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_physics" +"description": "以下是中国关于大学物理的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_college_physics" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_college_programming.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_college_programming.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4cfe029a7e888aa0da80e3b3ba1c071dd8b7d5cc --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_college_programming.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_programming" +"description": "以下是中国关于大学编程的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_college_programming" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_computer_architecture.yaml 
b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_computer_architecture.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d17454a73eb34c6eefe32b1bdb9697cbd931e8d3 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_computer_architecture.yaml @@ -0,0 +1,4 @@ +"dataset_name": "computer_architecture" +"description": "以下是中国关于计算机组成的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_computer_architecture" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_computer_network.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_computer_network.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9391dbbdc8c2f307b0553d401413a5159d46a53f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_computer_network.yaml @@ -0,0 +1,4 @@ +"dataset_name": "computer_network" +"description": "以下是中国关于计算机网络的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_computer_network" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_discrete_mathematics.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_discrete_mathematics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f2bd42046300cd3eff136817cbd85031e7b8fecc --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_discrete_mathematics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "discrete_mathematics" +"description": "以下是中国关于离散数学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_discrete_mathematics" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_education_science.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_education_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..985edf982226b4ab5a8de90c4cc27b5b4e331405 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_education_science.yaml @@ -0,0 +1,4 @@ +"dataset_name": "education_science" +"description": "以下是中国关于教育学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_education_science" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_electrical_engineer.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_electrical_engineer.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cc946b99d36b2ab5215c9ab1458891284a1d93ac --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_electrical_engineer.yaml @@ -0,0 +1,4 @@ +"dataset_name": "electrical_engineer" +"description": "以下是中国关于注册电气工程师的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_electrical_engineer" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_fire_engineer.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_fire_engineer.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ddb6fa779bd59c59bf65052f162ea4ddc0018eef --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_fire_engineer.yaml @@ -0,0 +1,4 @@ +"dataset_name": "fire_engineer" +"description": "以下是中国关于注册消防工程师的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_fire_engineer" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_high_school_biology.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_high_school_biology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..26cbc8b5a8af3bc90363d86e8e0744fcf3b90654 
--- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_high_school_biology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_biology" +"description": "以下是中国关于高中生物的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_high_school_biology" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_high_school_chemistry.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_high_school_chemistry.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8e140af2932ba9751ae0617bd50a48ae7c925e3d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_high_school_chemistry.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_chemistry" +"description": "以下是中国关于高中化学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_high_school_chemistry" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_high_school_geography.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_high_school_geography.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a2bb10ca6132dd5d3619802d1502240c620986f1 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_high_school_geography.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_geography" +"description": "以下是中国关于高中地理的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_high_school_geography" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_high_school_history.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_high_school_history.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9335bc0f791cb174744b6bfd6d0d612cb6721346 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_high_school_history.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_history" +"description": "以下是中国关于高中历史的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_high_school_history" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_high_school_mathematics.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_high_school_mathematics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..abc00afa68ba2ffd8d7e63b4db228d7f379762ff --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_high_school_mathematics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_mathematics" +"description": "以下是中国关于高中数学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_high_school_mathematics" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_high_school_physics.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_high_school_physics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e3aa084a99fb253cf0a96db80449217d80927eb6 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_high_school_physics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_physics" +"description": "以下是中国关于高中物理的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_high_school_physics" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_high_school_politics.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_high_school_politics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7f77391bffa4ae32afac484494a1c5b284a3a0e6 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_high_school_politics.yaml @@ -0,0 +1,4 @@ 
+"dataset_name": "high_school_politics" +"description": "以下是中国关于高中政治的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_high_school_politics" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_ideological_and_moral_cultivation.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_ideological_and_moral_cultivation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..33e341eb2eeebf898641469adb4092e44bb675c9 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_ideological_and_moral_cultivation.yaml @@ -0,0 +1,4 @@ +"dataset_name": "ideological_and_moral_cultivation" +"description": "以下是中国关于思想道德修养与法律基础的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_ideological_and_moral_cultivation" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_law.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_law.yaml new file mode 100644 index 0000000000000000000000000000000000000000..921709ce12b34c703bb5f5439bac45d188c26e7a --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_law.yaml @@ -0,0 +1,4 @@ +"dataset_name": "law" +"description": "以下是中国关于法学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_law" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_legal_professional.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_legal_professional.yaml new file mode 100644 index 0000000000000000000000000000000000000000..897ed74ffd9c99436fd8d6cec99d79c6b82170be --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_legal_professional.yaml @@ -0,0 +1,4 @@ +"dataset_name": "legal_professional" +"description": "以下是中国关于法律职业资格的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_legal_professional" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_logic.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_logic.yaml new file mode 100644 index 0000000000000000000000000000000000000000..12912dafe5af2997bcaefb60bb2850fbca54c749 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_logic.yaml @@ -0,0 +1,4 @@ +"dataset_name": "logic" +"description": "以下是中国关于逻辑学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_logic" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_mao_zedong_thought.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_mao_zedong_thought.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0899b735436349b9db8aebaa189d9893df7d477d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_mao_zedong_thought.yaml @@ -0,0 +1,4 @@ +"dataset_name": "mao_zedong_thought" +"description": "以下是中国关于毛泽东思想和中国特色社会主义理论体系概论的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_mao_zedong_thought" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_marxism.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_marxism.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bfd3d5dbed6dade2f8e04d0c037b394f5a87d8ab --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_marxism.yaml @@ -0,0 +1,4 @@ +"dataset_name": "marxism" +"description": "以下是中国关于马克思主义基本原理的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_marxism" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_middle_school_biology.yaml 
b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_middle_school_biology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6f8725356987a3c55e8d5b346485a6318bae6c26 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_middle_school_biology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "middle_school_biology" +"description": "以下是中国关于初中生物的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_middle_school_biology" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_middle_school_chemistry.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_middle_school_chemistry.yaml new file mode 100644 index 0000000000000000000000000000000000000000..28708b5b0a7fda122443a9fe2bbd980cfb103804 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_middle_school_chemistry.yaml @@ -0,0 +1,4 @@ +"dataset_name": "middle_school_chemistry" +"description": "以下是中国关于初中化学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_middle_school_chemistry" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_middle_school_geography.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_middle_school_geography.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8d179a2f592baac85a39d55c6a103203433283b6 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_middle_school_geography.yaml @@ -0,0 +1,4 @@ +"dataset_name": "middle_school_geography" +"description": "以下是中国关于初中地理的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_middle_school_geography" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_middle_school_history.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_middle_school_history.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b61a8ee835d45987b9ff347a9a9a3f9510e7617a --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_middle_school_history.yaml @@ -0,0 +1,4 @@ +"dataset_name": "middle_school_history" +"description": "以下是中国关于初中历史的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_middle_school_history" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_middle_school_physics.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_middle_school_physics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dbe69686af439ee3331ba2b9f8d246b1dd454e55 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_middle_school_physics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "middle_school_physics" +"description": "以下是中国关于初中物理的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_middle_school_physics" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_middle_school_politics.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_middle_school_politics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0cf20c29b47857772b6c8c3f71f4353589b8e69d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_middle_school_politics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "middle_school_politics" +"description": "以下是中国关于初中政治的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_middle_school_politics" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_modern_chinese_history.yaml 
b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_modern_chinese_history.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b1621075b85f65e213009cfc00e530c5f974fd8b --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_modern_chinese_history.yaml @@ -0,0 +1,4 @@ +"dataset_name": "modern_chinese_history" +"description": "以下是中国关于近代史纲要的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_modern_chinese_history" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_operating_system.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_operating_system.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0c7afea6f22276f496fc6df1a30151f47fabc6b4 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_operating_system.yaml @@ -0,0 +1,4 @@ +"dataset_name": "operating_system" +"description": "以下是中国关于操作系统的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_operating_system" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_physician.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_physician.yaml new file mode 100644 index 0000000000000000000000000000000000000000..66abd59f32c1fd5ac47fa86f8afe2d080f9ad408 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_physician.yaml @@ -0,0 +1,4 @@ +"dataset_name": "physician" +"description": "以下是中国关于医师资格的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_physician" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_plant_protection.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_plant_protection.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6d73e014bb71d7a8df5990726fffd44c0eefe679 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_plant_protection.yaml @@ -0,0 +1,4 @@ +"dataset_name": "plant_protection" +"description": "以下是中国关于植物保护的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_plant_protection" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_probability_and_statistics.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_probability_and_statistics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..82d1fcbb2f410b47cd2956f82741f25ceefcf118 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_probability_and_statistics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "probability_and_statistics" +"description": "以下是中国关于概率统计的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_probability_and_statistics" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_professional_tour_guide.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_professional_tour_guide.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3e670fda482646b59273cecfe58c1af9d8d1e0b4 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_professional_tour_guide.yaml @@ -0,0 +1,4 @@ +"dataset_name": "professional_tour_guide" +"description": "以下是中国关于导游资格的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_professional_tour_guide" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_sports_science.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_sports_science.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..e47473994a4765804c6536f71e55c3fda5937279 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_sports_science.yaml @@ -0,0 +1,4 @@ +"dataset_name": "sports_science" +"description": "以下是中国关于体育学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_sports_science" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_tax_accountant.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_tax_accountant.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8c461a3401b0bddc816486be34039b1832759ebb --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_tax_accountant.yaml @@ -0,0 +1,4 @@ +"dataset_name": "tax_accountant" +"description": "以下是中国关于税务师的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_tax_accountant" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_teacher_qualification.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_teacher_qualification.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ca08e24a7103ebf2114cc9d1b370cb214c19f293 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_teacher_qualification.yaml @@ -0,0 +1,4 @@ +"dataset_name": "teacher_qualification" +"description": "以下是中国关于教师资格的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_teacher_qualification" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_urban_and_rural_planner.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_urban_and_rural_planner.yaml new file mode 100644 index 0000000000000000000000000000000000000000..957a53fbd6ccff5574cad2b5a325e24086df1ee2 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_urban_and_rural_planner.yaml @@ -0,0 +1,4 @@ +"dataset_name": "urban_and_rural_planner" +"description": "以下是中国关于注册城乡规划师的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_urban_and_rural_planner" diff --git a/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_veterinary_medicine.yaml b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_veterinary_medicine.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a493fd6518e4513db06949228d9b381f37c75c9b --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ceval/ceval-valid_veterinary_medicine.yaml @@ -0,0 +1,4 @@ +"dataset_name": "veterinary_medicine" +"description": "以下是中国关于兽医学的单项选择题,请选出其中的正确答案。\n\n" +"include": "_default_ceval_yaml" +"task": "ceval-valid_veterinary_medicine" diff --git a/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/README.md b/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ce98279e3eafd134d72658f3db0c9af5eaf755e7 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/README.md @@ -0,0 +1,54 @@ +# ETHICS Dataset + +### Paper + +Aligning AI With Shared Human Values +https://arxiv.org/abs/2008.02275 + +The ETHICS dataset is a benchmark that spans concepts in justice, well-being, +duties, virtues, and commonsense morality. Models predict widespread moral +judgments about diverse text scenarios. This requires connecting physical and +social world knowledge to value judgements, a capability that may enable us +to steer chatbot outputs or eventually regularize open-ended reinforcement +learning agents.
+ +Homepage: https://github.com/hendrycks/ethics + +### Citation + +``` +@article{hendrycks2021ethics, + title={Aligning AI With Shared Human Values}, + author={Dan Hendrycks and Collin Burns and Steven Basart and Andrew Critch and Jerry Li and Dawn Song and Jacob Steinhardt}, + journal={Proceedings of the International Conference on Learning Representations (ICLR)}, + year={2021} +} +``` + +### Groups and Tasks + +#### Groups + +- `hendrycks_ethics` + +#### Tasks + +* `ethics_cm` +* `ethics_deontology` +* `ethics_justice` +* `ethics_utilitarianism` +* (MISSING) `ethics_utilitarianism_original` +* `ethics_virtue` + +### Checklist + +* [x] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? + * [ ] Matches v0.3.0 of Eval Harness diff --git a/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/commonsense.yaml b/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/commonsense.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f1270743670604108d852e75116ec584938eb976 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/commonsense.yaml @@ -0,0 +1,15 @@ +group: + - hendrycks_ethics +task: ethics_cm +dataset_path: EleutherAI/hendrycks_ethics +dataset_name: commonsense +output_type: multiple_choice +training_split: train +test_split: test +doc_to_text: "{{input}}\nQuestion: Is this wrong?\nAnswer:" +doc_to_target: label +doc_to_choice: ['no', 'yes'] +metric_list: + - metric: acc +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/deontology.yaml b/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/deontology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a2f36852226e86a817023cf428b00dc3b93ac468 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/deontology.yaml @@ -0,0 +1,9 @@ +include: commonsense.yaml +task: ethics_deontology +dataset_name: deontology +doc_to_text: "Question: Would most people believe this reasonable or unreasonable to say? \"{{scenario}} {{excuse.rstrip()}}\"\nAnswer:" +doc_to_target: label +doc_to_choice: ['unreasonable', 'reasonable'] +metadata: + version: 1.0 +# TODO: implement exact-match metric for this subset diff --git a/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/justice.yaml b/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/justice.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2ba79d16b3e6108392d987f17cd2698555f4fb80 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/justice.yaml @@ -0,0 +1,9 @@ +include: deontology.yaml +group: + - hendrycks_ethics +task: ethics_justice +dataset_name: justice +doc_to_text: "Question: Would most people believe this reasonable or unreasonable to say? \"{{scenario}}\"\nAnswer:" +# TODO: impl.
exact match for this and deontology +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/utilitarianism.yaml b/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/utilitarianism.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8960a319349067a5ba2542a93e1447cce7899e72 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/utilitarianism.yaml @@ -0,0 +1,12 @@ +include: commonsense.yaml +group: + - hendrycks_ethics +task: ethics_utilitarianism +dataset_name: utilitarianism +doc_to_text: !function utils.doc_to_text +doc_to_target: !function utils.doc_to_target +doc_to_choice: ['no', 'yes'] +metric_list: + - metric: acc +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/utilitarianism_original_yaml b/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/utilitarianism_original_yaml new file mode 100644 index 0000000000000000000000000000000000000000..5583c1b6829e6f0b25093e5b944ea5ca048a0764 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/utilitarianism_original_yaml @@ -0,0 +1,16 @@ +# group: +# - hendrycks_ethics +# task: ethics_utilitarianism_original +# dataset_path: hails/hendrycks_ethics +# dataset_name: utilitarianism +# output_type: winograd_schema +# fewshot_split: null # TODO: implement a special fewshot split for this dataset subsets +# test_split: test +# template_aliases: #"{% set answer_choices = range(1, 11)|list %}" +# doc_to_text: 'Activity: "{{activity}}"\nRating:' +# doc_to_target: "{{answer_choices[label]}}" +# metric_list: +# - metric: acc +# TODO: we want this to be implemented as a winograd_schema task type, actually +# metadata: +# version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/utils.py b/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1ff0daa961c20daaa5dde14fe73d464277c1750a --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/utils.py @@ -0,0 +1,25 @@ +import random + + +### Utils for `ethics_utilitarianism` task below +def _preproc_doc(doc): + rnd = random.Random(doc["activity"]) + scenarios = [doc["activity"], doc["baseline"]] + ordering = [0, 1] + rnd.shuffle(ordering) + doc = { + "scenarios": [scenarios[ordering[0]], scenarios[ordering[1]]], + # The correct scenario is always first + "label": int(ordering.index(0) == 0), + } + return doc + + +def doc_to_text(doc) -> str: + doc = _preproc_doc(doc) + return f"Scenario 1: {doc['scenarios'][0]}\nScenario 2: {doc['scenarios'][1]}\nQuestion: Is Scenario 1 preferable?\nAnswer:" + + +def doc_to_target(doc): + doc = _preproc_doc(doc) + return doc["label"] diff --git a/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/virtue.yaml b/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/virtue.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8c236a983dd08c4d43aa072713984a113330534b --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/hendrycks_ethics/virtue.yaml @@ -0,0 +1,10 @@ +include: commonsense.yaml +group: + - hendrycks_ethics +task: ethics_virtue +dataset_name: virtue +doc_to_text: "Sentence: {{scenario}}\nQuestion: Does the character in this sentence exhibit the trait \"{{trait}}\"?\nAnswer:" +doc_to_target: label +doc_to_choice: ['no', 'yes'] +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/openbookqa/README.md b/lm-evaluation-harness/lm_eval/tasks/openbookqa/README.md new file mode 
100644 index 0000000000000000000000000000000000000000..34849ac628176dc9fe48bf6239c77a494b97ac3d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/openbookqa/README.md @@ -0,0 +1,54 @@ +# OpenBookQA + +### Paper + +Title: `Can a Suit of Armor Conduct Electricity? A New Dataset for Open Book Question Answering` + +Abstract: https://arxiv.org/abs/1809.02789 + +OpenBookQA is a question-answering dataset modeled after open book exams for +assessing human understanding of a subject. It consists of 5,957 multiple-choice +elementary-level science questions (4,957 train, 500 dev, 500 test), which probe +the understanding of a small “book” of 1,326 core science facts and the application +of these facts to novel situations. For training, the dataset includes a mapping +from each question to the core science fact it was designed to probe. Answering +OpenBookQA questions requires additional broad common knowledge, not contained +in the book. The questions, by design, are answered incorrectly by both a retrieval- +based algorithm and a word co-occurrence algorithm. + +Homepage: https://allenai.org/data/open-book-qa + + +### Citation + +``` +@inproceedings{OpenBookQA2018, + title={Can a Suit of Armor Conduct Electricity? A New Dataset for Open Book Question Answering}, + author={Todor Mihaylov and Peter Clark and Tushar Khot and Ashish Sabharwal}, + booktitle={EMNLP}, + year={2018} +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet + +#### Tasks + +* `openbookqa` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
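For reference, the YAML config that follows selects the gold answer by mapping `answerKey` onto the position of the matching entry in `choices.label`. The sketch below mirrors that lookup in plain Python for a single made-up record; the field names follow the Hugging Face `openbookqa` schema that the config's Jinja templates reference, while the question text itself is purely illustrative:

```python
# Hypothetical OpenBookQA-style record; field names mirror the HF `openbookqa` schema.
doc = {
    "question_stem": "Which of these objects would let the most heat travel through?",
    "choices": {
        "text": ["a new pair of jeans", "a steel spoon", "a cotton ball", "a plastic spoon"],
        "label": ["A", "B", "C", "D"],
    },
    "answerKey": "B",
}

prompt = doc["question_stem"]                                    # doc_to_text
choices = doc["choices"]["text"]                                 # doc_to_choice
gold = doc["choices"]["label"].index(doc["answerKey"].lstrip())  # doc_to_target

print(prompt)
print(choices[gold])  # -> "a steel spoon"
```

Under the `multiple_choice` output type, the harness scores each choice string as a continuation of the prompt by loglikelihood and reports `acc` (plus length-normalized `acc_norm`) according to whether the gold choice ranks highest.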
diff --git a/lm-evaluation-harness/lm_eval/tasks/openbookqa/openbookqa.yaml b/lm-evaluation-harness/lm_eval/tasks/openbookqa/openbookqa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bdfcd19635a0d06d6b4190c27d59ce93de0aef80 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/openbookqa/openbookqa.yaml @@ -0,0 +1,21 @@ +task: openbookqa +dataset_path: openbookqa +dataset_name: main +output_type: multiple_choice +training_split: train +validation_split: validation +test_split: test +doc_to_text: question_stem +doc_to_target: "{{choices.label.index(answerKey.lstrip())}}" +doc_to_choice: "{{choices.text}}" +should_decontaminate: true +doc_to_decontamination_query: question_stem +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/qa4mre/README.md b/lm-evaluation-harness/lm_eval/tasks/qa4mre/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3b8dc9fc9c38c09c48d52b2899fd74d639216765 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/qa4mre/README.md @@ -0,0 +1,55 @@ +# QA4MRE + +### Paper + +Title: `QA4MRE 2011-2013: Overview of Question Answering for Machine Reading Evaluation` + +Abstract: https://www.cs.cmu.edu/~./hovy/papers/13CLEF-QA4MRE.pdf + +The (English only) QA4MRE challenge which was run as a Lab at CLEF 2011-2013. +The main objective of this exercise is to develop a methodology for evaluating +Machine Reading systems through Question Answering and Reading Comprehension +Tests. Systems should be able to extract knowledge from large volumes of text +and use this knowledge to answer questions. Four different tasks have been +organized during these years: Main Task, Processing Modality and Negation for +Machine Reading, Machine Reading of Biomedical Texts about Alzheimer's disease, +and Entrance Exam. + +Homepage: http://nlp.uned.es/clef-qa/repository/qa4mre.php + + +### Citation + +``` +@inproceedings{Peas2013QA4MRE2O, + title={QA4MRE 2011-2013: Overview of Question Answering for Machine Reading Evaluation}, + author={Anselmo Pe{\~n}as and Eduard H. Hovy and Pamela Forner and {\'A}lvaro Rodrigo and Richard F. E. Sutcliffe and Roser Morante}, + booktitle={CLEF}, + year={2013} +} +``` + +### Groups and Tasks + +#### Groups + +* `qa4mre` + +#### Tasks + +* `qa4mre_2011` +* `qa4mre_2012` +* `qa4mre_2013` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
diff --git a/lm-evaluation-harness/lm_eval/tasks/qa4mre/preprocess_qa4mre.py b/lm-evaluation-harness/lm_eval/tasks/qa4mre/preprocess_qa4mre.py new file mode 100644 index 0000000000000000000000000000000000000000..3e07db422b1e20f3d456f0da9f806c76feb1c557 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/qa4mre/preprocess_qa4mre.py @@ -0,0 +1,6 @@ +def qa4mre_process(doc): + return int(doc["correct_answer_id"]) - 1 + + +def doc_to_target(doc): + return doc["answer_options"]["answer_str"][qa4mre_process(doc)] diff --git a/lm-evaluation-harness/lm_eval/tasks/qa4mre/qa4mre_2011.yaml b/lm-evaluation-harness/lm_eval/tasks/qa4mre/qa4mre_2011.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b9ceb78094abcf60b378d695936f1548a2d69188 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/qa4mre/qa4mre_2011.yaml @@ -0,0 +1,22 @@ +group: + - qa4mre +task: qa4mre_2011 +dataset_path: qa4mre +dataset_name: 2011.main.EN +output_type: multiple_choice +test_split: train +# doc_to_text: "{{document_str.strip()}}\nQuestion: {{question_str}}\nChoices:\n- {{answer_choices|join('\n- ')}}\nAnswer:" +doc_to_text: "{{document_str.strip()}}\nQuestion: {{question_str}}\nAnswer:" +doc_to_target: "{{correct_answer_id|int - 1}}" +doc_to_choice: "{{answer_options.answer_str}}" +should_decontaminate: true +doc_to_decontamination_query: "{{document_str.strip()}} + ' ' + {{question_str}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/qa4mre/qa4mre_2012.yaml b/lm-evaluation-harness/lm_eval/tasks/qa4mre/qa4mre_2012.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ec015651675e34e3f51b221ef2b35d60092bbc3f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/qa4mre/qa4mre_2012.yaml @@ -0,0 +1,4 @@ +include: qa4mre_2011.yaml +task: qa4mre_2012 +dataset_path: qa4mre +dataset_name: 2012.main.EN diff --git a/lm-evaluation-harness/lm_eval/tasks/qa4mre/qa4mre_2013.yaml b/lm-evaluation-harness/lm_eval/tasks/qa4mre/qa4mre_2013.yaml new file mode 100644 index 0000000000000000000000000000000000000000..08b96e306dcd47e02e06c451692665aef97869ba --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/qa4mre/qa4mre_2013.yaml @@ -0,0 +1,4 @@ +include: qa4mre_2011.yaml +task: qa4mre_2013 +dataset_path: qa4mre +dataset_name: 2013.main.EN diff --git a/lm-evaluation-harness/lm_eval/tasks/scrolls/README.md b/lm-evaluation-harness/lm_eval/tasks/scrolls/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a90e00f4e729711fc6ea7ccd0c375e4686f8970d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/scrolls/README.md @@ -0,0 +1,31 @@ +""" +SCROLLS: Standardized CompaRison Over Long Language Sequences +https://arxiv.org/abs/2201.03533 + +SCROLLS is a suite of datasets that require synthesizing information over long texts. +The benchmark includes seven natural language tasks across multiple domains, +including summarization, question answering, and natural language inference. + +Homepage: https://www.scrolls-benchmark.com/ + +Since SCROLLS tasks are generally longer than the maximum sequence length of many models, +it is possible to create "subset" tasks that contain only those samples whose tokenized length +is less than some pre-defined limit. 
For example, to create a subset of "Qasper" that would +be suitable for a model using the GPTNeoX tokenizer and a 4K maximum sequence length: + +``` +class QasperGPTNeoX4K(Qasper): + PRUNE_TOKENIZERS = ["EleutherAI/pythia-410m-deduped"] + PRUNE_MAX_TOKENS = 4096 + PRUNE_NUM_PROC = _num_cpu_cores() # optional, to speed up pruning of large datasets like NarrativeQA +``` + +`PRUNE_TOKENIZERS` can contain more than one tokenizer; this will include only samples that are +less than `PRUNE_MAX_TOKENS` for ALL of the tokenizers. This can be useful for comparing models +that use different tokenizers but the same maximum sequence length. + +Once the subset task class has been defined in this file, it can be used by adding the class +to `lm_eval/tasks/__init__.py`. + +NOTE: GovReport may need `max_gen_toks` set larger for causal models. +""" diff --git a/lm-evaluation-harness/lm_eval/tasks/scrolls/scrolls_contractnli.yaml b/lm-evaluation-harness/lm_eval/tasks/scrolls/scrolls_contractnli.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2ca93b6f13204676bf6f649da770f0436559cc26 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/scrolls/scrolls_contractnli.yaml @@ -0,0 +1,3 @@ +group: scrolls +task: scrolls_contractnli +class: !function task.ContractNLI diff --git a/lm-evaluation-harness/lm_eval/tasks/scrolls/scrolls_govreport.yaml b/lm-evaluation-harness/lm_eval/tasks/scrolls/scrolls_govreport.yaml new file mode 100644 index 0000000000000000000000000000000000000000..237a7ca6b7e36b21929da832d0b2f3bdb0e44ae4 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/scrolls/scrolls_govreport.yaml @@ -0,0 +1,3 @@ +group: scrolls +task: scrolls_govreport +class: !function task.GovReport diff --git a/lm-evaluation-harness/lm_eval/tasks/scrolls/scrolls_narrativeqa.yaml b/lm-evaluation-harness/lm_eval/tasks/scrolls/scrolls_narrativeqa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..46f45aacedc5847f9c5dd0e0334815a4d10b5391 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/scrolls/scrolls_narrativeqa.yaml @@ -0,0 +1,3 @@ +group: scrolls +task: scrolls_narrativeqa +class: !function task.NarrativeQA diff --git a/lm-evaluation-harness/lm_eval/tasks/scrolls/scrolls_qasper.yaml b/lm-evaluation-harness/lm_eval/tasks/scrolls/scrolls_qasper.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a9de2c72bab5adbe2b62ccbab9a3624c07ae4655 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/scrolls/scrolls_qasper.yaml @@ -0,0 +1,3 @@ +group: scrolls +task: scrolls_qasper +class: !function task.Qasper diff --git a/lm-evaluation-harness/lm_eval/tasks/scrolls/scrolls_qmsum.yaml b/lm-evaluation-harness/lm_eval/tasks/scrolls/scrolls_qmsum.yaml new file mode 100644 index 0000000000000000000000000000000000000000..060fd2ad90641ccb9fe3186c919e6b93d6d1f856 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/scrolls/scrolls_qmsum.yaml @@ -0,0 +1,3 @@ +group: scrolls +task: scrolls_qmsum +class: !function task.QMSum diff --git a/lm-evaluation-harness/lm_eval/tasks/scrolls/scrolls_quality.yaml b/lm-evaluation-harness/lm_eval/tasks/scrolls/scrolls_quality.yaml new file mode 100644 index 0000000000000000000000000000000000000000..50fe92463c7f92ce65f6d444faa7a1104cf66f66 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/scrolls/scrolls_quality.yaml @@ -0,0 +1,3 @@ +group: scrolls +task: scrolls_quality +class: !function task.QuALITY diff --git a/lm-evaluation-harness/lm_eval/tasks/scrolls/scrolls_summscreenfd.yaml
b/lm-evaluation-harness/lm_eval/tasks/scrolls/scrolls_summscreenfd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..246af64b7ea8d300cb95dd9a1a565e6e8e63a8dd --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/scrolls/scrolls_summscreenfd.yaml @@ -0,0 +1,3 @@ +group: scrolls +task: scrolls_summscreenfd +class: !function task.SummScreenFD diff --git a/lm-evaluation-harness/lm_eval/tasks/xstorycloze/default_my.yaml b/lm-evaluation-harness/lm_eval/tasks/xstorycloze/default_my.yaml new file mode 100644 index 0000000000000000000000000000000000000000..47c3ae187209901b1fd711e680c1c3d46fdff48e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/xstorycloze/default_my.yaml @@ -0,0 +1,3 @@ +include: default_ar.yaml +task: xstorycloze_my +dataset_name: my diff --git a/lm-evaluation-harness/lm_eval/tasks/xstorycloze/default_ru.yaml b/lm-evaluation-harness/lm_eval/tasks/xstorycloze/default_ru.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8617ab08db68e066c4165b0480801b2e5e16d9a2 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/xstorycloze/default_ru.yaml @@ -0,0 +1,3 @@ +include: default_ar.yaml +task: xstorycloze_ru +dataset_name: ru