diff --git a/lm-evaluation/lm_eval/tasks/aclue/README.md b/lm-evaluation/lm_eval/tasks/aclue/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d8707c01bf555d54f14d825501f54428c33cbe89 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/aclue/README.md @@ -0,0 +1,50 @@ +# ACLUE + +### Paper + +Can Large Language Model Comprehend Ancient Chinese? A Preliminary Test on ACLUE +https://arxiv.org/abs/2310.09550 + +The Ancient Chinese Language Understanding Evaluation (ACLUE) is an evaluation benchmark focused on ancient Chinese language comprehension. It aims to assess how well large language models understand ancient Chinese. The benchmark comprises 15 tasks spanning lexical, syntactic, semantic, inference, and knowledge domains. ACLUE's tasks are derived from a combination of manually curated questions from publicly available resources and automatically generated questions from classical Chinese language corpora. The questions span from the Xia dynasty (2070 BCE) to the Ming dynasty (1368 CE). ACLUE adopts a multiple-choice question format for all tasks. + +Homepage: https://github.com/isen-zhang/ACLUE + +### Citation + +```bibtex +@inproceedings{zhang-li-2023-large, + title = "Can Large Language Model Comprehend {A}ncient {C}hinese? A Preliminary Test on {ACLUE}", + author = "Zhang, Yixuan and Li, Haonan", + booktitle = "Proceedings of the Ancient Language Processing Workshop", + month = sep, + year = "2023", + address = "Varna, Bulgaria", + publisher = "INCOMA Ltd., Shoumen, Bulgaria", + url = "https://aclanthology.org/2023.alp-1.9", + pages = "80--87" +} +``` + +### Groups and Tasks + +#### Groups + +- `aclue`: All 15 subjects of the ACLUE dataset, evaluated following the methodology in CMMLU's original implementation. + +#### Tasks + +The following tasks evaluate subjects in the ACLUE dataset using loglikelihood-based multiple-choice scoring: +- `aclue_{subject_english}` + +### Checklist + +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? + * [x] Yes, original implementation contributed by author of the benchmark + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [x] Have you noted which, if any, published evaluation setups are matched by this variant? 
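As a brief orientation (an editor's sketch, not part of the diff): the `aclue` group and the per-subject `aclue_*` tasks defined below can be driven through the harness's Python entry point. The sketch assumes the `lm_eval` package from this repository exposes `simple_evaluate`, as recent harness versions do; the model identifier and few-shot setting are placeholders, not values mandated by this diff.

```python
# Minimal usage sketch (assumptions noted above): evaluate the ACLUE group.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",                               # HuggingFace-backed model wrapper
    model_args="pretrained=<your-model-id>",  # placeholder checkpoint name
    tasks=["aclue"],                          # the group covering all 15 subject tasks
    num_fewshot=5,                            # example setting, adjust as needed
)
print(results["results"])  # dictionary of per-task metrics
```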
diff --git a/lm-evaluation/lm_eval/tasks/aclue/aclue_ancient_medical.yaml b/lm-evaluation/lm_eval/tasks/aclue/aclue_ancient_medical.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bceaa702c53a1526fc84cf8f5141570352581a44 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/aclue/aclue_ancient_medical.yaml @@ -0,0 +1,4 @@ +"dataset_name": "ancient_medical" +"description": "以下是关于医古文的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "aclue_ancient_medical" diff --git a/lm-evaluation/lm_eval/tasks/aclue/aclue_polysemy_resolution.yaml b/lm-evaluation/lm_eval/tasks/aclue/aclue_polysemy_resolution.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ee0deea16f6bcb6906fd68e2e65bf72ea276e74a --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/aclue/aclue_polysemy_resolution.yaml @@ -0,0 +1,4 @@ +"dataset_name": "polysemy_resolution" +"description": "以下是关于古文单字多义的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "aclue_polysemy_resolution" diff --git a/lm-evaluation/lm_eval/tasks/aclue/aclue_sentence_segmentation.yaml b/lm-evaluation/lm_eval/tasks/aclue/aclue_sentence_segmentation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9d81c3fe6eae35a6adc888d9c73430aa891bfe86 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/aclue/aclue_sentence_segmentation.yaml @@ -0,0 +1,4 @@ +"dataset_name": "sentence_segmentation" +"description": "以下是关于古文断句的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "aclue_sentence_segmentation" diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/_generate_configs.py b/lm-evaluation/lm_eval/tasks/cmmlu/_generate_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..52081150cbe0dc7c12aa4fe7ef7a324b29d85ed7 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/cmmlu/_generate_configs.py @@ -0,0 +1,133 @@ +""" +Take in a YAML, and output all other splits with this YAML +""" +import argparse +import os + +import yaml +from tqdm import tqdm + +from lm_eval.utils import eval_logger + + +SUBJECTS = { + "agronomy": "农学", + "anatomy": "解剖学", + "ancient_chinese": "古汉语", + "arts": "艺术学", + "astronomy": "天文学", + "business_ethics": "商业伦理", + "chinese_civil_service_exam": "中国公务员考试", + "chinese_driving_rule": "中国驾驶规则", + "chinese_food_culture": "中国饮食文化", + "chinese_foreign_policy": "中国外交政策", + "chinese_history": "中国历史", + "chinese_literature": "中国文学", + "chinese_teacher_qualification": "中国教师资格", + "clinical_knowledge": "临床知识", + "college_actuarial_science": "大学精算学", + "college_education": "大学教育学", + "college_engineering_hydrology": "大学工程水文学", + "college_law": "大学法律", + "college_mathematics": "大学数学", + "college_medical_statistics": "大学医学统计", + "college_medicine": "大学医学", + "computer_science": "计算机科学", + "computer_security": "计算机安全", + "conceptual_physics": "概念物理学", + "construction_project_management": "建设工程管理", + "economics": "经济学", + "education": "教育学", + "electrical_engineering": "电气工程", + "elementary_chinese": "小学语文", + "elementary_commonsense": "小学常识", + "elementary_information_and_technology": "小学信息技术", + "elementary_mathematics": "初等数学", + "ethnology": "民族学", + "food_science": "食品科学", + "genetics": "遗传学", + "global_facts": "全球事实", + "high_school_biology": "高中生物", + "high_school_chemistry": "高中化学", + "high_school_geography": "高中地理", + "high_school_mathematics": "高中数学", + "high_school_physics": "高中物理学", + "high_school_politics": "高中政治", + "human_sexuality": "人类性行为", + "international_law": "国际法学", + "journalism": "新闻学", + "jurisprudence": "法理学", + 
"legal_and_moral_basis": "法律与道德基础", + "logical": "逻辑学", + "machine_learning": "机器学习", + "management": "管理学", + "marketing": "市场营销", + "marxist_theory": "马克思主义理论", + "modern_chinese": "现代汉语", + "nutrition": "营养学", + "philosophy": "哲学", + "professional_accounting": "专业会计", + "professional_law": "专业法学", + "professional_medicine": "专业医学", + "professional_psychology": "专业心理学", + "public_relations": "公共关系", + "security_study": "安全研究", + "sociology": "社会学", + "sports_science": "体育学", + "traditional_chinese_medicine": "中医中药", + "virology": "病毒学", + "world_history": "世界历史", + "world_religions": "世界宗教", +} + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--base_yaml_path", required=True) + parser.add_argument("--save_prefix_path", default="cmmlu") + parser.add_argument("--cot_prompt_path", default=None) + parser.add_argument("--task_prefix", default="") + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + + # get filename of base_yaml so we can `"include": ` it in our other YAMLs. + base_yaml_name = os.path.split(args.base_yaml_path)[-1] + with open(args.base_yaml_path, encoding="utf-8") as f: + base_yaml = yaml.full_load(f) + + if args.cot_prompt_path is not None: + import json + + with open(args.cot_prompt_path, encoding="utf-8") as f: + cot_file = json.load(f) + + for subject_eng, subject_zh in tqdm(SUBJECTS.items()): + if args.cot_prompt_path is not None: + description = cot_file[subject_eng] + else: + description = ( + f"以下是关于{subject_zh}的单项选择题,请直接给出正确答案的选项。\n\n" + ) + + yaml_dict = { + "include": base_yaml_name, + "task": f"cmmlu_{args.task_prefix}_{subject_eng}" + if args.task_prefix != "" + else f"cmmlu_{subject_eng}", + "dataset_name": subject_eng, + "description": description, + } + + file_save_path = args.save_prefix_path + f"_{subject_eng}.yaml" + eval_logger.info(f"Saving yaml for subset {subject_eng} to {file_save_path}") + with open(file_save_path, "w", encoding="utf-8") as yaml_file: + yaml.dump( + yaml_dict, + yaml_file, + width=float("inf"), + allow_unicode=True, + default_style='"', + ) diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_arts.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_arts.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6007825cb9f3cd8c0af7e25c7de6d1c965f612a0 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_arts.yaml @@ -0,0 +1,4 @@ +"dataset_name": "arts" +"description": "以下是关于艺术学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_arts" diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_business_ethics.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_business_ethics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..10135b604b3a96ba2c894dc86a9b3af1382728a2 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_business_ethics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "business_ethics" +"description": "以下是关于商业伦理的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_business_ethics" diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_civil_service_exam.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_civil_service_exam.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dcf6c7e6eeb52f551442de521ed4cc4fdfd272f1 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_civil_service_exam.yaml @@ -0,0 +1,4 @@ +"dataset_name": "chinese_civil_service_exam" +"description": 
"以下是关于中国公务员考试的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_chinese_civil_service_exam" diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_literature.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_literature.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4166c122c8c12a0268f67ed646e2c31698d7a40c --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_chinese_literature.yaml @@ -0,0 +1,4 @@ +"dataset_name": "chinese_literature" +"description": "以下是关于中国文学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_chinese_literature" diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_actuarial_science.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_actuarial_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3543486b113bdc0a56ac96feadbbc1f3a8ed997b --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_actuarial_science.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_actuarial_science" +"description": "以下是关于大学精算学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_college_actuarial_science" diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_mathematics.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_mathematics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7876a584e7e3c936d30c7e4ad81381ec7e535493 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_mathematics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_mathematics" +"description": "以下是关于大学数学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_college_mathematics" diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_computer_science.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_computer_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..86c874e539d21d55540e7e5adce32a624d4a706c --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_computer_science.yaml @@ -0,0 +1,4 @@ +"dataset_name": "computer_science" +"description": "以下是关于计算机科学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_computer_science" diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_elementary_chinese.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_elementary_chinese.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6f67be3fc40f5c038b455edcc6076675a4451261 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_elementary_chinese.yaml @@ -0,0 +1,4 @@ +"dataset_name": "elementary_chinese" +"description": "以下是关于小学语文的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_elementary_chinese" diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_elementary_commonsense.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_elementary_commonsense.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3017edd999a0ee04de4a5dd8c7dc4b1b6218f5e3 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_elementary_commonsense.yaml @@ -0,0 +1,4 @@ +"dataset_name": "elementary_commonsense" +"description": "以下是关于小学常识的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_elementary_commonsense" diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_elementary_information_and_technology.yaml 
b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_elementary_information_and_technology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..98c7d3c8f2d85f3c52a3314253d2d2151f7116ae --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_elementary_information_and_technology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "elementary_information_and_technology" +"description": "以下是关于小学信息技术的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_elementary_information_and_technology" diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_food_science.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_food_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9be450ca2ea2190c6dd3b0639ad9fbd12d968443 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_food_science.yaml @@ -0,0 +1,4 @@ +"dataset_name": "food_science" +"description": "以下是关于食品科学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_food_science" diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_high_school_politics.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_high_school_politics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5f689dff61a4ea55628b04f9bed5202e48c6eb70 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_high_school_politics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_politics" +"description": "以下是关于高中政治的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_high_school_politics" diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_human_sexuality.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_human_sexuality.yaml new file mode 100644 index 0000000000000000000000000000000000000000..39ff32e728dd228dd675f708dc6e2680c96f0900 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_human_sexuality.yaml @@ -0,0 +1,4 @@ +"dataset_name": "human_sexuality" +"description": "以下是关于人类性行为的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_human_sexuality" diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_legal_and_moral_basis.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_legal_and_moral_basis.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a5e3ee13b6e9670f33068bc731acebf7489737ec --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_legal_and_moral_basis.yaml @@ -0,0 +1,4 @@ +"dataset_name": "legal_and_moral_basis" +"description": "以下是关于法律与道德基础的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_legal_and_moral_basis" diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_management.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_management.yaml new file mode 100644 index 0000000000000000000000000000000000000000..aa5681babeb650cc451c15e3496ca4d0ed3a1e0f --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_management.yaml @@ -0,0 +1,4 @@ +"dataset_name": "management" +"description": "以下是关于管理学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_management" diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_modern_chinese.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_modern_chinese.yaml new file mode 100644 index 0000000000000000000000000000000000000000..13b2ccc4f939876616ceeda42d211e96347ce060 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_modern_chinese.yaml @@ -0,0 +1,4 @@ 
+"dataset_name": "modern_chinese" +"description": "以下是关于现代汉语的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_modern_chinese" diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_professional_medicine.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_professional_medicine.yaml new file mode 100644 index 0000000000000000000000000000000000000000..92fed45e74f9b69b2c7b595a4bb682318fe0b81c --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_professional_medicine.yaml @@ -0,0 +1,4 @@ +"dataset_name": "professional_medicine" +"description": "以下是关于专业医学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_professional_medicine" diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_virology.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_virology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1560b84f682493ef53a9c26ae1d36ac520ff46c7 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_virology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "virology" +"description": "以下是关于病毒学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_virology" diff --git a/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_world_history.yaml b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_world_history.yaml new file mode 100644 index 0000000000000000000000000000000000000000..993ce0ab6e390a81286df213e5d3ddd9fe3908bd --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_world_history.yaml @@ -0,0 +1,4 @@ +"dataset_name": "world_history" +"description": "以下是关于世界历史的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_world_history" diff --git a/lm-evaluation/lm_eval/tasks/drop/default.yaml b/lm-evaluation/lm_eval/tasks/drop/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4a936121524950e8a89822058cb2b29f244f31a4 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/drop/default.yaml @@ -0,0 +1,26 @@ +task: drop +dataset_path: EleutherAI/drop +output_type: generate_until +training_split: train +validation_split: validation +process_docs: !function utils.process_docs +doc_to_text: "{{passage}} {{question}}" +doc_to_target: "{{ answer|join(',')}}" +target_delimiter: "" +process_results: !function utils.process_results +should_decontaminate: true +doc_to_decontamination_query: "{{passage}} {{question}}" +generation_kwargs: + until: + - "." +metric_list: + - metric: em + aggregation: mean + higher_is_better: true + - metric: f1 + aggregation: mean + higher_is_better: true +metadata: + version: 3.0 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/lm_eval/tasks/drop/utils.py b/lm-evaluation/lm_eval/tasks/drop/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..54093bb4d28e954035e76d8764a014ca99b99d8d --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/drop/utils.py @@ -0,0 +1,204 @@ +import re +import string + +import numpy as np +from scipy.optimize import linear_sum_assignment + + +_ARTICLES = re.compile(r"\b(a|an|the)\b", re.UNICODE) + + +def process_docs(dataset): + def _process(doc): + return { + "id": doc["query_id"], + "passage": doc["passage"], + "question": doc["question"], + "answers": get_answers(doc), + } + + return dataset.map(_process) + + +def get_answers(doc): + def _flatten_validated_answers(validated_answers): + """Flattens a dict of lists of validated answers. 
+ {"number": ['1', '8'], ...} + -> [{"number": ['1'], ...}, {"number": ['8'], ...}] + """ + valid_answers = [] + for i in range(len(validated_answers["number"])): + valid_answers.append( + { + "number": validated_answers["number"][i], + "date": validated_answers["date"][i], + "spans": validated_answers["spans"][i], + } + ) + return valid_answers + + answers = [] + answers_set = set() + candidates = [doc["answer"]] + _flatten_validated_answers(doc["validated_answers"]) + for candidate in candidates: + answer = parse_answer(candidate) + if answer in answers_set: + continue + answers_set.add(answer) + answers.append(answer) + return answers + + +def parse_answer(answer): + # NOTE: Everything is returned as a tuple for uniformity and hashability. + if answer["number"] != "": + return (str(answer["number"]),) + if answer["spans"] != []: + return tuple(answer["spans"]) + return ( + " ".join( + [answer["date"]["day"], answer["date"]["month"], answer["date"]["year"]] + ).strip(), + ) + + +def process_results(doc, results): + preds, golds = results, doc["answers"] + max_em = 0 + max_f1 = 0 + for gold_answer in golds: + exact_match, f1_score = get_metrics(preds, gold_answer) + if gold_answer[0].strip(): + max_em = max(max_em, exact_match) + max_f1 = max(max_f1, f1_score) + return {"em": max_em, "f1": max_f1} + + +def get_metrics(predicted, gold): + """ + Takes a predicted answer and a gold answer (that are both either a string or a list of + strings), and returns exact match and the DROP F1 metric for the prediction. If you are + writing a script for evaluating objects in memory (say, the output of predictions during + validation, or while training), this is the function you want to call, after using + :func:`answer_json_to_strings` when reading the gold answer from the released data file. + """ + predicted_bags = _answer_to_bags(predicted) + gold_bags = _answer_to_bags(gold) + + if set(predicted_bags[0]) == set(gold_bags[0]) and len(predicted_bags[0]) == len( + gold_bags[0] + ): + exact_match = 1.0 + else: + exact_match = 0.0 + + f1_per_bag = _align_bags(predicted_bags[1], gold_bags[1]) + f1 = np.mean(f1_per_bag) + f1 = round(f1, 2) + return exact_match, f1 + + +def _answer_to_bags(answer): + if isinstance(answer, (list, tuple)): + raw_spans = answer + else: + raw_spans = [answer] + normalized_spans = [] + token_bags = [] + for raw_span in raw_spans: + normalized_span = _normalize(raw_span) + normalized_spans.append(normalized_span) + token_bags.append(set(normalized_span.split())) + return normalized_spans, token_bags + + +def _align_bags(predicted, gold): + """ + Takes gold and predicted answer sets and first finds the optimal 1-1 alignment + between them and gets maximum metric values over all the answers. 
+ """ + scores = np.zeros([len(gold), len(predicted)]) + for gold_index, gold_item in enumerate(gold): + for pred_index, pred_item in enumerate(predicted): + if _match_numbers_if_present(gold_item, pred_item): + scores[gold_index, pred_index] = _compute_f1(pred_item, gold_item) + row_ind, col_ind = linear_sum_assignment(-scores) + + max_scores = np.zeros([max(len(gold), len(predicted))]) + for row, column in zip(row_ind, col_ind): + max_scores[row] = max(max_scores[row], scores[row, column]) + return max_scores + + +def _compute_f1(predicted_bag, gold_bag): + intersection = len(gold_bag.intersection(predicted_bag)) + if not predicted_bag: + precision = 1.0 + else: + precision = intersection / float(len(predicted_bag)) + if not gold_bag: + recall = 1.0 + else: + recall = intersection / float(len(gold_bag)) + f1 = ( + (2 * precision * recall) / (precision + recall) + if not (precision == 0.0 and recall == 0.0) + else 0.0 + ) + return f1 + + +def _match_numbers_if_present(gold_bag, predicted_bag): + gold_numbers = set() + predicted_numbers = set() + for word in gold_bag: + if _is_number(word): + gold_numbers.add(word) + for word in predicted_bag: + if _is_number(word): + predicted_numbers.add(word) + if (not gold_numbers) or gold_numbers.intersection(predicted_numbers): + return True + return False + + +def _is_number(text): + try: + float(text) + return True + except ValueError: + return False + + +def _remove_articles(text): + return _ARTICLES.sub(" ", text) + + +def _white_space_fix(text): + return " ".join(text.split()) + + +def _remove_punc(text): + exclude = set(string.punctuation) + if not _is_number(text): + return "".join(ch for ch in text if ch not in exclude) + else: + return text + + +def _fix_number(text): + return str(float(text)) if _is_number(text) else text + + +def _tokenize(text): + return re.split(" |-", text) + + +def _normalize(answer): + tokens = [ + _white_space_fix(_remove_articles(_fix_number(_remove_punc(token.lower())))) + for token in _tokenize(answer) + ] + tokens = [token for token in tokens if token.strip()] + normalized = " ".join(tokens).strip() + return normalized diff --git a/lm-evaluation/lm_eval/tasks/gsm8k/README.md b/lm-evaluation/lm_eval/tasks/gsm8k/README.md new file mode 100644 index 0000000000000000000000000000000000000000..13339dfa46366298389e3ad0d3910b00db2c417e --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gsm8k/README.md @@ -0,0 +1,59 @@ +# GSM8k + +## Paper +Training Verifiers to Solve Math Word Problems +https://arxiv.org/abs/2110.14168 + +State-of-the-art language models can match human performance on many tasks, but +they still struggle to robustly perform multi-step mathematical reasoning. To +diagnose the failures of current models and support research, we introduce GSM8K, +a dataset of 8.5K high quality linguistically diverse grade school math word problems. +We find that even the largest transformer models fail to achieve high test performance, +despite the conceptual simplicity of this problem distribution. + +NOTE: See the official implementation of the task: + https://github.com/openai/grade-school-math/blob/master/grade_school_math/calculator.py +for how to make use of the dataset's calculator annotations in your language +model's sample/generation function. 
+ +Homepage: https://github.com/openai/grade-school-math + + +## Citation +``` +@misc{cobbe2021training, + title={Training Verifiers to Solve Math Word Problems}, + author={Karl Cobbe and Vineet Kosaraju and Mohammad Bavarian and Jacob Hilton and Reiichiro Nakano and Christopher Hesse and John Schulman}, + year={2021}, + eprint={2110.14168}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + +### Groups and Tasks + +#### Groups + +- `math_word_problems` +- `chain_of_thought` +- `self_consistency` + +#### Tasks + +- `gsm8k_yaml` +- `gsm8k_cot`: GSM8K with Chain-of-Thought +- `gsm8k_cot_self_consistency`: GSM8K with Chain-of-Thought and Self-Consistency + +### Checklist + +- [x] Is in Eval-harness v1.0 ? +- [ ] Has been checked for regression from v1.0? +- [ ] Has been checked for equivalence with original paper methodology? +- [ ] "Main" checked variant clearly denoted? + +### Variant Wishlist + +- [ ] Variant with Calculator (see https://github.com/openai/grade-school-math/blob/master/grade_school_math/calculator.py for example implementation) +- [ ] Using Verifiers +- [ ] Majority voting "without CoT" diff --git a/lm-evaluation/lm_eval/tasks/gsm8k/gsm8k-cot-self-consistency.yaml b/lm-evaluation/lm_eval/tasks/gsm8k/gsm8k-cot-self-consistency.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d92ee342d18bb2e9f2da7573fd0c72ddd65db9c8 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gsm8k/gsm8k-cot-self-consistency.yaml @@ -0,0 +1,34 @@ +include: gsm8k-cot.yaml +group: + - chain_of_thought + - self_consistency +task: gsm8k_cot_self_consistency +generation_kwargs: + until: + - "Q:" + - "\n\n" + do_sample: true + temperature: 0.2 +repeats: 64 +filter_list: + - name: "score-first" # pick only the first response, and report metrics on that + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)" + - function: "take_first" + - name: "maj@64" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)" + - function: "majority_vote" + - function: "take_first" + - name: "maj@8" # get Maj@8 , via selecting the first 8 responses. Using a better estimator would be optimal. + filter: + - function: "take_first_k" + k: 8 + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)" + - function: "majority_vote" + - function: "take_first" +metadata: + version: 2.0 diff --git a/lm-evaluation/lm_eval/tasks/gsm8k/gsm8k-cot-zeroshot.yaml b/lm-evaluation/lm_eval/tasks/gsm8k/gsm8k-cot-zeroshot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..75d4468ac02d551d135ef78a752aba0d157e72ab --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gsm8k/gsm8k-cot-zeroshot.yaml @@ -0,0 +1,44 @@ +group: + - math_word_problems +task: gsm8k_cot_zeroshot +dataset_path: gsm8k +dataset_name: main +output_type: generate_until +training_split: train +fewshot_split: train +test_split: test +doc_to_text: "Q: {{question}}\nA: Let's think step by step." +doc_to_target: "{{answer}}" #" {{answer.split('### ')[-1].rstrip()}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: false + regexes_to_ignore: + - "," + - "\\$" + - "(?s).*#### " + - "\\.$" +generation_kwargs: + until: + - "Q:" + - "" + - "<|im_end|>" + do_sample: false +repeats: 1 +num_fewshot: 0 +filter_list: + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)." 
+ - function: "take_first" + - name: "flexible-extract" + filter: + - function: "regex" + group_select: -1 + regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)" + - function: "take_first" +metadata: + version: 3.0 diff --git a/lm-evaluation/lm_eval/tasks/gsm8k/gsm8k-cot.yaml b/lm-evaluation/lm_eval/tasks/gsm8k/gsm8k-cot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e55020258930e400ace1fc8cb85949e1af347a13 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gsm8k/gsm8k-cot.yaml @@ -0,0 +1,51 @@ +group: + - chain_of_thought +task: gsm8k_cot +dataset_path: gsm8k +dataset_name: main +output_type: generate_until +test_split: test +doc_to_text: "Q: There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?\nA: There are 15 trees originally. Then there were 21 trees after some more were planted. So there must have been 21 - 15 = 6. The answer is 6.\n\n\ +Q: If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?\nA: There are originally 3 cars. 2 more cars arrive. 3 + 2 = 5. The answer is 5.\n\n\ +Q: Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?\nA: Originally, Leah had 32 chocolates. Her sister had 42. So in total they had 32 + 42 = 74. After eating 35, they had 74 - 35 = 39. The answer is 39.\n\n\ +Q: Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?\nA: Jason started with 20 lollipops. Then he had 12 after giving some to Denny. So he gave Denny 20 - 12 = 8. The answer is 8.\n\n\ +Q: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?\nA: Shawn started with 5 toys. If he got 2 toys each from his mom and dad, then that is 4 more toys. 5 + 4 = 9. The answer is 9.\n\n\ +Q: There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?\nA: There were originally 9 computers. For each of 4 days, 5 more computers were added. So 5 * 4 = 20 computers were added. 9 + 20 is 29. The answer is 29.\n\n\ +Q: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?\nA: Michael started with 58 golf balls. After losing 23 on tuesday, he had 58 - 23 = 35. After losing 2 more, he had 35 - 2 = 33 golf balls. The answer is 33.\n\n\ +Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?\nA: Olivia had 23 dollars. 5 bagels for 3 dollars each will be 5 x 3 = 15 dollars. So she has 23 - 15 dollars left. 23 - 15 is 8. The answer is 8.\n\n\ +Q: {{question}}\nA:" +doc_to_target: "{{answer.split('####')[-1].strip()}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: false + regexes_to_ignore: + - "," + - "\\$" + - "(?s).*#### " + - "\\.$" +generation_kwargs: + until: + - "Q:" + - "" + - "<|im_end|>" + do_sample: false +repeats: 1 +num_fewshot: 0 +filter_list: + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)." 
+ - function: "take_first" + - name: "flexible-extract" + filter: + - function: "regex" + group_select: -1 + regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)" + - function: "take_first" +metadata: + version: 3.0 + num_fewshot: 8 diff --git a/lm-evaluation/lm_eval/tasks/gsm8k/gsm8k.yaml b/lm-evaluation/lm_eval/tasks/gsm8k/gsm8k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2c4ef836b1b21177d40c10e410cf69051c98e9e3 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gsm8k/gsm8k.yaml @@ -0,0 +1,45 @@ +group: + - math_word_problems +task: gsm8k +dataset_path: gsm8k +dataset_name: main +output_type: generate_until +training_split: train +fewshot_split: train +test_split: test +doc_to_text: "Question: {{question}}\nAnswer:" +doc_to_target: "{{answer}}" #" {{answer.split('### ')[-1].rstrip()}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: false + regexes_to_ignore: + - "," + - "\\$" + - "(?s).*#### " + - "\\.$" +generation_kwargs: + until: + - "Question:" + - "" + - "<|im_end|>" + do_sample: false + temperature: 0.0 +repeats: 1 +num_fewshot: 5 +filter_list: + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "#### (\\-?[0-9\\.\\,]+)" + - function: "take_first" + - name: "flexible-extract" + filter: + - function: "regex" + group_select: -1 + regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)" + - function: "take_first" +metadata: + version: 3.0 diff --git a/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-as.yaml b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-as.yaml new file mode 100644 index 0000000000000000000000000000000000000000..91c392a9484da036e2664d4356b6590c9ece8779 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-as.yaml @@ -0,0 +1,20 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +dataset_path: ai4bharat/IndicSentiment +dataset_name: translation-as +validation_split: validation + +output_type: multiple_choice +doc_to_text: "Predict the sentiment of the review. The possible choices for the sentiment are: 'Positive' and 'Negative'.\n {INDIC REVIEW}" +doc_to_target: LABEL +doc_to_choice: ["Positive", "Negative"] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 + + +task: indicSentiment-as diff --git a/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-bd.yaml b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-bd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..aeab3d4dc2fa2ef6a1e6849d50993e2727eb228a --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-bd.yaml @@ -0,0 +1,20 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +dataset_path: ai4bharat/IndicSentiment +dataset_name: translation-bd +validation_split: validation + +output_type: multiple_choice +doc_to_text: "Predict the sentiment of the review. 
The possible choices for the sentiment are: 'Positive' and 'Negative'.\n {INDIC REVIEW}" +doc_to_target: LABEL +doc_to_choice: ["Positive", "Negative"] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 + + +task: indicSentiment-bd diff --git a/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-bn.yaml b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-bn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4eb4ffc5b92c9ed485eba897dfb228bb8c771660 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-bn.yaml @@ -0,0 +1,20 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +dataset_path: ai4bharat/IndicSentiment +dataset_name: translation-bn +validation_split: validation + +output_type: multiple_choice +doc_to_text: "Predict the sentiment of the review. The possible choices for the sentiment are: 'Positive' and 'Negative'.\n {INDIC REVIEW}" +doc_to_target: LABEL +doc_to_choice: ["Positive", "Negative"] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 + + +task: indicSentiment-bn diff --git a/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-gu.yaml b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-gu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..11f14925850d0aa130c204377b1b23a4c70e736e --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-gu.yaml @@ -0,0 +1,20 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +dataset_path: ai4bharat/IndicSentiment +dataset_name: translation-gu +validation_split: validation + +output_type: multiple_choice +doc_to_text: "Predict the sentiment of the review. The possible choices for the sentiment are: 'Positive' and 'Negative'.\n {INDIC REVIEW}" +doc_to_target: LABEL +doc_to_choice: ["Positive", "Negative"] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 + + +task: indicSentiment-gu diff --git a/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-hi.yaml b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-hi.yaml new file mode 100644 index 0000000000000000000000000000000000000000..55011abde5e6a30761d592b563fa43eb464e6dc3 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-hi.yaml @@ -0,0 +1,20 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +dataset_path: ai4bharat/IndicSentiment +dataset_name: translation-hi +validation_split: validation + +output_type: multiple_choice +doc_to_text: "Predict the sentiment of the review. 
The possible choices for the sentiment are: 'Positive' and 'Negative'.\n {INDIC REVIEW}" +doc_to_target: LABEL +doc_to_choice: ["Positive", "Negative"] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 + + +task: indicSentiment-hi diff --git a/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-kn.yaml b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-kn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dc19b755ea9bd4fcbc97668640c45e2b9deb043a --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-kn.yaml @@ -0,0 +1,20 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +dataset_path: ai4bharat/IndicSentiment +dataset_name: translation-kn +validation_split: validation + +output_type: multiple_choice +doc_to_text: "Predict the sentiment of the review. The possible choices for the sentiment are: 'Positive' and 'Negative'.\n {INDIC REVIEW}" +doc_to_target: LABEL +doc_to_choice: ["Positive", "Negative"] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 + + +task: indicSentiment-kn diff --git a/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-ml.yaml b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-ml.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a794b417aec222615c9da44a5f2b127966bfa275 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-ml.yaml @@ -0,0 +1,20 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +dataset_path: ai4bharat/IndicSentiment +dataset_name: translation-ml +validation_split: validation + +output_type: multiple_choice +doc_to_text: "Predict the sentiment of the review. The possible choices for the sentiment are: 'Positive' and 'Negative'.\n {INDIC REVIEW}" +doc_to_target: LABEL +doc_to_choice: ["Positive", "Negative"] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 + + +task: indicSentiment-ml diff --git a/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-mr.yaml b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-mr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c933d0519a4ca434617582ddc478cc62763c8840 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-mr.yaml @@ -0,0 +1,20 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +dataset_path: ai4bharat/IndicSentiment +dataset_name: translation-mr +validation_split: validation + +output_type: multiple_choice +doc_to_text: "Predict the sentiment of the review. 
The possible choices for the sentiment are: 'Positive' and 'Negative'.\n {INDIC REVIEW}" +doc_to_target: LABEL +doc_to_choice: ["Positive", "Negative"] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 + + +task: indicSentiment-mr diff --git a/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-or.yaml b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-or.yaml new file mode 100644 index 0000000000000000000000000000000000000000..66f9ddc90e421dbd74c043fd2179a1b2ad5cad1c --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-or.yaml @@ -0,0 +1,20 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +dataset_path: ai4bharat/IndicSentiment +dataset_name: translation-or +validation_split: validation + +output_type: multiple_choice +doc_to_text: "Predict the sentiment of the review. The possible choices for the sentiment are: 'Positive' and 'Negative'.\n {INDIC REVIEW}" +doc_to_target: LABEL +doc_to_choice: ["Positive", "Negative"] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 + + +task: indicSentiment-or diff --git a/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-pa.yaml b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-pa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..db43f1812632084b98837f6ceedcde5fb94c83eb --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-pa.yaml @@ -0,0 +1,20 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +dataset_path: ai4bharat/IndicSentiment +dataset_name: translation-pa +validation_split: validation + +output_type: multiple_choice +doc_to_text: "Predict the sentiment of the review. The possible choices for the sentiment are: 'Positive' and 'Negative'.\n {INDIC REVIEW}" +doc_to_target: LABEL +doc_to_choice: ["Positive", "Negative"] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 + + +task: indicSentiment-pa diff --git a/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-ta.yaml b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-ta.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f6f46bb231efafe8df3a44fb02ea772149c6299b --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-ta.yaml @@ -0,0 +1,20 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +dataset_path: ai4bharat/IndicSentiment +dataset_name: translation-ta +validation_split: validation + +output_type: multiple_choice +doc_to_text: "Predict the sentiment of the review. 
The possible choices for the sentiment are: 'Positive' and 'Negative'.\n {INDIC REVIEW}" +doc_to_target: LABEL +doc_to_choice: ["Positive", "Negative"] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 + + +task: indicSentiment-ta diff --git a/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-te.yaml b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-te.yaml new file mode 100644 index 0000000000000000000000000000000000000000..348d2b360f477c0ab5b042d2310d4c3f5799b7d9 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment-te.yaml @@ -0,0 +1,20 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +dataset_path: ai4bharat/IndicSentiment +dataset_name: translation-te +validation_split: validation + +output_type: multiple_choice +doc_to_text: "Predict the sentiment of the review. The possible choices for the sentiment are: 'Positive' and 'Negative'.\n {INDIC REVIEW}" +doc_to_target: LABEL +doc_to_choice: ["Positive", "Negative"] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 + + +task: indicSentiment-te diff --git a/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment.yaml b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4b9ced0c218dd6fe327711a29a42c53bb0ba7d4f --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indicsentiment/indicsentiment.yaml @@ -0,0 +1,20 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +dataset_path: ai4bharat/IndicSentiment +dataset_name: translation-#lang# +validation_split: validation + +output_type: multiple_choice +doc_to_text: "Predict the sentiment of the review. The possible choices for the sentiment are: 'Positive' and 'Negative'.\n {INDIC REVIEW}" +doc_to_target: LABEL +doc_to_choice: ["Positive", "Negative"] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 + + +task: indicSentiment-#lang# diff --git a/lm-evaluation/lm_eval/tasks/indictranslation/README.md b/lm-evaluation/lm_eval/tasks/indictranslation/README.md new file mode 100644 index 0000000000000000000000000000000000000000..bd36302619a2cc1b40b57ef758d328d85580e420 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indictranslation/README.md @@ -0,0 +1,39 @@ +# Translation Tasks + +### Paper + + + +### Citation + +``` + +``` + +### Groups and Tasks + +#### Groups + +* `gpt3_translation_tasks` +* `wmt14` +* `wmt16` +* `wmt20` +* `iwslt2017` + +#### Tasks + +* + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [x] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
+ * [ ] Checked for equivalence with v0.3.0 LM Evaluation Harness diff --git a/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-as.yaml b/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-as.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e5411464dddc2a3d92686e9f8c1569d5e785cafe --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-as.yaml @@ -0,0 +1,24 @@ +dataset_name: eng_Latn-asm_Beng +dataset_path: Muennighoff/flores200 +doc_to_target: sentence_asm_Beng +doc_to_text: 'Arabic phrase: {{sentence_eng_Latn}} + + English phrase:' +output_type: generate_until +# training_split: train +# validation_split: validation +# fewshot_split: validation +test_split: devtest +metric_list: + - metric: bleu + - metric: ter + - metric: chrf +generation_kwargs: + until: + - "\n" + do_sample: false + temperature: 0.0 +repeats: 1 +metadata: + version: 1.0 +task: flores-en-as \ No newline at end of file diff --git a/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-bn.yaml b/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-bn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5c242b640b3368c9e6ba65815c6b34de9d30faa0 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-bn.yaml @@ -0,0 +1,24 @@ +dataset_name: eng_Latn-ben_Beng +dataset_path: Muennighoff/flores200 +doc_to_target: sentence_ben_Beng +doc_to_text: 'Arabic phrase: {{sentence_eng_Latn}} + + English phrase:' +output_type: generate_until +# training_split: train +# validation_split: validation +# fewshot_split: validation +test_split: devtest +metric_list: + - metric: bleu + - metric: ter + - metric: chrf +generation_kwargs: + until: + - "\n" + do_sample: false + temperature: 0.0 +repeats: 1 +metadata: + version: 1.0 +task: flores-en-bn \ No newline at end of file diff --git a/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-gu.yaml b/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-gu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cb4af523d3310f6e26fad1310200c41ac524d28b --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-gu.yaml @@ -0,0 +1,24 @@ +dataset_name: eng_Latn-guj_Gujr +dataset_path: Muennighoff/flores200 +doc_to_target: sentence_guj_Gujr +doc_to_text: 'Arabic phrase: {{sentence_eng_Latn}} + + English phrase:' +output_type: generate_until +# training_split: train +# validation_split: validation +# fewshot_split: validation +test_split: devtest +metric_list: + - metric: bleu + - metric: ter + - metric: chrf +generation_kwargs: + until: + - "\n" + do_sample: false + temperature: 0.0 +repeats: 1 +metadata: + version: 1.0 +task: flores-en-gu \ No newline at end of file diff --git a/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-hi.yaml b/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-hi.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bc0999c3984d1c972ff0a16a4b3e8421915c93a9 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-hi.yaml @@ -0,0 +1,24 @@ +dataset_name: eng_Latn-hin_Deva +dataset_path: Muennighoff/flores200 +doc_to_target: sentence_hin_Deva +doc_to_text: 'Arabic phrase: {{sentence_eng_Latn}} + + English phrase:' +output_type: generate_until +# training_split: train +# validation_split: validation +# fewshot_split: validation +test_split: devtest +metric_list: + - metric: bleu + - metric: ter + - metric: chrf +generation_kwargs: + until: + - "\n" + do_sample: false + temperature: 0.0 +repeats: 1 
+metadata: + version: 1.0 +task: flores-en-hi \ No newline at end of file diff --git a/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-kn.yaml b/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-kn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f3dda7b5072218ae99bda2b312a0571a69c475d6 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-kn.yaml @@ -0,0 +1,24 @@ +dataset_name: eng_Latn-kan_Knda +dataset_path: Muennighoff/flores200 +doc_to_target: sentence_kan_Knda +doc_to_text: 'Arabic phrase: {{sentence_eng_Latn}} + + English phrase:' +output_type: generate_until +# training_split: train +# validation_split: validation +# fewshot_split: validation +test_split: devtest +metric_list: + - metric: bleu + - metric: ter + - metric: chrf +generation_kwargs: + until: + - "\n" + do_sample: false + temperature: 0.0 +repeats: 1 +metadata: + version: 1.0 +task: flores-en-kn \ No newline at end of file diff --git a/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-ml.yaml b/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-ml.yaml new file mode 100644 index 0000000000000000000000000000000000000000..644be5f64a6546dd96dfe6bcc1bdabae36633411 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-ml.yaml @@ -0,0 +1,24 @@ +dataset_name: eng_Latn-mal_Mlym +dataset_path: Muennighoff/flores200 +doc_to_target: sentence_mal_Mlym +doc_to_text: 'Arabic phrase: {{sentence_eng_Latn}} + + English phrase:' +output_type: generate_until +# training_split: train +# validation_split: validation +# fewshot_split: validation +test_split: devtest +metric_list: + - metric: bleu + - metric: ter + - metric: chrf +generation_kwargs: + until: + - "\n" + do_sample: false + temperature: 0.0 +repeats: 1 +metadata: + version: 1.0 +task: flores-en-ml \ No newline at end of file diff --git a/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-mr.yaml b/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-mr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..88601f9aa0b16e57b51bd5436be5a5046d2f8c1e --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-mr.yaml @@ -0,0 +1,24 @@ +dataset_name: eng_Latn-mar_Deva +dataset_path: Muennighoff/flores200 +doc_to_target: sentence_mar_Deva +doc_to_text: 'Arabic phrase: {{sentence_eng_Latn}} + + English phrase:' +output_type: generate_until +# training_split: train +# validation_split: validation +# fewshot_split: validation +test_split: devtest +metric_list: + - metric: bleu + - metric: ter + - metric: chrf +generation_kwargs: + until: + - "\n" + do_sample: false + temperature: 0.0 +repeats: 1 +metadata: + version: 1.0 +task: flores-en-mr \ No newline at end of file diff --git a/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-or.yaml b/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-or.yaml new file mode 100644 index 0000000000000000000000000000000000000000..899cc27c481ae07f9989025c46dd1077a7d0d8ef --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-or.yaml @@ -0,0 +1,24 @@ +dataset_name: eng_Latn-ory_Orya +dataset_path: Muennighoff/flores200 +doc_to_target: sentence_ory_Orya +doc_to_text: 'Arabic phrase: {{sentence_eng_Latn}} + + English phrase:' +output_type: generate_until +# training_split: train +# validation_split: validation +# fewshot_split: validation +test_split: devtest +metric_list: + - metric: bleu + - metric: ter + - metric: chrf +generation_kwargs: + until: + - "\n" + do_sample: false + temperature: 0.0 
+repeats: 1 +metadata: + version: 1.0 +task: flores-en-or \ No newline at end of file diff --git a/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-pa.yaml b/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-pa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9fb2aeb787213e549a9607f3f5a310fea701872a --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-pa.yaml @@ -0,0 +1,24 @@ +dataset_name: eng_Latn-pan_Guru +dataset_path: Muennighoff/flores200 +doc_to_target: sentence_pan_Guru +doc_to_text: 'Arabic phrase: {{sentence_eng_Latn}} + + English phrase:' +output_type: generate_until +# training_split: train +# validation_split: validation +# fewshot_split: validation +test_split: devtest +metric_list: + - metric: bleu + - metric: ter + - metric: chrf +generation_kwargs: + until: + - "\n" + do_sample: false + temperature: 0.0 +repeats: 1 +metadata: + version: 1.0 +task: flores-en-pa \ No newline at end of file diff --git a/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-ta.yaml b/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-ta.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ea036878543be9e5987bc59f834dc484457388e5 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-ta.yaml @@ -0,0 +1,24 @@ +dataset_name: eng_Latn-tam_Taml +dataset_path: Muennighoff/flores200 +doc_to_target: sentence_tam_Taml +doc_to_text: 'Arabic phrase: {{sentence_eng_Latn}} + + English phrase:' +output_type: generate_until +# training_split: train +# validation_split: validation +# fewshot_split: validation +test_split: devtest +metric_list: + - metric: bleu + - metric: ter + - metric: chrf +generation_kwargs: + until: + - "\n" + do_sample: false + temperature: 0.0 +repeats: 1 +metadata: + version: 1.0 +task: flores-en-ta \ No newline at end of file diff --git a/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-te.yaml b/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-te.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9957d02e01d269d015c214b9bb5b5169890ff497 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indictranslation/flores_en-te.yaml @@ -0,0 +1,24 @@ +dataset_name: eng_Latn-tel_Telu +dataset_path: Muennighoff/flores200 +doc_to_target: sentence_tel_Telu +doc_to_text: 'Arabic phrase: {{sentence_eng_Latn}} + + English phrase:' +output_type: generate_until +# training_split: train +# validation_split: validation +# fewshot_split: validation +test_split: devtest +metric_list: + - metric: bleu + - metric: ter + - metric: chrf +generation_kwargs: + until: + - "\n" + do_sample: false + temperature: 0.0 +repeats: 1 +metadata: + version: 1.0 +task: flores-en-te \ No newline at end of file diff --git a/lm-evaluation/lm_eval/tasks/indictranslation/iwslt2017_ar-en.yaml b/lm-evaluation/lm_eval/tasks/indictranslation/iwslt2017_ar-en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ea713393c1dfbe9f7e1f6d055dd4768ace31269e --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indictranslation/iwslt2017_ar-en.yaml @@ -0,0 +1,13 @@ +# Generated by utils.py +dataset_name: iwslt2017-en-ar +dataset_path: iwslt2017 +doc_to_target: ' {{translation["en"]}}' +doc_to_text: 'Arabic phrase: {{translation["ar"]}} + + English phrase:' +group: +- generate_until +- translation +- iwslt2017 +include: wmt_common_yaml +task: iwslt2017-ar-en diff --git a/lm-evaluation/lm_eval/tasks/indictranslation/iwslt2017_en-ar.yaml 
b/lm-evaluation/lm_eval/tasks/indictranslation/iwslt2017_en-ar.yaml new file mode 100644 index 0000000000000000000000000000000000000000..891ad50fd6fb60fdb8f21f9004857d739a15640f --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indictranslation/iwslt2017_en-ar.yaml @@ -0,0 +1,13 @@ +# Generated by utils.py +dataset_name: iwslt2017-en-ar +dataset_path: iwslt2017 +doc_to_target: ' {{translation["ar"]}}' +doc_to_text: 'English phrase: {{translation["en"]}} + + Arabic phrase:' +group: +- generate_until +- translation +- iwslt2017 +include: wmt_common_yaml +task: iwslt2017-en-ar diff --git a/lm-evaluation/lm_eval/tasks/indictranslation/utils.py b/lm-evaluation/lm_eval/tasks/indictranslation/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f30c4d86259259a325edcee3b64ad3199b966c96 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indictranslation/utils.py @@ -0,0 +1,118 @@ +import argparse + +import yaml + + +try: + import pycountry +except ModuleNotFoundError: + raise Exception( + "`pycountry` is required for generating translation task prompt templates. \ +please install pycountry via pip install lm-eval[multilingual] or pip install -e .[multilingual]", + ) + + +# Different translation benchmarks included in the library. Mostly WMT. +# These correspond to dataset names (subsets) on HuggingFace for each dataset. +# A yaml file is generated by this script for each language pair. + +gpt3_translation_benchmarks = { + "wmt14": ["fr-en"], # ["en-fr", "fr-en"], # French + "wmt16": [ + "ro-en", + "de-en", + ], # ["en-ro", "ro-en", "de-en", "en-de"], # German, Romanian +} + +# 28 total +LANGUAGES = { + **gpt3_translation_benchmarks, + # "wmt20": sacrebleu.get_langpairs_for_testset("wmt20"), + "iwslt2017": ["en-ar"], # Arabic +} + + +def code_to_language(code): + # key is alpha_2 or alpha_3 depending on the code length + language_tuple = pycountry.languages.get(**{f"alpha_{len(code)}": code}) + return language_tuple.name + + +def gen_lang_yamls(output_dir: str, overwrite: bool) -> None: + """ + Generate a yaml file for each language. + + :param output_dir: The directory to output the files to. + :param overwrite: Whether to overwrite files if they already exist. 
+ """ + err = [] + for lang in LANGUAGES.keys(): + for dataset_name in LANGUAGES[lang]: + src_lang, _, tgt_lang = dataset_name.partition("-") + for src, tgt in [[src_lang, tgt_lang], [tgt_lang, src_lang]]: + # both translation directions for each lang pair + lang_pair = src + "-" + tgt + file_name = f"{lang}_{lang_pair}.yaml" + try: + source, target = code_to_language(src), code_to_language(tgt) + + groups = ["generate_until", "translation", lang] + if lang in gpt3_translation_benchmarks.keys(): + groups += ["gpt3_translation_benchmarks"] + + with open( + f"{output_dir}/{file_name}", + "w" if overwrite else "x", + encoding="utf8", + ) as f: + f.write("# Generated by utils.py\n") + yaml.dump( + { + "include": "wmt_common_yaml", + "group": groups, + "dataset_path": lang, + "dataset_name": dataset_name + if not (lang == "iwslt2017") + else "iwslt2017-" + dataset_name, + "task": f"{lang}-{lang_pair}", + "doc_to_text": f"{source} phrase: " + + "{{translation[" + + f'"{src}"' + + "]}}\n" + + f"{target} phrase:", + "doc_to_target": " {{" + + "translation[" + + f'"{tgt}"]' + + "}}", + }, + f, + ) + except FileExistsError: + err.append(file_name) + + if len(err) > 0: + raise FileExistsError( + "Files were not created because they already exist (use --overwrite flag):" + f" {', '.join(err)}" + ) + + +def main() -> None: + """Parse CLI args and generate language-specific yaml files.""" + parser = argparse.ArgumentParser() + parser.add_argument( + "--overwrite", + default=False, + action="store_true", + help="Overwrite files if they already exist", + ) + parser.add_argument( + "--output-dir", default=".", help="Directory to write yaml files to" + ) + args = parser.parse_args() + + gen_lang_yamls(output_dir=args.output_dir, overwrite=args.overwrite) + + +if __name__ == "__main__": + main() diff --git a/lm-evaluation/lm_eval/tasks/indictranslation/wmt14_en-fr.yaml b/lm-evaluation/lm_eval/tasks/indictranslation/wmt14_en-fr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b7e42dca5acca5036ec8b3b619501557c6a1c36c --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indictranslation/wmt14_en-fr.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: fr-en +dataset_path: wmt14 +doc_to_target: ' {{translation["fr"]}}' +doc_to_text: 'English phrase: {{translation["en"]}} + + French phrase:' +group: +- generate_until +- translation +- wmt14 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt14-en-fr diff --git a/lm-evaluation/lm_eval/tasks/indictranslation/wmt14_fr-en.yaml b/lm-evaluation/lm_eval/tasks/indictranslation/wmt14_fr-en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..09ddd57d6049c29f35150aa4de94c6db3604a0a4 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indictranslation/wmt14_fr-en.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: fr-en +dataset_path: wmt14 +doc_to_target: ' {{translation["en"]}}' +doc_to_text: 'French phrase: {{translation["fr"]}} + + English phrase:' +group: +- generate_until +- translation +- wmt14 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt14-fr-en diff --git a/lm-evaluation/lm_eval/tasks/indictranslation/wmt16_de-en.yaml b/lm-evaluation/lm_eval/tasks/indictranslation/wmt16_de-en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..23d50e4aacc8c4e19a8b282e4051e80ec18edf29 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indictranslation/wmt16_de-en.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: de-en +dataset_path: wmt16 +doc_to_target: ' 
{{translation["en"]}}' +doc_to_text: 'German phrase: {{translation["de"]}} + + English phrase:' +group: +- generate_until +- translation +- wmt16 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt16-de-en diff --git a/lm-evaluation/lm_eval/tasks/indictranslation/wmt16_en-de.yaml b/lm-evaluation/lm_eval/tasks/indictranslation/wmt16_en-de.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8d391b6c6b879c15f0c8d63119824647ea6997c3 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indictranslation/wmt16_en-de.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: de-en +dataset_path: wmt16 +doc_to_target: ' {{translation["de"]}}' +doc_to_text: 'English phrase: {{translation["en"]}} + + German phrase:' +group: +- generate_until +- translation +- wmt16 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt16-en-de diff --git a/lm-evaluation/lm_eval/tasks/indictranslation/wmt16_en-ro.yaml b/lm-evaluation/lm_eval/tasks/indictranslation/wmt16_en-ro.yaml new file mode 100644 index 0000000000000000000000000000000000000000..45a8cae11824bd726064448422f021ec73d7ce87 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indictranslation/wmt16_en-ro.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: ro-en +dataset_path: wmt16 +doc_to_target: ' {{translation["ro"]}}' +doc_to_text: 'English phrase: {{translation["en"]}} + + Romanian phrase:' +group: +- generate_until +- translation +- wmt16 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt16-en-ro diff --git a/lm-evaluation/lm_eval/tasks/indictranslation/wmt16_ro-en.yaml b/lm-evaluation/lm_eval/tasks/indictranslation/wmt16_ro-en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..39441eac1c8cb2a8ec4d4e9c9b31402607a5ea77 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indictranslation/wmt16_ro-en.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: ro-en +dataset_path: wmt16 +doc_to_target: ' {{translation["en"]}}' +doc_to_text: 'Romanian phrase: {{translation["ro"]}} + + English phrase:' +group: +- generate_until +- translation +- wmt16 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt16-ro-en diff --git a/lm-evaluation/lm_eval/tasks/indictranslation/wmt_common_yaml b/lm-evaluation/lm_eval/tasks/indictranslation/wmt_common_yaml new file mode 100644 index 0000000000000000000000000000000000000000..2cb3c7c8f8d8305e9907c89c94d6f8fd95c709fc --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indictranslation/wmt_common_yaml @@ -0,0 +1,17 @@ +output_type: generate_until +training_split: train +validation_split: validation +fewshot_split: validation +test_split: test +metric_list: + - metric: bleu + - metric: ter + - metric: chrf +generation_kwargs: + until: + - "\n" + do_sample: false + temperature: 0.0 +repeats: 1 +metadata: + version: 1.0 diff --git a/lm-evaluation/lm_eval/tasks/kobest/kobest_boolq.yaml b/lm-evaluation/lm_eval/tasks/kobest/kobest_boolq.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e9932d56a9300f31bd96a1cd14ee2df091005b21 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kobest/kobest_boolq.yaml @@ -0,0 +1,23 @@ +group: + - kobest +task: kobest_boolq +dataset_path: skt/kobest_v1 +dataset_name: boolq +output_type: multiple_choice +training_split: train +validation_split: validation +test_split: test +doc_to_text: "{{paragraph}} 질문: {{question}} 답변: " +doc_to_target: "{{label}}" +doc_to_choice: ["아니오", "예"] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: True + - metric: f1 + 
aggregation: !function utils.macro_f1_score + average: macro + hf_evaluate: true + higher_is_better: True +metadata: + version: 1.0 diff --git a/lm-evaluation/lm_eval/tasks/kobest/kobest_copa.yaml b/lm-evaluation/lm_eval/tasks/kobest/kobest_copa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1f3b34e61fad86a037010dd892fd7b894346f456 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kobest/kobest_copa.yaml @@ -0,0 +1,23 @@ +group: + - kobest +task: kobest_copa +dataset_path: skt/kobest_v1 +dataset_name: copa +output_type: multiple_choice +training_split: train +validation_split: validation +test_split: test +doc_to_text: !function utils.copa_doc_to_text +doc_to_target: !function utils.copa_doc_to_target +doc_to_choice: !function utils.copa_doc_to_choice +metric_list: + - metric: acc + aggregation: mean + higher_is_better: True + - metric: f1 + aggregation: !function utils.macro_f1_score + average: macro + hf_evaluate: true + higher_is_better: True +metadata: + version: 1.0 diff --git a/lm-evaluation/lm_eval/tasks/kobest/kobest_hellaswag.yaml b/lm-evaluation/lm_eval/tasks/kobest/kobest_hellaswag.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d83266a813ecd5a9ffd1989d45ac4c49b5779558 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kobest/kobest_hellaswag.yaml @@ -0,0 +1,27 @@ +group: + - kobest +task: kobest_hellaswag +dataset_path: skt/kobest_v1 +dataset_name: hellaswag +training_split: train +validation_split: validation +output_type: multiple_choice +test_split: test +doc_to_text: "{{query}}" +doc_to_target: "{{label}}" +process_docs: !function utils.hellaswag_process_doc +doc_to_choice: "choices" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: True + - metric: acc_norm + aggregation: mean + higher_is_better: True + - metric: f1 + aggregation: !function utils.macro_f1_score + average: macro + hf_evaluate: true + higher_is_better: True +metadata: + version: 1.0 diff --git a/lm-evaluation/lm_eval/tasks/kobest/kobest_wic.yaml b/lm-evaluation/lm_eval/tasks/kobest/kobest_wic.yaml new file mode 100644 index 0000000000000000000000000000000000000000..569d3393dbe78e1bb5d92e00d4ceac439282b9d0 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kobest/kobest_wic.yaml @@ -0,0 +1,25 @@ +group: + - kobest +task: kobest_wic +dataset_path: skt/kobest_v1 +dataset_name: wic +output_type: multiple_choice +training_split: train +validation_split: validation +test_split: test +doc_to_text: !function utils.wic_doc_to_text +doc_to_target: "{{label}}" +doc_to_choice: ['아니오', '예'] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: True + - metric: f1 + aggregation: !function utils.macro_f1_score + average: macro + hf_evaluate: true + higher_is_better: True +metadata: + version: 1.0 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/lm_eval/tasks/xcopa/default_et.yaml b/lm-evaluation/lm_eval/tasks/xcopa/default_et.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9f2b0b73b585ab1a6e4c946237e52dba283a830a --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/xcopa/default_et.yaml @@ -0,0 +1,14 @@ +group: xcopa +task: xcopa_et +dataset_path: xcopa +dataset_name: et +output_type: multiple_choice +validation_split: validation +test_split: test +doc_to_text: !function utils.doc_to_text_et +doc_to_target: label +doc_to_choice: !function utils.doc_to_choice +metric_list: + - metric: acc +metadata: + version: 1.0 diff --git a/lm-evaluation/lm_eval/tasks/xcopa/default_id.yaml 
b/lm-evaluation/lm_eval/tasks/xcopa/default_id.yaml new file mode 100644 index 0000000000000000000000000000000000000000..08fda55c8bba30023936fc11c2efa8de6007125c --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/xcopa/default_id.yaml @@ -0,0 +1,4 @@ +include: default_et.yaml +task: xcopa_id +dataset_name: id +doc_to_text: !function utils.doc_to_text_id diff --git a/lm-evaluation/lm_eval/tasks/xcopa/default_ta.yaml b/lm-evaluation/lm_eval/tasks/xcopa/default_ta.yaml new file mode 100644 index 0000000000000000000000000000000000000000..216cacf89bd233858e613909e32e4b909c6bb338 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/xcopa/default_ta.yaml @@ -0,0 +1,4 @@ +include: default_et.yaml +task: xcopa_ta +dataset_name: ta +doc_to_text: !function utils.doc_to_text_ta diff --git a/lm-evaluation/lm_eval/tasks/xcopa/default_tr.yaml b/lm-evaluation/lm_eval/tasks/xcopa/default_tr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..81dac28670f00227b641fe4af46ad1542f7d173e --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/xcopa/default_tr.yaml @@ -0,0 +1,4 @@ +include: default_et.yaml +task: xcopa_tr +dataset_name: tr +doc_to_text: !function utils.doc_to_text_tr diff --git a/lm-evaluation/lm_eval/tasks/xcopa/utils.py b/lm-evaluation/lm_eval/tasks/xcopa/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fe9d85920baa7098fd20f853da6eadcbc787dedd --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/xcopa/utils.py @@ -0,0 +1,114 @@ +from functools import partial + + +def convert_choice(choice): + return choice[0].lower() + choice[1:] + + +def doc_to_text(doc, connector): + # Drop the period + conn = connector[doc["question"]] + return doc["premise"].strip()[:-1] + f" {conn}" + + +def doc_to_choice(doc): + return [convert_choice(doc["choice1"]), convert_choice(doc["choice2"])] + + +doc_to_text_et = partial( + doc_to_text, + connector={ + "cause": "sest", + "effect": "seetõttu", + }, +) + + +doc_to_text_ht = partial( + doc_to_text, + connector={ + "cause": "poukisa", + "effect": "donk sa", + }, +) + + +doc_to_text_it = partial( + doc_to_text, + connector={ + "cause": "perché", + "effect": "quindi", + }, +) + + +doc_to_text_id = partial( + doc_to_text, + connector={ + "cause": "karena", + "effect": "maka", + }, +) + + +doc_to_text_qu = partial( + doc_to_text, + connector={ + "cause": "imataq", + "effect": "chaymi", + }, +) + + +doc_to_text_sw = partial( + doc_to_text, + connector={ + "cause": "kwa sababu", + "effect": "kwa hiyo", + }, +) + + +doc_to_text_zh = partial( + doc_to_text, + connector={ + "cause": "因为", + "effect": "所以", + }, +) + + +doc_to_text_ta = partial( + doc_to_text, + connector={ + "cause": "காரணமாக", + "effect": "எனவே", + }, +) + + +doc_to_text_th = partial( + doc_to_text, + connector={ + "cause": "เพราะ", + "effect": "ดังนั้น", + }, +) + + +doc_to_text_tr = partial( + doc_to_text, + connector={ + "cause": "çünkü", + "effect": "bu yüzden", + }, +) + + +doc_to_text_vi = partial( + doc_to_text, + connector={ + "cause": "bởi vì", + "effect": "vì vậy", + }, +)
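To make the behaviour of the xcopa helpers above concrete, here is a minimal sketch that runs one hand-written Indonesian document through `doc_to_text_id` and `doc_to_choice`. The premise, choices, and the assumption that the module resolves as `lm_eval.tasks.xcopa.utils` are illustrative only; the field names follow the XCOPA schema referenced by `default_id.yaml`.

```python
# Minimal sketch (not part of the diff): exercising the xcopa prompt helpers
# on an invented Indonesian example. Assumes the package is installed so that
# lm_eval.tasks.xcopa.utils is importable.
from lm_eval.tasks.xcopa.utils import doc_to_choice, doc_to_text_id

doc = {
    "premise": "Pria itu membuka keran air.",  # "The man turned on the tap."
    "question": "effect",                      # selects the "maka" connector
    "choice1": "Air mengalir keluar.",
    "choice2": "Keran itu berkarat.",
    "label": 0,
}

# doc_to_text drops the premise's trailing period and appends the connector.
print(doc_to_text_id(doc))  # Pria itu membuka keran air maka
# doc_to_choice lower-cases the first character of each continuation.
print(doc_to_choice(doc))   # ['air mengalir keluar.', 'keran itu berkarat.']
```

The other `doc_to_text_*` partials work the same way; only the per-language connector dictionary changes.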