diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/README.md b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b83e2af67df4c6e82350caaae54a68709a1c59dd --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/README.md @@ -0,0 +1,42 @@ +# ArabicMMLU + +### Paper + +ArabicMMLU: Measuring massive multitask language understanding in Arabic +This dataset was translated from the original MMLU with the help of GPT-4. + +The original dataset: [MMLU](https://arxiv.org/pdf/2009.03300v3.pdf) + +The translation was carried out by the researchers behind [AceGPT](https://arxiv.org/abs/2309.12053). + +ArabicMMLU is a comprehensive evaluation benchmark specifically designed to assess the knowledge and reasoning abilities of LLMs within the context of the Arabic language and culture. +ArabicMMLU covers a wide range of subjects, comprising 57 topics that span from elementary to advanced professional levels. + +Homepage: [AceGPT Homepage](https://github.com/FreedomIntelligence/AceGPT/tree/main/eval/benchmark_eval/benchmarks/MMLUArabic) + +### Citation + + +### Groups and Tasks + +#### Groups + +- `ammlu`: All 57 subjects of the ArabicMMLU dataset, evaluated following the methodology in MMLU's original implementation. + +#### Tasks + + +The following tasks evaluate subjects in the ArabicMMLU dataset using loglikelihood-based multiple-choice scoring: +- `ammlu_{subject_english}` + +### Checklist + +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? + * [x] Yes, original implementation contributed by author of the benchmark + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [x] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/_default_template_yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/_default_template_yaml new file mode 100644 index 0000000000000000000000000000000000000000..bbcefffb7889e16d0e9abdc0015149e72b39b029 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/_default_template_yaml @@ -0,0 +1,19 @@ +group: ammlu +dataset_path: Hennara/ammlu +test_split: test +fewshot_split: dev +fewshot_config: + sampler: first_n +output_type: multiple_choice +doc_to_text: "{{Question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. 
{{D}}\nالجواب:" +doc_to_choice: ["A", "B", "C", "D"] +doc_to_target: "{{['A', 'B', 'C', 'D'].index(Answer)}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 0.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/_generate_configs.py b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/_generate_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..5105e94c2608851f223d221b757a4ea560b3087c --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/_generate_configs.py @@ -0,0 +1,119 @@ +""" +Take in a YAML, and output all other splits with this YAML +""" +import argparse +import os + +import yaml +from tqdm import tqdm + + +SUBJECTS = { + "abstract_algebra": "ألعلوم وتقنية المعلومات و الرياضيات", + "anatomy": "ألعلوم وتقنية المعلومات و الرياضيات", + "astronomy": "ألعلوم وتقنية المعلومات و الرياضيات", + "business_ethics": "علوم أخرى", + "clinical_knowledge": "علوم أخرى", + "college_biology": "ألعلوم وتقنية المعلومات و الرياضيات", + "college_chemistry": "ألعلوم وتقنية المعلومات و الرياضيات", + "college_computer_science": "ألعلوم وتقنية المعلومات و الرياضيات", + "college_mathematics": "ألعلوم وتقنية المعلومات و الرياضيات", + "college_medicine": "علوم أخرى", + "college_physics": "ألعلوم وتقنية المعلومات و الرياضيات", + "computer_security": "ألعلوم وتقنية المعلومات و الرياضيات", + "conceptual_physics": "ألعلوم وتقنية المعلومات و الرياضيات", + "econometrics": "العلوم الإجتماعية", + "electrical_engineering": "ألعلوم وتقنية المعلومات و الرياضيات", + "elementary_mathematics": "ألعلوم وتقنية المعلومات و الرياضيات", + "formal_logic": "العلوم الانسانية", + "global_facts": "علوم أخرى", + "high_school_biology": "ألعلوم وتقنية المعلومات و الرياضيات", + "high_school_chemistry": "ألعلوم وتقنية المعلومات و الرياضيات", + "high_school_computer_science": "ألعلوم وتقنية المعلومات و الرياضيات", + "high_school_european_history": "العلوم الانسانية", + "high_school_geography": "العلوم الإجتماعية", + "high_school_government_and_politics": "العلوم الإجتماعية", + "high_school_macroeconomics": "العلوم الإجتماعية", + "high_school_mathematics": "ألعلوم وتقنية المعلومات و الرياضيات", + "high_school_microeconomics": "العلوم الإجتماعية", + "high_school_physics": "ألعلوم وتقنية المعلومات و الرياضيات", + "high_school_psychology": "العلوم الإجتماعية", + "high_school_statistics": "ألعلوم وتقنية المعلومات و الرياضيات", + "high_school_us_history": "العلوم الانسانية", + "high_school_world_history": "العلوم الانسانية", + "human_aging": "علوم أخرى", + "human_sexuality": "العلوم الإجتماعية", + "international_law": "العلوم الانسانية", + "jurisprudence": "العلوم الانسانية", + "logical_fallacies": "العلوم الانسانية", + "machine_learning": "ألعلوم وتقنية المعلومات و الرياضيات", + "management": "علوم أخرى", + "marketing": "علوم أخرى", + "medical_genetics": "علوم أخرى", + "miscellaneous": "علوم أخرى", + "moral_disputes": "العلوم الانسانية", + "moral_scenarios": "العلوم الانسانية", + "nutrition": "علوم أخرى", + "philosophy": "العلوم الانسانية", + "prehistory": "العلوم الانسانية", + "professional_accounting": "علوم أخرى", + "professional_law": "العلوم الانسانية", + "professional_medicine": "علوم أخرى", + "professional_psychology": "العلوم الإجتماعية", + "public_relations": "العلوم الإجتماعية", + "security_studies": "العلوم الإجتماعية", + "sociology": "العلوم الإجتماعية", + "us_foreign_policy": "العلوم الإجتماعية", + "virology": "علوم أخرى", + "world_religions": "العلوم الانسانية", +} + + +def 
parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--base_yaml_path", required=True) + parser.add_argument("--save_prefix_path", default="ammlu") + parser.add_argument("--cot_prompt_path", default=None) + parser.add_argument("--task_prefix", default="") + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + + # get filename of base_yaml so we can `"include": ` it in our other YAMLs. + base_yaml_name = os.path.split(args.base_yaml_path)[-1] + with open(args.base_yaml_path, encoding="utf-8") as f: + base_yaml = yaml.full_load(f) + + if args.cot_prompt_path is not None: + import json + + with open(args.cot_prompt_path, encoding="utf-8") as f: + cot_file = json.load(f) + + for subject_eng, category in tqdm(SUBJECTS.items()): + if args.cot_prompt_path is not None: + description = cot_file[subject_eng] + else: + description = f"فم بعملية التقييم في مجال {category} \n\n" + + yaml_dict = { + "include": base_yaml_name, + "task": f"ammlu_{args.task_prefix}_{subject_eng}" + if args.task_prefix != "" + else f"ammlu_{subject_eng}", + "dataset_name": subject_eng, + "description": description, + } + + file_save_path = args.save_prefix_path + f"_{subject_eng}.yaml" + print(f"Saving yaml for subset {subject_eng} to {file_save_path}") + with open(file_save_path, "w", encoding="utf-8") as yaml_file: + yaml.dump( + yaml_dict, + yaml_file, + width=float("inf"), + allow_unicode=True, + default_style='"', + ) diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_abstract_algebra.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_abstract_algebra.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6448b2eefe7decc008a19ff306b83c306bdf9bcc --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_abstract_algebra.yaml @@ -0,0 +1,4 @@ +"dataset_name": "abstract_algebra" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_abstract_algebra" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_anatomy.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_anatomy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0ea332903d86f6f76e4e43718d6f8ef4b1f887ea --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_anatomy.yaml @@ -0,0 +1,4 @@ +"dataset_name": "anatomy" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_anatomy" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_astronomy.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_astronomy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..33e4d3e76f0808549fdae2e088c27dbef14c6035 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_astronomy.yaml @@ -0,0 +1,4 @@ +"dataset_name": "astronomy" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_astronomy" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_business_ethics.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_business_ethics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f86b317a22deca40e17043fe5ddf1823b15873cb --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_business_ethics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "business_ethics" +"description": "فم بعملية التقييم في مجال علوم أخرى 
\n\n" +"include": "_default_template_yaml" +"task": "ammlu_business_ethics" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_clinical_knowledge.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_clinical_knowledge.yaml new file mode 100644 index 0000000000000000000000000000000000000000..af040fbbeb75810c212d7e68db9010ee0ef69b17 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_clinical_knowledge.yaml @@ -0,0 +1,4 @@ +"dataset_name": "clinical_knowledge" +"description": "فم بعملية التقييم في مجال علوم أخرى \n\n" +"include": "_default_template_yaml" +"task": "ammlu_clinical_knowledge" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_college_biology.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_college_biology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0040902b6b40420777c1cfa3ea43f9e693745fb9 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_college_biology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_biology" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_college_biology" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_college_chemistry.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_college_chemistry.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d89e5d36d6c3e81b67cee20af8adaa64cdb69769 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_college_chemistry.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_chemistry" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_college_chemistry" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_college_computer_science.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_college_computer_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bd24ec782052fbdb2a18c6b271d7db7fd1eb0c21 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_college_computer_science.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_computer_science" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_college_computer_science" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_college_mathematics.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_college_mathematics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f86305ff9f813b755c629800f9dabae900fd234b --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_college_mathematics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_mathematics" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_college_mathematics" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_college_medicine.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_college_medicine.yaml new file mode 100644 index 0000000000000000000000000000000000000000..20ed05afe21d5256169d05a658b9a34bbbf9f830 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_college_medicine.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_medicine" +"description": "فم بعملية التقييم في مجال علوم أخرى \n\n" +"include": "_default_template_yaml" +"task": "ammlu_college_medicine" diff --git 
a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_college_physics.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_college_physics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a66b42a3fff3442e5aceb1a75e3f6472e63ad3b7 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_college_physics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_physics" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_college_physics" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_computer_security.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_computer_security.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f6d0edec21fe23c1f38f17a80990f9af70779759 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_computer_security.yaml @@ -0,0 +1,4 @@ +"dataset_name": "computer_security" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_computer_security" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_conceptual_physics.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_conceptual_physics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1d8b329adfbbb86221fbd7d467b691dfee67bb2c --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_conceptual_physics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "conceptual_physics" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_conceptual_physics" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_econometrics.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_econometrics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2ce1c23a555fb89e654430ed481199450204dd49 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_econometrics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "econometrics" +"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_econometrics" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_electrical_engineering.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_electrical_engineering.yaml new file mode 100644 index 0000000000000000000000000000000000000000..209d660fbf8209c1688822a6f8c9b4c94d5b3131 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_electrical_engineering.yaml @@ -0,0 +1,4 @@ +"dataset_name": "electrical_engineering" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_electrical_engineering" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_elementary_mathematics.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_elementary_mathematics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..afb9144da92bdc592e66f96133c538eb0c1829ef --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_elementary_mathematics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "elementary_mathematics" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_elementary_mathematics" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_formal_logic.yaml 
b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_formal_logic.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8b20a85896bf52fb16e7536b46174f2671240224 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_formal_logic.yaml @@ -0,0 +1,4 @@ +"dataset_name": "formal_logic" +"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_formal_logic" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_global_facts.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_global_facts.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8236238279cf72b117256b5faefc07ca6de6c3a3 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_global_facts.yaml @@ -0,0 +1,4 @@ +"dataset_name": "global_facts" +"description": "فم بعملية التقييم في مجال علوم أخرى \n\n" +"include": "_default_template_yaml" +"task": "ammlu_global_facts" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_biology.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_biology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..16bc3ab6b0b156367714db4b3f892517edaf35f3 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_biology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_biology" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_high_school_biology" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_chemistry.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_chemistry.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3f2e675e4c80caad0aa387b6445d27e0ed4c95a7 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_chemistry.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_chemistry" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_high_school_chemistry" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_computer_science.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_computer_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d6ab8409eb6648d80adca19929907f10b1f7c65e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_computer_science.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_computer_science" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_high_school_computer_science" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_european_history.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_european_history.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f22a5991753c71b8147ee368e771faf600b86693 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_european_history.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_european_history" +"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_high_school_european_history" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_geography.yaml 
b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_geography.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f180cee343c1df1aed2b443db0f39aab519167c1 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_geography.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_geography" +"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_high_school_geography" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_government_and_politics.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_government_and_politics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..88fe999fa3913051d3d43c3d8bbc7739d5567ff5 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_government_and_politics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_government_and_politics" +"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_high_school_government_and_politics" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_mathematics.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_mathematics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..17705ea14ab459635b2ff8ed4a7075a9912b27cd --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_mathematics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_mathematics" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_high_school_mathematics" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_microeconomics.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_microeconomics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1e74936e3133978eb348e3509214fe395dc8da20 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_microeconomics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_microeconomics" +"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_high_school_microeconomics" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_physics.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_physics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..368a384f7ed466b08cd96544dcf678570ca49be7 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_physics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_physics" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_high_school_physics" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_psychology.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_psychology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..224026225b85f638ae70afa46a55ee71e3fa082e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_psychology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_psychology" +"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_high_school_psychology" diff --git 
a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_statistics.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_statistics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..49c97a7358b40dd5b4b8215a3cdd4eba58805c84 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_statistics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_statistics" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_high_school_statistics" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_us_history.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_us_history.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f96371a82a30620dbf1a90e6af00b67c25a83079 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_us_history.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_us_history" +"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_high_school_us_history" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_world_history.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_world_history.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8dd7fc469acf0fc8fb15efbdf56ccc062bf84b4b --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_world_history.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_world_history" +"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_high_school_world_history" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_human_aging.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_human_aging.yaml new file mode 100644 index 0000000000000000000000000000000000000000..180ef373017943782c3a09a88066e839a499d00d --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_human_aging.yaml @@ -0,0 +1,4 @@ +"dataset_name": "human_aging" +"description": "فم بعملية التقييم في مجال علوم أخرى \n\n" +"include": "_default_template_yaml" +"task": "ammlu_human_aging" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_human_sexuality.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_human_sexuality.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d97b76f8434566ef32e7c925c839fe698e62b202 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_human_sexuality.yaml @@ -0,0 +1,4 @@ +"dataset_name": "human_sexuality" +"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_human_sexuality" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_international_law.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_international_law.yaml new file mode 100644 index 0000000000000000000000000000000000000000..46660c50894a3677529b536681f9ba3d82447229 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_international_law.yaml @@ -0,0 +1,4 @@ +"dataset_name": "international_law" +"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_international_law" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_jurisprudence.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_jurisprudence.yaml new file mode 
100644 index 0000000000000000000000000000000000000000..97f34f0fa4402e88fa496b441e579f6c3f123737 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_jurisprudence.yaml @@ -0,0 +1,4 @@ +"dataset_name": "jurisprudence" +"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_jurisprudence" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_logical_fallacies.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_logical_fallacies.yaml new file mode 100644 index 0000000000000000000000000000000000000000..594501a443277599336b34b9b5af0bd3877b3e5c --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_logical_fallacies.yaml @@ -0,0 +1,4 @@ +"dataset_name": "logical_fallacies" +"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_logical_fallacies" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_machine_learning.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_machine_learning.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ddf19efc638872477410d6a80ca8ae9e8d40df30 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_machine_learning.yaml @@ -0,0 +1,4 @@ +"dataset_name": "machine_learning" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_machine_learning" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_management.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_management.yaml new file mode 100644 index 0000000000000000000000000000000000000000..36780c08a5737939af46b1e9c34909c23e10f49c --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_management.yaml @@ -0,0 +1,4 @@ +"dataset_name": "management" +"description": "فم بعملية التقييم في مجال علوم أخرى \n\n" +"include": "_default_template_yaml" +"task": "ammlu_management" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_marketing.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_marketing.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1a7cf1f1a8bb258316d929d8dcdfc69d2ed4afe7 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_marketing.yaml @@ -0,0 +1,4 @@ +"dataset_name": "marketing" +"description": "فم بعملية التقييم في مجال علوم أخرى \n\n" +"include": "_default_template_yaml" +"task": "ammlu_marketing" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_miscellaneous.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_miscellaneous.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b81c28b3e761fb842a1301d592977cdac57fcefd --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_miscellaneous.yaml @@ -0,0 +1,4 @@ +"dataset_name": "miscellaneous" +"description": "فم بعملية التقييم في مجال علوم أخرى \n\n" +"include": "_default_template_yaml" +"task": "ammlu_miscellaneous" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_moral_disputes.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_moral_disputes.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a01ffb2c4a29fd8bbf87edd08937ea7801681a56 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_moral_disputes.yaml @@ -0,0 +1,4 @@ +"dataset_name": "moral_disputes" +"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n" +"include": "_default_template_yaml" 
+"task": "ammlu_moral_disputes" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_moral_scenarios.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_moral_scenarios.yaml new file mode 100644 index 0000000000000000000000000000000000000000..891f62917fc1077dc94c0f927bcddb046a2cfe20 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_moral_scenarios.yaml @@ -0,0 +1,4 @@ +"dataset_name": "moral_scenarios" +"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_moral_scenarios" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_nutrition.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_nutrition.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6e2c10a4108d0575656a0729170579182325b032 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_nutrition.yaml @@ -0,0 +1,4 @@ +"dataset_name": "nutrition" +"description": "فم بعملية التقييم في مجال علوم أخرى \n\n" +"include": "_default_template_yaml" +"task": "ammlu_nutrition" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_philosophy.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_philosophy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7211875da65c4f8e0c8b6d5b7554d27a24d42aef --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_philosophy.yaml @@ -0,0 +1,4 @@ +"dataset_name": "philosophy" +"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_philosophy" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_prehistory.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_prehistory.yaml new file mode 100644 index 0000000000000000000000000000000000000000..77a29ee976fadae33505888060195bdc496ca5a8 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_prehistory.yaml @@ -0,0 +1,4 @@ +"dataset_name": "prehistory" +"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_prehistory" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_professional_accounting.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_professional_accounting.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ea4e68c93d418d4de3062bb3e7408d087916daf8 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_professional_accounting.yaml @@ -0,0 +1,4 @@ +"dataset_name": "professional_accounting" +"description": "فم بعملية التقييم في مجال علوم أخرى \n\n" +"include": "_default_template_yaml" +"task": "ammlu_professional_accounting" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_professional_law.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_professional_law.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ce37c8f63a8a10f911e9b225096a7d5d9c815c7c --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_professional_law.yaml @@ -0,0 +1,4 @@ +"dataset_name": "professional_law" +"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_professional_law" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_professional_medicine.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_professional_medicine.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a7b1d184d9905e1a7ab05db84184e0ab004da526 --- /dev/null +++ 
b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_professional_medicine.yaml @@ -0,0 +1,4 @@ +"dataset_name": "professional_medicine" +"description": "فم بعملية التقييم في مجال علوم أخرى \n\n" +"include": "_default_template_yaml" +"task": "ammlu_professional_medicine" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_professional_psychology.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_professional_psychology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..65a721c92aeaaea67666c168dc9b1151e23ebab3 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_professional_psychology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "professional_psychology" +"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_professional_psychology" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_public_relations.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_public_relations.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e2bdc98c67144b44c6cce84e2a21cfb9cfecdc9e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_public_relations.yaml @@ -0,0 +1,4 @@ +"dataset_name": "public_relations" +"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_public_relations" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_sociology.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_sociology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bf92339270aac89ecf7a3acd29d217c4cdd31414 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_sociology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "sociology" +"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_sociology" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_us_foreign_policy.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_us_foreign_policy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6f934dab9b7b7215afc9e35fb8c04e2264ab8364 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_us_foreign_policy.yaml @@ -0,0 +1,4 @@ +"dataset_name": "us_foreign_policy" +"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_us_foreign_policy" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_virology.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_virology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f2b0ad6e42b9acdfcf3040baa73e2a155faaa4e7 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_virology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "virology" +"description": "فم بعملية التقييم في مجال علوم أخرى \n\n" +"include": "_default_template_yaml" +"task": "ammlu_virology" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_world_religions.yaml b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_world_religions.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dc433e13d9da096bc16b3e64c1138bedc04b4813 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_world_religions.yaml @@ -0,0 +1,4 @@ +"dataset_name": "world_religions" +"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_world_religions" diff --git 
a/lm-evaluation/build/lib/lm_eval/tasks/asdiv/README.md b/lm-evaluation/build/lib/lm_eval/tasks/asdiv/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e6e5aeec0403b8c854233089498c9248cf38f089 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/asdiv/README.md @@ -0,0 +1,56 @@ +# ASDiv + +### Paper + +Title: `ASDiv: A Diverse Corpus for Evaluating and Developing English Math Word Problem Solvers` + +Abstract: https://arxiv.org/abs/2106.15772 + +ASDiv (Academia Sinica Diverse MWP Dataset) is a diverse (in terms of both language +patterns and problem types) English math word problem (MWP) corpus for evaluating +the capability of various MWP solvers. Existing MWP corpora for studying AI progress +remain limited either in language usage patterns or in problem types. We thus present +a new English MWP corpus with 2,305 MWPs that cover more text patterns and most problem +types taught in elementary school. Each MWP is annotated with its problem type and grade +level (for indicating the level of difficulty). + +NOTE: We currently ignore formulas for answer generation. + +Homepage: https://github.com/chaochun/nlu-asdiv-dataset + + +### Citation + +``` +@misc{miao2021diverse, + title={A Diverse Corpus for Evaluating and Developing English Math Word Problem Solvers}, + author={Shen-Yun Miao and Chao-Chun Liang and Keh-Yih Su}, + year={2021}, + eprint={2106.15772}, + archivePrefix={arXiv}, + primaryClass={cs.AI} +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet. + +#### Tasks + +* `asdiv` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
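The `default.yaml` that follows uses a `loglikelihood` output type over the gold answer string, and its `doc_to_target` keeps only the numeric part of each annotated answer (the unit in parentheses is dropped). As a minimal sketch of how one record renders under those templates, assuming a record shaped like the `body`/`question`/`answer` fields of `EleutherAI/asdiv` (the example values below are invented for illustration):

```python
# Sketch: how the asdiv default.yaml templates render a single record.
# This doc is a hypothetical example, not an actual dataset row.
doc = {
    "body": "Seven red apples and two green apples are in the basket.",
    "question": "How many apples are in the basket?",
    "answer": "9 (apples)",
}

# doc_to_text: "{{body}}\nQuestion:{{question}}\nAnswer:"
prompt = f"{doc['body']}\nQuestion:{doc['question']}\nAnswer:"

# doc_to_target: "{{answer.split(' (')[0]}}" keeps the number, drops the unit
target = doc["answer"].split(" (")[0]

assert target == "9"
print(prompt)
```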
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/asdiv/default.yaml b/lm-evaluation/build/lib/lm_eval/tasks/asdiv/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bd3917c3c228dd8cca64fc40ffd27de55608f457 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/asdiv/default.yaml @@ -0,0 +1,16 @@ +task: asdiv +dataset_path: EleutherAI/asdiv +output_type: loglikelihood +validation_split: validation +doc_to_text: "{{body}}\nQuestion:{{question}}\nAnswer:" +doc_to_target: "{{answer.split(' (')[0]}}" +should_decontaminate: true +doc_to_decontamination_query: "{{body}} {{question}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/blimp/adjunct_island.yaml b/lm-evaluation/build/lib/lm_eval/tasks/blimp/adjunct_island.yaml new file mode 100644 index 0000000000000000000000000000000000000000..abdb4b8c898e71eac1da1de57b4ff9b425a32644 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/blimp/adjunct_island.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: adjunct_island +include: _template_yaml +task: blimp_adjunct_island diff --git a/lm-evaluation/build/lib/lm_eval/tasks/blimp/anaphor_gender_agreement.yaml b/lm-evaluation/build/lib/lm_eval/tasks/blimp/anaphor_gender_agreement.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9117dafad3c43968010d4c595d0ffafcc377de44 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/blimp/anaphor_gender_agreement.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: anaphor_gender_agreement +include: _template_yaml +task: blimp_anaphor_gender_agreement diff --git a/lm-evaluation/build/lib/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_irregular_1.yaml b/lm-evaluation/build/lib/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_irregular_1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..57f12ecade63b595378cb2c9aadf710725e9d4b0 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_irregular_1.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: determiner_noun_agreement_with_adj_irregular_1 +include: _template_yaml +task: blimp_determiner_noun_agreement_with_adj_irregular_1 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/blimp/determiner_noun_agreement_with_adjective_1.yaml b/lm-evaluation/build/lib/lm_eval/tasks/blimp/determiner_noun_agreement_with_adjective_1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4512e9176f98a9f2ec3f53de15657b97274809fb --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/blimp/determiner_noun_agreement_with_adjective_1.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: determiner_noun_agreement_with_adjective_1 +include: _template_yaml +task: blimp_determiner_noun_agreement_with_adjective_1 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/blimp/drop_argument.yaml b/lm-evaluation/build/lib/lm_eval/tasks/blimp/drop_argument.yaml new file mode 100644 index 0000000000000000000000000000000000000000..db3b1fed109c802774c1ac8e347a931febc89646 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/blimp/drop_argument.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: drop_argument +include: _template_yaml +task: blimp_drop_argument diff --git a/lm-evaluation/build/lib/lm_eval/tasks/blimp/existential_there_object_raising.yaml 
b/lm-evaluation/build/lib/lm_eval/tasks/blimp/existential_there_object_raising.yaml new file mode 100644 index 0000000000000000000000000000000000000000..765596462dce91f51b557fca254deef3a2ee325e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/blimp/existential_there_object_raising.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: existential_there_object_raising +include: _template_yaml +task: blimp_existential_there_object_raising diff --git a/lm-evaluation/build/lib/lm_eval/tasks/blimp/intransitive.yaml b/lm-evaluation/build/lib/lm_eval/tasks/blimp/intransitive.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1d5b7edbdc26833f7ae645889d8642077fd979bc --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/blimp/intransitive.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: intransitive +include: _template_yaml +task: blimp_intransitive diff --git a/lm-evaluation/build/lib/lm_eval/tasks/blimp/irregular_plural_subject_verb_agreement_1.yaml b/lm-evaluation/build/lib/lm_eval/tasks/blimp/irregular_plural_subject_verb_agreement_1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..537c7764f671636cfb781382397f525d0fba305a --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/blimp/irregular_plural_subject_verb_agreement_1.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: irregular_plural_subject_verb_agreement_1 +include: _template_yaml +task: blimp_irregular_plural_subject_verb_agreement_1 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/blimp/left_branch_island_simple_question.yaml b/lm-evaluation/build/lib/lm_eval/tasks/blimp/left_branch_island_simple_question.yaml new file mode 100644 index 0000000000000000000000000000000000000000..214de3c2edb49de48878e6baed1bf725c9728b98 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/blimp/left_branch_island_simple_question.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: left_branch_island_simple_question +include: _template_yaml +task: blimp_left_branch_island_simple_question diff --git a/lm-evaluation/build/lib/lm_eval/tasks/blimp/passive_1.yaml b/lm-evaluation/build/lib/lm_eval/tasks/blimp/passive_1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0dd6aca0535d448d9269ae1959063d687955a17f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/blimp/passive_1.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: passive_1 +include: _template_yaml +task: blimp_passive_1 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/blimp/sentential_negation_npi_licensor_present.yaml b/lm-evaluation/build/lib/lm_eval/tasks/blimp/sentential_negation_npi_licensor_present.yaml new file mode 100644 index 0000000000000000000000000000000000000000..df607e5c79e02ef8b284ce2b458ba5371951fc89 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/blimp/sentential_negation_npi_licensor_present.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: sentential_negation_npi_licensor_present +include: _template_yaml +task: blimp_sentential_negation_npi_licensor_present diff --git a/lm-evaluation/build/lib/lm_eval/tasks/blimp/superlative_quantifiers_2.yaml b/lm-evaluation/build/lib/lm_eval/tasks/blimp/superlative_quantifiers_2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ac031c4ecc1acf46bed9c5dbf333f140daa18155 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/blimp/superlative_quantifiers_2.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: superlative_quantifiers_2 +include: 
_template_yaml +task: blimp_superlative_quantifiers_2 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/blimp/wh_vs_that_no_gap.yaml b/lm-evaluation/build/lib/lm_eval/tasks/blimp/wh_vs_that_no_gap.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2221ce5fe0f55611003ab554d5f24aafad41bebf --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/blimp/wh_vs_that_no_gap.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: wh_vs_that_no_gap +include: _template_yaml +task: blimp_wh_vs_that_no_gap diff --git a/lm-evaluation/build/lib/lm_eval/tasks/scrolls/README.md b/lm-evaluation/build/lib/lm_eval/tasks/scrolls/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a90e00f4e729711fc6ea7ccd0c375e4686f8970d --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/scrolls/README.md @@ -0,0 +1,31 @@ +""" +SCROLLS: Standardized CompaRison Over Long Language Sequences +https://arxiv.org/abs/2201.03533 + +SCROLLS is a suite of datasets that require synthesizing information over long texts. +The benchmark includes seven natural language tasks across multiple domains, +including summarization, question answering, and natural language inference. + +Homepage: https://www.scrolls-benchmark.com/ + +Since SCROLLS tasks are generally longer than the maximum sequence length of many models, +it is possible to create "subset" tasks that contain only those samples whose tokenized length +is less than some pre-defined limit. For example, to create a subset of "Qasper" that would +be suitable for a model using the GPTNeoX tokenizer and a 4K maximum sequence length: + +``` +class QasperGPTNeoX4K(Qasper): + PRUNE_TOKENIZERS = ["EleutherAI/pythia-410m-deduped"] + PRUNE_MAX_TOKENS = 4096 + PRUNE_NUM_PROC = _num_cpu_cores() # optional, to speed up pruning of large datasets like NarrativeQA +``` + +`PRUNE_TOKENIZERS` can contain more than one tokenizer; this will include only samples that are +shorter than `PRUNE_MAX_TOKENS` for ALL of the tokenizers. This can be useful for comparing models +that use different tokenizers but the same maximum sequence length. + +Once the subset task class has been defined in `task.py`, it can be used by adding the class +to `lm_eval/tasks/__init__.py`. + +NOTE: GovReport may need `max_gen_toks` set larger for causal models. 
+""" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/scrolls/scrolls.yaml b/lm-evaluation/build/lib/lm_eval/tasks/scrolls/scrolls.yaml new file mode 100644 index 0000000000000000000000000000000000000000..da8d03e89e0fda15918a34357681ca98f4285ad8 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/scrolls/scrolls.yaml @@ -0,0 +1,16 @@ +group: scrolls +task: + - task: scrolls_qasper + class: !function task.Qasper + - task: scrolls_quality + class: !function task.QuALITY + - task: scrolls_narrativeqa + class: !function task.NarrativeQA + - task: scrolls_contractnli + class: !function task.ContractNLI + - task: scrolls_govreport + class: !function task.GovReport + - task: scrolls_summscreenfd + class: !function task.SummScreenFD + - task: scrolls_qmsum + class: !function task.QMSum diff --git a/lm-evaluation/build/lib/lm_eval/tasks/scrolls/task.py b/lm-evaluation/build/lib/lm_eval/tasks/scrolls/task.py new file mode 100644 index 0000000000000000000000000000000000000000..5b604e15d9305848705af087c6a1da5590f62039 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/scrolls/task.py @@ -0,0 +1,456 @@ +import re +from abc import abstractmethod +from functools import reduce + +import numpy as np +import transformers.data.metrics.squad_metrics as squad_metrics +from datasets import load_metric +from transformers import AutoTokenizer + +from lm_eval.api.instance import Instance +from lm_eval.api.metrics import mean +from lm_eval.api.task import Task + + +_CITATION = """ +@inproceedings{shaham-etal-2022-scrolls, + title = "{SCROLLS}: Standardized {C}ompa{R}ison Over Long Language Sequences", + author = "Shaham, Uri and + Segal, Elad and + Ivgi, Maor and + Efrat, Avia and + Yoran, Ori and + Haviv, Adi and + Gupta, Ankit and + Xiong, Wenhan and + Geva, Mor and + Berant, Jonathan and + Levy, Omer", + booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing", + month = dec, + year = "2022", + address = "Abu Dhabi, United Arab Emirates", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/2022.emnlp-main.823", + pages = "12007--12021" +} +""" + +# SCROLLS is formualted as a sequence-to-sequence task. 
+# To allow for evaluation of causal models, we'll +# reformulate these with appropriate prompts + + +def _download_metric(): + import os + import shutil + + from huggingface_hub import hf_hub_download + + scrolls_metric_path = hf_hub_download( + repo_id="tau/scrolls", repo_type="dataset", filename="metrics/scrolls.py" + ) + updated_scrolls_metric_path = ( + os.path.dirname(scrolls_metric_path) + + os.path.basename(scrolls_metric_path).replace(".", "_") + + ".py" + ) + shutil.copy(scrolls_metric_path, updated_scrolls_metric_path) + return updated_scrolls_metric_path + + +def _process_doc_prepended_question(doc): + # "When a query is given in addition to the raw text (as + # in QMSum, Qasper, NarrativeQA, QuALITY, and ContractNLI), + # we prepend it to the text, using two newlines as a natural separator" + input = doc["input"] + split = input.find("\n\n") + return { + "id": doc["id"], + "pid": doc["pid"], + "input": input, + "outputs": doc["outputs"], + "question": input[0:split], + "text": input[split + 2 :], + } + + +def _drop_duplicates_in_input(untokenized_dataset): + # from scrolls/evaluator/dataset_evaluator.py + + indices_to_keep = [] + id_to_idx = {} + outputs = [] + for i, (id_, output) in enumerate( + zip(untokenized_dataset["id"], untokenized_dataset["output"]) + ): + if id_ in id_to_idx: + outputs[id_to_idx[id_]].append(output) + continue + indices_to_keep.append(i) + id_to_idx[id_] = len(outputs) + outputs.append([output]) + untokenized_dataset = untokenized_dataset.select(indices_to_keep).flatten_indices() + untokenized_dataset = untokenized_dataset.remove_columns("output") + untokenized_dataset = untokenized_dataset.add_column("outputs", outputs) + return untokenized_dataset + + +def _num_cpu_cores(): + # https://stackoverflow.com/questions/1006289/how-to-find-out-the-number-of-cpus-using-python/55423170#55423170 + try: + import psutil + + return psutil.cpu_count(logical=False) + except ImportError: + import os + + return len(os.sched_getaffinity(0)) + + +class _SCROLLSTask(Task): + VERSION = 2 + DATASET_PATH = "tau/scrolls" + DATASET_NAME = None + PRUNE_TOKENIZERS = None + PRUNE_MAX_TOKENS = None + PRUNE_NUM_PROC = None + + def __init__(self): + super().__init__() + if self.DATASET_NAME is not None: + self.metric = load_metric(_download_metric(), config_name=self.DATASET_NAME) + + def has_training_docs(self): + return True + + def has_validation_docs(self): + return True + + def has_test_docs(self): + return False + + def training_docs(self): + for doc in self.dataset["train"]: + yield from self._process_doc(doc) + + def validation_docs(self): + for doc in self.dataset["validation"]: + yield from self._process_doc(doc) + + def should_decontaminate(self): + return True + + def doc_to_decontamination_query(self, doc): + return doc["input"] + + def download(self, *args, **kwargs): + super().download(*args, **kwargs) + del self.dataset["test"] + for split in self.dataset: + self.dataset[split] = _drop_duplicates_in_input(self.dataset[split]) + if self.PRUNE_TOKENIZERS is not None: + self.prune() + + def _get_prune_text(self, sample): + return self.doc_to_text(self._process_doc(sample)[0]) + + def prune(self): + """Create a pruned version of a SCROLLS task dataset containing only inputs + that are less than `PRUNE_MAX_TOKENS` when tokenized by each tokenizer + """ + + tokenizers = [ + AutoTokenizer.from_pretrained(tokenizer) + for tokenizer in self.PRUNE_TOKENIZERS + ] + cache = {} + + def _filter(sample): + text = self._get_prune_text(sample) + cached = cache.get(text, None) + if 
cached is None: + for tokenizer in tokenizers: + if len(tokenizer(text).input_ids) > self.PRUNE_MAX_TOKENS: + cache[text] = False + return False + cache[text] = True + return True + else: + return cached + + self.dataset = self.dataset.filter(_filter, num_proc=self.PRUNE_NUM_PROC) + + def doc_to_target(self, doc): + return " " + ", ".join(doc["outputs"]) + + def doc_to_text(self, doc): + return f"{doc['text']}\n\nQuestion: {doc['question']}\nAnswer:" + + def higher_is_better(self): + return {x: True for x in self._scrolls_metrics().keys()} + + @abstractmethod + def _scrolls_metrics(self): + pass + + def _make_compute_metrics(self, value): + def compute_metrics(samples): + predictions, references = zip(*samples) # unzip, if you will + computed = self.metric.compute( + predictions=predictions, references=references + ) + return computed[value] + + return compute_metrics + + def aggregation(self): + return { + key: self._make_compute_metrics(value) + for key, value in self._scrolls_metrics().items() + } + + +class _SCROLLSMultipleChoiceTask(_SCROLLSTask): + def __post_init__(self): + self.metric = None + + def _scrolls_metrics(self): + return None + + def aggregation(self): + return {"em": mean, "acc": mean, "acc_norm": mean} + + def higher_is_better(self): + return {"em": True, "acc": True, "acc_norm": True} + + def process_results(self, doc, results): + gold = doc["gold"] + + lls, _ = zip(*results) + acc = 1.0 if np.argmax(lls) == gold else 0.0 + completion_len = np.array([float(len(i)) for i in doc["choices"]]) + acc_norm = 1.0 if np.argmax(lls / completion_len) == gold else 0.0 + + return { + "acc": acc, + "acc_norm": acc_norm, + "em": acc_norm * 100.0, + } + + def construct_requests(self, doc, ctx, **kwargs): + request_list = [ + Instance( + request_type="loglikelihood", + doc=doc, + arguments=(ctx, " {}".format(choice)), + idx=i, + **kwargs, + ) + for i, choice in enumerate(doc["choices"]) + ] + return request_list + + +class _SCROLLSSummaryTask(_SCROLLSTask): + def _process_doc(self, doc): + return [doc] + + def _scrolls_metrics(self): + return { + "rouge1": "rouge/rouge1", + "rouge2": "rouge/rouge2", + "rougeL": "rouge/rougeL", + } + + def process_results(self, doc, results): + return { + "rouge1": (results[0], doc["outputs"]), + "rouge2": (results[0], doc["outputs"]), + "rougeL": (results[0], doc["outputs"]), + } + + def construct_requests(self, doc, ctx, **kwargs): + return Instance( + request_type="generate_until", + doc=doc, + arguments=(ctx, {"until": ["\n"]}), + idx=0, + **kwargs, + ) + + def doc_to_text(self, doc): + return f"{doc['input']}\n\nQuestion: What is a summary of the preceding text?\nAnswer:" + + +class Qasper(_SCROLLSTask): + """A Dataset of Information-Seeking Questions and Answers Anchored in Research Papers + https://arxiv.org/abs/2105.03011 + """ + + DATASET_NAME = "qasper" + + def _process_doc(self, doc): + doc = _process_doc_prepended_question(doc) + doc["is_yes_no"] = reduce( + lambda prev, cur: prev + and squad_metrics.normalize_answer(cur) in ["yes", "no"], + doc["outputs"], + True, + ) + return [doc] + + def _scrolls_metrics(self): + return {"f1": "f1"} + + def process_results(self, doc, results): + if doc["is_yes_no"]: + prediction = " yes" if results[0] > results[1] else " no" + elif len(results[0].strip()) == 0: + prediction = "Unanswerable" + else: + prediction = results[0] + return {"f1": (prediction, doc["outputs"])} + + def construct_requests(self, doc, ctx, **kwargs): + if doc["is_yes_no"]: + return [ + Instance( + request_type="loglikelihood", + 
doc=doc, + arguments=(ctx, " yes"), + idx=0, + **kwargs, + ), + Instance( + request_type="loglikelihood", + doc=doc, + arguments=(ctx, " no"), + idx=1, + **kwargs, + ), + ] + else: + return Instance( + request_type="generate_until", + doc=doc, + arguments=(ctx, {"until": ["\n"]}), + idx=0, + **kwargs, + ) + + +class QuALITY(_SCROLLSMultipleChoiceTask): + """QuALITY: Question Answering with Long Input Texts, Yes! + https://arxiv.org/abs/2112.08608 + """ + + DATASET_NAME = "quality" + _multiple_choice_pattern = re.compile(r" *\([A-D]\) *") + + @staticmethod + def _normalize_answer(text): + return " ".join(text.split()).strip() + + def _process_doc(self, doc): + doc = _process_doc_prepended_question(doc) + + split = doc["text"].find("\n\n", doc["text"].find("(D)")) + choices_text = doc["text"][:split] + + doc["text"] = doc["text"][split:].strip() + doc["choices"] = [ + QuALITY._normalize_answer(choice) + for choice in re.split(QuALITY._multiple_choice_pattern, choices_text)[1:] + ] + doc["gold"] = doc["choices"].index(QuALITY._normalize_answer(doc["outputs"][0])) + + return [doc] + + +class NarrativeQA(_SCROLLSTask): + """The NarrativeQA Reading Comprehension Challenge + https://arxiv.org/abs/1712.07040 + """ + + DATASET_NAME = "narrative_qa" + + def _process_doc(self, doc): + return [_process_doc_prepended_question(doc)] + + def _scrolls_metrics(self): + return {"f1": "f1"} + + def _get_prune_text(self, doc): + # pruning narrativeqa takes forever -- let's cheat a bit + # and just cache on the text, not the question, since + # the dataset is different questions about the same large + # documents + return self._process_doc(doc)[0]["text"] + + def process_results(self, doc, results): + return {"f1": (results[0], doc["outputs"])} + + def construct_requests(self, doc, ctx, **kwargs): + return Instance( + request_type="generate_until", + doc=doc, + arguments=(ctx, {"until": ["\n"]}), + idx=0, + **kwargs, + ) + + +class ContractNLI(_SCROLLSMultipleChoiceTask): + """ContractNLI: A Dataset for Document-level Natural Language Inference for Contracts + https://arxiv.org/abs/2110.01799 + """ + + DATASET_NAME = "contract_nli" + CHOICES = ["Not mentioned", "Entailment", "Contradiction"] + + def _process_doc(self, doc): + doc = _process_doc_prepended_question(doc) + doc["choices"] = ContractNLI.CHOICES + doc["gold"] = ContractNLI.CHOICES.index(doc["outputs"][0]) + return [doc] + + def doc_to_text(self, doc): + return f"{doc['text']}\n\nHypothesis: {doc['question']}\nConclusion:" + + +class GovReport(_SCROLLSSummaryTask): + """Efficient Attentions for Long Document Summarization + https://arxiv.org/abs/2104.02112 + + Note: The average length of the reference summaries is ~3,000 + characters, or ~600 tokens as tokenized by GPT-NeoX. For causal models, + it is recommended to set `max_gen_toks` sufficiently large (e.g. 1024) + to allow a full summary to be generated. + """ + + DATASET_NAME = "gov_report" + + +class SummScreenFD(_SCROLLSSummaryTask): + """SummScreen: A Dataset for Abstractive Screenplay Summarization + https://arxiv.org/abs/2104.07091 + """ + + DATASET_NAME = "summ_screen_fd" + + +class QMSum(_SCROLLSSummaryTask): + """QMSum: A New Benchmark for Query-based Multi-domain + Meeting Summarization + + https://arxiv.org/abs/2104.05938 + """ + + DATASET_NAME = "qmsum" + + def _process_doc(self, doc): + return [_process_doc_prepended_question(doc)] + + def doc_to_text(self, doc): + return f"{doc['text']}\n\nQuestion: {doc['question']}\nAnswer:"
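For reference, a minimal sketch of the scoring logic in `_SCROLLSMultipleChoiceTask.process_results` above, which QuALITY and ContractNLI inherit; the loglikelihood values below are invented for illustration:

```python
import numpy as np

# Invented ContractNLI-style doc and per-choice loglikelihoods.
doc = {"gold": 1, "choices": ["Not mentioned", "Entailment", "Contradiction"]}
lls = np.array([-14.0, -6.0, -13.0])  # loglikelihood of each choice continuation

# acc: pick the choice with the highest raw loglikelihood.
acc = 1.0 if np.argmax(lls) == doc["gold"] else 0.0

# acc_norm: normalize by choice length in characters so that longer
# choices are not penalized for accumulating more log-probability mass.
completion_len = np.array([float(len(c)) for c in doc["choices"]])
acc_norm = 1.0 if np.argmax(lls / completion_len) == doc["gold"] else 0.0

print({"acc": acc, "acc_norm": acc_norm, "em": acc_norm * 100.0})
# -> {'acc': 1.0, 'acc_norm': 1.0, 'em': 100.0}
```

Note that `em` is simply `acc_norm` scaled to a percentage, exactly as in `process_results` above.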