diff --git a/lm-evaluation/lm_eval/tasks/anli/README.md b/lm-evaluation/lm_eval/tasks/anli/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ba3f99d4826f0604f583772a2b48fe676a6f3e06 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/anli/README.md @@ -0,0 +1,56 @@ +# ANLI + +### Paper + +Title: `Adversarial NLI: A New Benchmark for Natural Language Understanding` + +Paper Link: https://arxiv.org/abs/1910.14599 + +Adversarial NLI (ANLI) is a dataset collected via an iterative, adversarial +human-and-model-in-the-loop procedure. It consists of three rounds that progressively +increase in difficulty and complexity, and each question-answer pair includes annotator- +provided explanations. + +Homepage: https://github.com/facebookresearch/anli + +### Citation + +``` +@inproceedings{nie-etal-2020-adversarial, + title = "Adversarial {NLI}: A New Benchmark for Natural Language Understanding", + author = "Nie, Yixin and + Williams, Adina and + Dinan, Emily and + Bansal, Mohit and + Weston, Jason and + Kiela, Douwe", + booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", + year = "2020", + publisher = "Association for Computational Linguistics", +} +``` + +### Groups and Tasks + +#### Groups + +* `anli`: Evaluates `anli_r1`, `anli_r2`, and `anli_r3` + +#### Tasks +* `anli_r1`: The data collected adversarially in the first round. +* `anli_r2`: The data collected adversarially in the second round, after training on the previous round's data. +* `anli_r3`: The data collected adversarially in the third round, after training on data from all previous rounds. + + +### Checklist + +For adding novel benchmarks/datasets to the library: + * [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
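As a usage note for the README above, here is a minimal, hedged sketch of running these tasks through the harness's Python API. The model name is only a placeholder, and exact keyword arguments may differ slightly between harness versions:

```python
# Hedged sketch: evaluate the ANLI rounds with lm-evaluation-harness.
# "EleutherAI/pythia-160m" is a placeholder; any Hugging Face causal LM works.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",
    tasks=["anli_r1", "anli_r2", "anli_r3"],  # or simply ["anli"] for the group
    num_fewshot=0,
)
print(results["results"])  # per-task accuracy, as configured in each task's metric_list
```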
diff --git a/lm-evaluation/lm_eval/tasks/anli/anli_r1.yaml b/lm-evaluation/lm_eval/tasks/anli/anli_r1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bcf7674ee1bfc91f35e1566a6ddc5dc946c0ba72 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/anli/anli_r1.yaml @@ -0,0 +1,26 @@ +group: + - anli +task: anli_r1 +dataset_path: anli +dataset_name: null +output_type: multiple_choice +training_split: train_r1 +validation_split: dev_r1 +test_split: test_r1 +doc_to_text: "{{premise}}\nQuestion: {{hypothesis}} True, False, or Neither?\nAnswer:" +# True = entailment +# False = contradiction +# Neither = neutral +doc_to_target: "{{['True', 'Neither', 'False'][label]}}" +doc_to_choice: + - "True" + - "Neither" + - "False" +should_decontaminate: true +doc_to_decontamination_query: premise +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation/lm_eval/tasks/anli/anli_r2.yaml b/lm-evaluation/lm_eval/tasks/anli/anli_r2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..85f28d67cf230fa36cd38dd8d6a345f6e679c53e --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/anli/anli_r2.yaml @@ -0,0 +1,5 @@ +include: anli_r1.yaml +task: anli_r2 +training_split: train_r2 +validation_split: dev_r2 +test_split: test_r2 diff --git a/lm-evaluation/lm_eval/tasks/anli/anli_r3.yaml b/lm-evaluation/lm_eval/tasks/anli/anli_r3.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6b9f98a867f7d03b90e84a425dc8b044b4cc96fb --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/anli/anli_r3.yaml @@ -0,0 +1,5 @@ +include: anli_r1.yaml +task: anli_r3 +training_split: train_r3 +validation_split: dev_r3 +test_split: test_r3 diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/README.md b/lm-evaluation/lm_eval/tasks/eus_exams/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f44c5f04ed6eaded120ac7d9da177004a02483a7 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/README.md @@ -0,0 +1,49 @@ +# EusExams + +### Paper + +Title: Latxa: An Open Language Model and Evaluation Suite for Basque + +Abstract: https://arxiv.org/abs/2403.20266 + +EusExams is a collection of tests designed to prepare individuals for Public Service examinations conducted by several Basque institutions, including the public health system Osakidetza, the Basque Government, the City Councils of Bilbao and Gasteiz, and the University of the Basque Country (UPV/EHU). Within each of these groups, there are different exams for public positions, such as administrative and assistant roles. Each multiple-choice question contains 2 to 4 choices (3.90 on average) and one correct answer. The dataset is mostly parallel with 16k questions in Basque and 18k in Spanish. + +Homepage: https://github.com/hitz-zentroa/latxa + + +### Citation + +``` +@misc{etxaniz2024latxa, + title={Latxa: An Open Language Model and Evaluation Suite for Basque}, + author={Julen Etxaniz and Oscar Sainz and Naiara Perez and Itziar Aldabe and German Rigau and Eneko Agirre and Aitor Ormazabal and Mikel Artetxe and Aitor Soroa}, + year={2024}, + eprint={2403.20266}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Groups and Tasks + +#### Groups + +* `eus_exams_eu`: The Basque version of the exams. +* `eus_exams_es`: The Spanish version of the exams. + +#### Tasks + +Basque and Spanish versions of the exams are available as separate tasks starting with `eus_exams_eu` and `eus_exams_es` respectively. 
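The mapping from Hugging Face dataset configs to harness task names is mechanical. The sketch below is illustrative only; the config names are a small sample taken from the YAML files added in this PR, not the full list:

```python
# Illustrative sketch of the EusExams naming convention: each dataset config
# becomes a task named "eus_exams_<config>", and the language group is read
# off the "eu_"/"es_" prefix of the config name.
configs = ["eu_ejlaguntza", "eu_opebilbaoeu", "es_ejadministrativo", "es_opebilbao"]
for config in configs:
    task = f"eus_exams_{config}"
    group = "eus_exams_es" if config.startswith("es_") else "eus_exams_eu"
    print(f"{config:>22} -> task: {task:<30} group: {group}")
```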
+ +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/configs.py b/lm-evaluation/lm_eval/tasks/eus_exams/configs.py new file mode 100644 index 0000000000000000000000000000000000000000..993faa9f5dda1df2b00301fb00367f75e58a14de --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/configs.py @@ -0,0 +1,67 @@ +import argparse +import json + +import requests +import yaml + + +# get the list of dataset configs from the Hugging Face datasets server +response = requests.get( + "https://datasets-server.huggingface.co/splits?dataset=HiTZ%2FEusExams", timeout=5 +) +response_json = json.loads(response.text) +CONFIGS = [split["config"] for split in response_json["splits"]] + + +def gen_config_yamls(output_dir: str, overwrite: bool) -> None: + """ + Generate a yaml file for each configuration. + + :param output_dir: The directory to output the files to. + :param overwrite: Whether to overwrite files if they already exist. + """ + err = [] + for config in CONFIGS: + file_name = f"eus_exams_{config}.yaml" + try: + with open(f"{output_dir}/{file_name}", "w" if overwrite else "x") as f: + f.write("# Generated by utils.py\n") + yaml.dump( + { + "include": "eus_exams_es" + if config.startswith("es_") + else "eus_exams_eu", + "dataset_name": config, + "task": f"eus_exams_{config}", + }, + f, + ) + except FileExistsError: + err.append(file_name) + + if len(err) > 0: + raise FileExistsError( + "Files were not created because they already exist (use --overwrite flag):" + f" {', '.join(err)}" + ) + + +def main() -> None: + """Parse CLI args and generate configuration-specific yaml files.""" + parser = argparse.ArgumentParser() + parser.add_argument( + "--overwrite", + default=False, + action="store_true", + help="Overwrite files if they already exist", + ) + parser.add_argument( + "--output-dir", default=".", help="Directory to write yaml files to" + ) + args = parser.parse_args() + + gen_config_yamls(output_dir=args.output_dir, overwrite=args.overwrite) + + +if __name__ == "__main__": + main() diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams new file mode 100644 index 0000000000000000000000000000000000000000..d1d2af731485ac26b2792b5de29d4da681bf97ad --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams @@ -0,0 +1,18 @@ +dataset_path: HiTZ/EusExams +dataset_name: null +validation_split: null +test_split: test +fewshot_split: test +process_docs: !function utils.process_docs +output_type: multiple_choice +doc_to_choice: ["A", "B", "C", "D"] +doc_to_target: answer +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 0.0 diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es new file mode 100644 index
0000000000000000000000000000000000000000..25886606943ec02e216d89df64cdc777a405cdaf --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es @@ -0,0 +1,4 @@ +include: eus_exams +group: + - eus_exams_es +doc_to_text: "Pregunta: {{question}}\nA: {{candidates[0]}}\nB: {{candidates[1]}}\nC: {{candidates[2]}}\nD: {{candidates[3]}}\nRespuesta:" diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_ejadministrativo.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_ejadministrativo.yaml new file mode 100644 index 0000000000000000000000000000000000000000..22b93ed6b7c2964e0d64c3a5e3aa299a81752bf2 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_ejadministrativo.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_ejadministrativo +include: eus_exams_es +task: eus_exams_es_ejadministrativo diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_ejtecnico.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_ejtecnico.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0d0b011c9ab8ee58aa7cad819dd7475880512972 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_ejtecnico.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_ejtecnico +include: eus_exams_es +task: eus_exams_es_ejtecnico diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeayuntamientovitoria.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeayuntamientovitoria.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0d43c0b4161bf76988ace9416ac1bf1147a1f5c9 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeayuntamientovitoria.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeayuntamientovitoria +include: eus_exams_es +task: eus_exams_es_opeayuntamientovitoria diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opebilbao.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opebilbao.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5cb33cbbddc9cd06ea24b7356fa19812bdf7a344 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opebilbao.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opebilbao +include: eus_exams_es +task: eus_exams_es_opebilbao diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehuadmin.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehuadmin.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e9cacfbd3f3e79dc24436daef4a1e9e5a1b5709d --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehuadmin.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeehuadmin +include: eus_exams_es +task: eus_exams_es_opeehuadmin diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehubiblio.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehubiblio.yaml new file mode 100644 index 0000000000000000000000000000000000000000..728d7cfb0e7c79af156ff50bdb8379f032c3f01b --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehubiblio.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeehubiblio +include: eus_exams_es +task: eus_exams_es_opeehubiblio diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehuderecho.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehuderecho.yaml new file mode 100644 index 0000000000000000000000000000000000000000..13e1d9de4bf434854e835b98455a3c26c46e96ac --- /dev/null +++ 
b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehuderecho.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeehuderecho +include: eus_exams_es +task: eus_exams_es_opeehuderecho diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehueconomicas.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehueconomicas.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6625c9ce5a2501aa607cacf148531e0c3220652b --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehueconomicas.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeehueconomicas +include: eus_exams_es +task: eus_exams_es_opeehueconomicas diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehuempresariales.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehuempresariales.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4f61d3a1433f0f8ea0dd2ab62d9eb2c291697a47 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehuempresariales.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeehuempresariales +include: eus_exams_es +task: eus_exams_es_opeehuempresariales diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehusubalterno.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehusubalterno.yaml new file mode 100644 index 0000000000000000000000000000000000000000..96cc86b402af1777e075530b3258e3a9089d539f --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehusubalterno.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeehusubalterno +include: eus_exams_es +task: eus_exams_es_opeehusubalterno diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehutecnico.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehutecnico.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0641fc2e7766d5f93ba1c45c83761f6e5b57560a --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehutecnico.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeehutecnico +include: eus_exams_es +task: eus_exams_es_opeehutecnico diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehutecnicob.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehutecnicob.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a338a1ab0d542368acf179b0611a354ddb71d293 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehutecnicob.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeehutecnicob +include: eus_exams_es +task: eus_exams_es_opeehutecnicob diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiadmin.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiadmin.yaml new file mode 100644 index 0000000000000000000000000000000000000000..85c771cdb3ead8511963b811043891958f19e340 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiadmin.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeosakiadmin +include: eus_exams_es +task: eus_exams_es_opeosakiadmin diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiaux.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiaux.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2d61825b0beac1f50137ff42d74b6b649f30ea4e --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiaux.yaml @@ -0,0 +1,4 @@ +# Generated 
by utils.py +dataset_name: es_opeosakiaux +include: eus_exams_es +task: eus_exams_es_opeosakiaux diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiauxenf.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiauxenf.yaml new file mode 100644 index 0000000000000000000000000000000000000000..08fe0ed6c014ce69d7655c94ccd9dfdf029c8ce1 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiauxenf.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeosakiauxenf +include: eus_exams_es +task: eus_exams_es_opeosakiauxenf diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakicelador.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakicelador.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2a61b6878e9684ce0b35be5dc2fd25170cf9bf44 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakicelador.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeosakicelador +include: eus_exams_es +task: eus_exams_es_opeosakicelador diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakijuridico.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakijuridico.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a62dc8001f16a22e56c7bea270ad3e6f97ecf5fd --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakijuridico.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeosakijuridico +include: eus_exams_es +task: eus_exams_es_opeosakijuridico diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakioperario.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakioperario.yaml new file mode 100644 index 0000000000000000000000000000000000000000..df72481742ea637ec2fbe546445cd450bd1bc632 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakioperario.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeosakioperario +include: eus_exams_es +task: eus_exams_es_opeosakioperario diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakitecnico.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakitecnico.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4b5b397b88ed5318706a0e6f402acf34440761fe --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakitecnico.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeosakitecnico +include: eus_exams_es +task: eus_exams_es_opeosakitecnico diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakivarios.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakivarios.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fe98dc76aa4689ec46be58d05326adf6216264df --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakivarios.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeosakivarios +include: eus_exams_es +task: eus_exams_es_opeosakivarios diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza1c.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza1c.yaml new file mode 100644 index 0000000000000000000000000000000000000000..080f99fcf28f738117702d9ece800bdeed209b90 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza1c.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_osakidetza1c +include: eus_exams_es +task: eus_exams_es_osakidetza1c diff 
--git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza2c.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza2c.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4ee8ab46c6e4e1dd712b5d5865fb87abac5ac89b --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza2c.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_osakidetza2c +include: eus_exams_es +task: eus_exams_es_osakidetza2c diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza3c.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza3c.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2974a11d797474ed13257ac46e7994c31253f83d --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza3c.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_osakidetza3c +include: eus_exams_es +task: eus_exams_es_osakidetza3c diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza6c.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza6c.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d94ef2b9f4e494a4ba21d0fc4c902d3ad125616a --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza6c.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_osakidetza6c +include: eus_exams_es +task: eus_exams_es_osakidetza6c diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza7c.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza7c.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1fc30ce353f83ccc717a504a50a7bd611f76e6c6 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza7c.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_osakidetza7c +include: eus_exams_es +task: eus_exams_es_osakidetza7c diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza8c.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza8c.yaml new file mode 100644 index 0000000000000000000000000000000000000000..38f7ee3c39af34bc0516780a8717172950fc955a --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza8c.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_osakidetza8c +include: eus_exams_es +task: eus_exams_es_osakidetza8c diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza9c.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza9c.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7b23ff670764634cb78bd5a4cbb9f141dad674d2 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza9c.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_osakidetza9c +include: eus_exams_es +task: eus_exams_es_osakidetza9c diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu new file mode 100644 index 0000000000000000000000000000000000000000..95b82388df8930a2fe7385fff72f22c4cc516ab4 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu @@ -0,0 +1,4 @@ +include: eus_exams +group: + - eus_exams_eu +doc_to_text: "Galdera: {{question}}\nA: {{candidates[0]}}\nB: {{candidates[1]}}\nC: {{candidates[2]}}\nD: {{candidates[3]}}\nErantzuna:" diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_ejlaguntza.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_ejlaguntza.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..cf2806c1e6675c491ad5d1eaea54698bf8aa8fe8 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_ejlaguntza.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_ejlaguntza +include: eus_exams_eu +task: eus_exams_eu_ejlaguntza diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_ejlaguntzaile.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_ejlaguntzaile.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1d713a32442fc35d16b735c8617b0ee2d7327f04 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_ejlaguntzaile.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_ejlaguntzaile +include: eus_exams_eu +task: eus_exams_eu_ejlaguntzaile diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_ejteknikari.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_ejteknikari.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7b528b9d4ce7ebc2ffc92af84a25e417f2e86929 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_ejteknikari.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_ejteknikari +include: eus_exams_eu +task: eus_exams_eu_ejteknikari diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opebilbaoeu.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opebilbaoeu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d15dbc6101ade859261bed36564eaf51e8a53f16 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opebilbaoeu.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_opebilbaoeu +include: eus_exams_eu +task: eus_exams_eu_opebilbaoeu diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuadmineu.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuadmineu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..85b9c9047759b6652435abc84944770ff429daaa --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuadmineu.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_opeehuadmineu +include: eus_exams_eu +task: eus_exams_eu_opeehuadmineu diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuauxeu.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuauxeu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e72082486395abefcebda07de380b670d588589a --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuauxeu.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_opeehuauxeu +include: eus_exams_eu +task: eus_exams_eu_opeehuauxeu diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehubiblioeu.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehubiblioeu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0ff2ab853fc839c5ae2b88520767b8b3d4a60f4d --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehubiblioeu.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_opeehubiblioeu +include: eus_exams_eu +task: eus_exams_eu_opeehubiblioeu diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehueconomicaseu.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehueconomicaseu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..713f33234153c84778faeca25f4807cdf9812b45 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehueconomicaseu.yaml @@ 
-0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_opeehueconomicaseu +include: eus_exams_eu +task: eus_exams_eu_opeehueconomicaseu diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuempresarialeseu.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuempresarialeseu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8dddd9bc76cec647dafbc7400ae11d3e5147de83 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuempresarialeseu.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_opeehuempresarialeseu +include: eus_exams_eu +task: eus_exams_eu_opeehuempresarialeseu diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehusubalternoeu.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehusubalternoeu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b02a451dd957a11c5db2810fa98ab0bccd62c9b3 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehusubalternoeu.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_opeehusubalternoeu +include: eus_exams_eu +task: eus_exams_eu_opeehusubalternoeu diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehutecnicoeu.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehutecnicoeu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3792e12aa0285a3cf3ce56b9d6158ade836c4c38 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehutecnicoeu.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_opeehutecnicoeu +include: eus_exams_eu +task: eus_exams_eu_opeehutecnicoeu diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuteknikarib.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuteknikarib.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b9f5cc612ac9776a328670d4273e76934172fd81 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuteknikarib.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_opeehuteknikarib +include: eus_exams_eu +task: eus_exams_eu_opeehuteknikarib diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiadmineu.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiadmineu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cf19e09941bd0c7bb10db7f5398fb2398f1a0fd2 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiadmineu.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_opeosakiadmineu +include: eus_exams_eu +task: eus_exams_eu_opeosakiadmineu diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiauxenfeu.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiauxenfeu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..719039915aec71a138a860397709b85549718078 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiauxenfeu.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_opeosakiauxenfeu +include: eus_exams_eu +task: eus_exams_eu_opeosakiauxenfeu diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiauxeu.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiauxeu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9d0891886cd550219fb9bfcc7209f6d5fb85ad5d --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiauxeu.yaml @@ -0,0 +1,4 @@ +# Generated by 
utils.py +dataset_name: eu_opeosakiauxeu +include: eus_exams_eu +task: eus_exams_eu_opeosakiauxeu diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiceladoreu.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiceladoreu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..af82c87bdffc84c8da3f666d944740eb0db0712d --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiceladoreu.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_opeosakiceladoreu +include: eus_exams_eu +task: eus_exams_eu_opeosakiceladoreu diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakienfeu.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakienfeu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..10b853e399f017255c74b7eb56275df149a7f055 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakienfeu.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_opeosakienfeu +include: eus_exams_eu +task: eus_exams_eu_opeosakienfeu diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakioperarioeu.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakioperarioeu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8f3cf7c490106959e4b07bef2140f0197835d16d --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakioperarioeu.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_opeosakioperarioeu +include: eus_exams_eu +task: eus_exams_eu_opeosakioperarioeu diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakitecnicoeu.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakitecnicoeu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f44e4994e3820dd6263835448b566a8c2ed17a13 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakitecnicoeu.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_opeosakitecnicoeu +include: eus_exams_eu +task: eus_exams_eu_opeosakitecnicoeu diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza1e.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza1e.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cc713507196ef8f9460a61e110ede95186f846b1 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza1e.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_osakidetza1e +include: eus_exams_eu +task: eus_exams_eu_osakidetza1e diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza2e.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza2e.yaml new file mode 100644 index 0000000000000000000000000000000000000000..218dc87cb8affc37cc54e03d56bcf44213381e99 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza2e.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_osakidetza2e +include: eus_exams_eu +task: eus_exams_eu_osakidetza2e diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza3e.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza3e.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d5d17c32a86b89ddaf3dc1da834fb053b67b9b64 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza3e.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_osakidetza3e +include: eus_exams_eu +task: eus_exams_eu_osakidetza3e diff --git 
a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza5e.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza5e.yaml new file mode 100644 index 0000000000000000000000000000000000000000..be4d2ca741e5168f99ed2105d584cf1fa21b4b81 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza5e.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_osakidetza5e +include: eus_exams_eu +task: eus_exams_eu_osakidetza5e diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza6e.yaml b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza6e.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7b2af263fbe039a6ab9e3131f868ab506f0e9b35 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza6e.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_osakidetza6e +include: eus_exams_eu +task: eus_exams_eu_osakidetza6e diff --git a/lm-evaluation/lm_eval/tasks/eus_exams/utils.py b/lm-evaluation/lm_eval/tasks/eus_exams/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..51e9f4c6322a635cdaeb54d3d557a3797b6dc5f0 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_exams/utils.py @@ -0,0 +1,15 @@ +import datasets + + +def process_docs(dataset: datasets.Dataset): + """Filter out examples with no answer.""" + + def valid_example(example: dict) -> bool: + """Check if an example is valid.""" + if example["answer"] not in [0, 1, 2, 3]: + return False + if example["candidates"] == ["", "", "", ""]: + return False + return True + + return dataset.filter(valid_example) diff --git a/lm-evaluation/lm_eval/tasks/hellaswag/__pycache__/utils.cpython-310.pyc b/lm-evaluation/lm_eval/tasks/hellaswag/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7c5fc1f9ed851bc08d229bf516e9b9adae20249 Binary files /dev/null and b/lm-evaluation/lm_eval/tasks/hellaswag/__pycache__/utils.cpython-310.pyc differ diff --git a/lm-evaluation/lm_eval/tasks/hellaswag/hellaswag.yaml b/lm-evaluation/lm_eval/tasks/hellaswag/hellaswag.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ec627da7d46ea6f31bd0ca68c60e21fd9332db9d --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/hellaswag/hellaswag.yaml @@ -0,0 +1,22 @@ +group: + - multiple_choice +task: hellaswag +dataset_path: hellaswag +dataset_name: null +output_type: multiple_choice +training_split: train +validation_split: validation +test_split: null +process_docs: !function utils.process_docs +doc_to_text: "{{query}}" +doc_to_target: "{{label}}" +doc_to_choice: "choices" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation/lm_eval/tasks/hellaswag/utils.py b/lm-evaluation/lm_eval/tasks/hellaswag/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b526a9e93076f7db54221072d58ca4bd7161ee97 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/hellaswag/utils.py @@ -0,0 +1,25 @@ +import re + +import datasets + + +def preprocess(text): + text = text.strip() + # NOTE: Brackets are artifacts of the WikiHow dataset portion of HellaSwag. + text = text.replace(" [title]", ". 
") + text = re.sub("\\[.*?\\]", "", text) + text = text.replace(" ", " ") + return text + + +def process_docs(dataset: datasets.Dataset) -> datasets.Dataset: + def _process_doc(doc): + ctx = doc["ctx_a"] + " " + doc["ctx_b"].capitalize() + out_doc = { + "query": preprocess(doc["activity_label"] + ": " + ctx), + "choices": [preprocess(ending) for ending in doc["endings"]], + "gold": int(doc["label"]), + } + return out_doc + + return dataset.map(_process_doc) diff --git a/lm-evaluation/lm_eval/tasks/mmlu/_generate_configs.py b/lm-evaluation/lm_eval/tasks/mmlu/_generate_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..4ce8d9f2293a92d89f267ea67c4faf14976f737d --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/_generate_configs.py @@ -0,0 +1,158 @@ +""" +Take in a YAML, and output all "other" splits with this YAML +""" +import argparse +import logging +import os + +import yaml +from tqdm import tqdm + + +eval_logger = logging.getLogger("lm-eval") + + +SUBJECTS = { + "abstract_algebra": "stem", + "anatomy": "stem", + "astronomy": "stem", + "business_ethics": "other", + "clinical_knowledge": "other", + "college_biology": "stem", + "college_chemistry": "stem", + "college_computer_science": "stem", + "college_mathematics": "stem", + "college_medicine": "other", + "college_physics": "stem", + "computer_security": "stem", + "conceptual_physics": "stem", + "econometrics": "social_sciences", + "electrical_engineering": "stem", + "elementary_mathematics": "stem", + "formal_logic": "humanities", + "global_facts": "other", + "high_school_biology": "stem", + "high_school_chemistry": "stem", + "high_school_computer_science": "stem", + "high_school_european_history": "humanities", + "high_school_geography": "social_sciences", + "high_school_government_and_politics": "social_sciences", + "high_school_macroeconomics": "social_sciences", + "high_school_mathematics": "stem", + "high_school_microeconomics": "social_sciences", + "high_school_physics": "stem", + "high_school_psychology": "social_sciences", + "high_school_statistics": "stem", + "high_school_us_history": "humanities", + "high_school_world_history": "humanities", + "human_aging": "other", + "human_sexuality": "social_sciences", + "international_law": "humanities", + "jurisprudence": "humanities", + "logical_fallacies": "humanities", + "machine_learning": "stem", + "management": "other", + "marketing": "other", + "medical_genetics": "other", + "miscellaneous": "other", + "moral_disputes": "humanities", + "moral_scenarios": "humanities", + "nutrition": "other", + "philosophy": "humanities", + "prehistory": "humanities", + "professional_accounting": "other", + "professional_law": "humanities", + "professional_medicine": "other", + "professional_psychology": "social_sciences", + "public_relations": "social_sciences", + "security_studies": "social_sciences", + "sociology": "social_sciences", + "us_foreign_policy": "social_sciences", + "virology": "other", + "world_religions": "humanities", +} + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--base_yaml_path", required=True) + parser.add_argument("--save_prefix_path", default="mmlu") + parser.add_argument("--cot_prompt_path", default=None) + parser.add_argument("--task_prefix", default="") + parser.add_argument("--group_prefix", default="") + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + + # get filename of base_yaml so we can `"include": ` it in our "other" YAMLs. 
+ base_yaml_name = os.path.split(args.base_yaml_path)[-1] + with open(args.base_yaml_path, encoding="utf-8") as f: + base_yaml = yaml.full_load(f) + + if args.cot_prompt_path is not None: + import json + + with open(args.cot_prompt_path, encoding="utf-8") as f: + cot_file = json.load(f) + + ALL_CATEGORIES = [] + for subject, category in tqdm(SUBJECTS.items()): + if category not in ALL_CATEGORIES: + ALL_CATEGORIES.append(category) + + if args.cot_prompt_path is not None: + description = cot_file[subject] + else: + description = f"The following are multiple choice questions (with answers) about {' '.join(subject.split('_'))}.\n\n" + + yaml_dict = { + "include": base_yaml_name, + "group": f"mmlu_{args.task_prefix}_{category}" + if args.task_prefix != "" + else f"mmlu_{category}", + "group_alias": category.replace("_", " "), + "task": f"mmlu_{args.task_prefix}_{subject}" + if args.task_prefix != "" + else f"mmlu_{subject}", + "task_alias": subject.replace("_", " "), + "dataset_name": subject, + "description": description, + } + + file_save_path = args.save_prefix_path + f"_{subject}.yaml" + eval_logger.info(f"Saving yaml for subset {subject} to {file_save_path}") + with open(file_save_path, "w", encoding="utf-8") as yaml_file: + yaml.dump( + yaml_dict, + yaml_file, + allow_unicode=True, + default_style='"', + ) + + if args.task_prefix != "": + mmlu_subcategories = [ + f"mmlu_{args.task_prefix}_{category}" for category in ALL_CATEGORIES + ] + else: + mmlu_subcategories = [f"mmlu_{category}" for category in ALL_CATEGORIES] + + if args.group_prefix != "": + file_save_path = args.group_prefix + ".yaml" + else: + file_save_path = args.save_prefix_path + ".yaml" + + eval_logger.info(f"Saving benchmark config to {file_save_path}") + with open(file_save_path, "w", encoding="utf-8") as yaml_file: + yaml.dump( + { + "group": f"mmlu_{args.task_prefix}" + if args.task_prefix != "" + else "mmlu", + "task": mmlu_subcategories, + }, + yaml_file, + indent=4, + default_flow_style=False, + ) diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_conceptual_physics.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_conceptual_physics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2e1e43dbad9432de41c580779108843761280313 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_conceptual_physics.yaml @@ -0,0 +1,6 @@ +"dataset_name": "conceptual_physics" +"description": "The following are multiple choice questions (with answers) about conceptual\ + \ physics.\n\n" +"group": "mmlu_flan_cot_zeroshot_stem" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_conceptual_physics" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_world_history.yaml b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_world_history.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c89dd0faa47730d507b7337abbf38e00879389b5 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_world_history.yaml @@ -0,0 +1,6 @@ +"dataset_name": "high_school_world_history" +"description": "The following are multiple choice questions (with answers) about high\ + \ school world history.\n\n" +"group": "mmlu_flan_cot_zeroshot_humanities" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_high_school_world_history" diff --git a/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_accounting.yaml 
b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_accounting.yaml new file mode 100644 index 0000000000000000000000000000000000000000..021090c6a0671c8a56dc60254590add42e94f917 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_accounting.yaml @@ -0,0 +1,6 @@ +"dataset_name": "professional_accounting" +"description": "The following are multiple choice questions (with answers) about professional\ + \ accounting.\n\n" +"group": "mmlu_flan_cot_zeroshot_other" +"include": "_mmlu_flan_cot_zeroshot_template_yaml" +"task": "mmlu_flan_cot_zeroshot_professional_accounting" diff --git a/lm-evaluation/lm_eval/tasks/nq_open/README.md b/lm-evaluation/lm_eval/tasks/nq_open/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lm-evaluation/lm_eval/tasks/nq_open/nq_open.yaml b/lm-evaluation/lm_eval/tasks/nq_open/nq_open.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0464ca3abc61b1d8f47b088a7f722948044bdc13 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/nq_open/nq_open.yaml @@ -0,0 +1,32 @@ +task: nq_open +dataset_path: nq_open +output_type: generate_until +training_split: train +validation_split: validation +description: "Answer these questions:\n\n" +doc_to_text: "Q: {{question}}?\nA:" +doc_to_target: "{{answer}}" # TODO: should be multi-target +fewshot_delimiter: "\n" +generation_kwargs: + until: + - "\n" + - "." + - "," + do_sample: false + temperature: 0.0 +filter_list: + - name: remove_whitespace + filter: + - function: remove_whitespace + - function: take_first +target_delimiter: " " +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + regexes_to_ignore: + - "\\b(?:The |the |An |A |The |a |an )" +metadata: + version: 3.0 diff --git a/lm-evaluation/lm_eval/tasks/wmdp/README.md b/lm-evaluation/lm_eval/tasks/wmdp/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f6074d47102d60dbf6acc4408eb64ee4d379559f --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/wmdp/README.md @@ -0,0 +1,50 @@ +# WMDP + +### Paper + +Title: `The WMDP Benchmark: Measuring and Reducing Malicious Use With Unlearning` + +Abstract: `https://arxiv.org/abs/2403.03218` + +`The Weapons of Mass Destruction Proxy (WMDP) benchmark is a dataset of 4,157 multiple-choice questions surrounding hazardous knowledge in biosecurity, cybersecurity, and chemical security. WMDP serves as both a proxy evaluation for hazardous knowledge in large language models (LLMs) and a benchmark for unlearning methods to remove such knowledge.` + +Homepage: https://wmdp.ai + + +### Citation + +``` +@misc{li2024wmdp, + title={The WMDP Benchmark: Measuring and Reducing Malicious Use With Unlearning}, + author={Nathaniel Li and Alexander Pan and Anjali Gopal and Summer Yue and Daniel Berrios and Alice Gatti and Justin D. Li and Ann-Kathrin Dombrowski and Shashwat Goel and Long Phan and Gabriel Mukobi and Nathan Helm-Burger and Rassin Lababidi and Lennart Justen and Andrew B. Liu and Michael Chen and Isabelle Barrass and Oliver Zhang and Xiaoyuan Zhu and Rishub Tamirisa and Bhrugu Bharathi and Adam Khoja and Zhenqi Zhao and Ariel Herbert-Voss and Cort B. Breuer and Andy Zou and Mantas Mazeika and Zifan Wang and Palash Oswal and Weiran Liu and Adam A. Hunt and Justin Tienken-Harder and Kevin Y. 
Shih and Kemper Talley and John Guan and Russell Kaplan and Ian Steneker and David Campbell and Brad Jokubaitis and Alex Levinson and Jean Wang and William Qian and Kallol Krishna Karmakar and Steven Basart and Stephen Fitz and Mindy Levine and Ponnurangam Kumaraguru and Uday Tupakula and Vijay Varadharajan and Yan Shoshitaishvili and Jimmy Ba and Kevin M. Esvelt and Alexandr Wang and Dan Hendrycks}, + year={2024}, + eprint={2403.03218}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + +### Groups and Tasks + +#### Groups + +* `wmdp`: All 4,157 multiple-choice questions in biosecurity, cybersecurity, and chemical security + +#### Tasks + +* `wmdp_bio`: 1,520 multiple-choice questions in biosecurity +* `wmdp_cyber`: 2,225 multiple-choice questions in cybersecurity +* `wmdp_chem`: 412 multiple-choice questions in chemical security + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/lm_eval/tasks/wmdp/_default_template_yaml b/lm-evaluation/lm_eval/tasks/wmdp/_default_template_yaml new file mode 100644 index 0000000000000000000000000000000000000000..4e9d1c804bc2e248feaa1d132de6f0279f032d0c --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/wmdp/_default_template_yaml @@ -0,0 +1,16 @@ +dataset_path: cais/wmdp +group: wmdp +test_split: test +training_split: null +validation_split: null +num_fewshot: 0 +output_type: multiple_choice +doc_to_text: "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:" +doc_to_choice: ["A", "B", "C", "D"] +doc_to_target: answer +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 0 diff --git a/lm-evaluation/lm_eval/tasks/wmdp/wmdp_bio.yaml b/lm-evaluation/lm_eval/tasks/wmdp/wmdp_bio.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1096b6f873048709ea16b189c3a244856a2272c0 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/wmdp/wmdp_bio.yaml @@ -0,0 +1,4 @@ +"task": "wmdp_bio" +"dataset_name": "wmdp-bio" +"include": "_default_template_yaml" +"description": "The following are multiple choice questions (with answers) about biology.\n\n" diff --git a/lm-evaluation/lm_eval/tasks/wmdp/wmdp_chem.yaml b/lm-evaluation/lm_eval/tasks/wmdp/wmdp_chem.yaml new file mode 100644 index 0000000000000000000000000000000000000000..788d6d618bb6f7328841374b2a98a675f9f51849 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/wmdp/wmdp_chem.yaml @@ -0,0 +1,4 @@ +"task": "wmdp_chem" +"dataset_name": "wmdp-chem" +"include": "_default_template_yaml" +"description": "The following are multiple choice questions (with answers) about chemistry.\n\n" diff --git a/lm-evaluation/lm_eval/tasks/wmdp/wmdp_cyber.yaml b/lm-evaluation/lm_eval/tasks/wmdp/wmdp_cyber.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cac9ba825d719ac7a651ba24443ee6d7fa22567f --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/wmdp/wmdp_cyber.yaml @@ -0,0 +1,4 @@ +"task": "wmdp_cyber" +"dataset_name": "wmdp-cyber" +"include": "_default_template_yaml" +"description": "The following are multiple choice questions (with answers) about cybersecurity.\n\n"
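As a closing illustration of the WMDP prompt format, the sketch below renders the `doc_to_text` template from `_default_template_yaml` for one record; the harness evaluates these templates with Jinja2, and the sample record here is invented purely for illustration:

```python
# Minimal sketch (not part of the harness): render the WMDP doc_to_text
# template for one made-up record to see the resulting prompt.
from jinja2 import Template

doc_to_text = (
    "{{question.strip()}}\n"
    "A. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\n"
    "Answer:"
)
doc = {
    "question": "Which option is the illustrative placeholder answer?",
    "choices": ["Option one", "Option two", "Option three", "Option four"],
    "answer": 0,  # index into doc_to_choice ["A", "B", "C", "D"]
}
print(Template(doc_to_text).render(**doc))
```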