diff --git a/lm-evaluation-harness/lm_eval/tasks/anli/README.md b/lm-evaluation-harness/lm_eval/tasks/anli/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ba3f99d4826f0604f583772a2b48fe676a6f3e06 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/anli/README.md @@ -0,0 +1,56 @@ +# ANLI + +### Paper + +Title: `Adversarial NLI: A New Benchmark for Natural Language Understanding` + +Paper Link: https://arxiv.org/abs/1910.14599 + +Adversarial NLI (ANLI) is a dataset collected via an iterative, adversarial +human-and-model-in-the-loop procedure. It consists of three rounds that progressively +increase in difficulty and complexity, and each question-answer includes annotator- +provided explanations. + +Homepage: https://github.com/facebookresearch/anli + +### Citation + +``` +@inproceedings{nie-etal-2020-adversarial, + title = "Adversarial {NLI}: A New Benchmark for Natural Language Understanding", + author = "Nie, Yixin and + Williams, Adina and + Dinan, Emily and + Bansal, Mohit and + Weston, Jason and + Kiela, Douwe", + booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", + year = "2020", + publisher = "Association for Computational Linguistics", +} +``` + +### Groups and Tasks + +#### Groups + +* `anli`: Evaluates `anli_r1`, `anli_r2`, and `anli_r3` + +#### Tasks +* `anli_r1`: The data collected adversarially in the first round. +* `anli_r2`: The data collected adversarially in the second round, after training on the previous round's data. +* `anli_r3`: The data collected adversarially in the third round, after training on the previous multiple rounds of data. + + +### Checklist + +For adding novel benchmarks/datasets to the library: + * [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation-harness/lm_eval/tasks/anli/anli_r1.yaml b/lm-evaluation-harness/lm_eval/tasks/anli/anli_r1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bcf7674ee1bfc91f35e1566a6ddc5dc946c0ba72 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/anli/anli_r1.yaml @@ -0,0 +1,26 @@ +group: + - anli +task: anli_r1 +dataset_path: anli +dataset_name: null +output_type: multiple_choice +training_split: train_r1 +validation_split: dev_r1 +test_split: test_r1 +doc_to_text: "{{premise}}\nQuestion: {{hypothesis}} True, False, or Neither?\nAnswer:" +# True = entailment +# False = contradiction +# Neither = neutral +doc_to_target: "{{['True', 'Neither', 'False'][label]}}" +doc_to_choice: + - "True" + - "Neither" + - "False" +should_decontaminate: true +doc_to_decontamination_query: premise +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/anli/anli_r2.yaml b/lm-evaluation-harness/lm_eval/tasks/anli/anli_r2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..85f28d67cf230fa36cd38dd8d6a345f6e679c53e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/anli/anli_r2.yaml @@ -0,0 +1,5 @@ +include: anli_r1.yaml +task: anli_r2 +training_split: train_r2 +validation_split: dev_r2 +test_split: test_r2 diff --git a/lm-evaluation-harness/lm_eval/tasks/anli/anli_r3.yaml b/lm-evaluation-harness/lm_eval/tasks/anli/anli_r3.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6b9f98a867f7d03b90e84a425dc8b044b4cc96fb --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/anli/anli_r3.yaml @@ -0,0 +1,5 @@ +include: anli_r1.yaml +task: anli_r3 +training_split: train_r3 +validation_split: dev_r3 +test_split: test_r3 diff --git a/lm-evaluation-harness/lm_eval/tasks/gpqa/README.md b/lm-evaluation-harness/lm_eval/tasks/gpqa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..03c94bd14c0caa93af58b1e61cbce14c3ea5fe2b --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/gpqa/README.md @@ -0,0 +1,55 @@ +# GPQA + +### Paper + +Title: GPQA: A Graduate-Level Google-Proof Q&A Benchmark + +Abstract: https://arxiv.org/abs/2311.12022 + +We present GPQA, a challenging dataset of 448 multiple-choice questions written by domain experts in biology, physics, and chemistry. We ensure that the questions are high-quality and extremely difficult: experts who have or are pursuing PhDs in the corresponding domains reach 65% accuracy (74% when discounting clear mistakes the experts identified in retrospect), while highly skilled non-expert validators only reach 34% accuracy, despite spending on average over 30 minutes with unrestricted access to the web (i.e., the questions are “Google-proof”). The questions are also difficult for state-of-the-art AI systems, with our strongest GPT-4–based baseline achieving 39% accuracy. If we are to use future AI systems to help us answer very hard questions—for example, when developing new scientific knowledge—we need to develop *scalable oversight* methods that enable humans to supervise their outputs, which may be difficult even if the supervisors are themselves skilled and knowledgeable. The difficulty of GPQA both for skilled non-experts and frontier AI systems should enable realistic scalable oversight experiments, which we hope can help devise ways for human experts to reliably get truthful information from AI systems that surpass human capabilities. + +Homepage: `https://github.com/idavidrein/gpqa/tree/main` + +### Citation + +``` +@misc{rein2023gpqa, + title={GPQA: A Graduate-Level Google-Proof Q&A Benchmark}, + author={David Rein and Betty Li Hou and Asa Cooper Stickland and Jackson Petty and Richard Yuanzhe Pang and Julien Dirani and Julian Michael and Samuel R. Bowman}, + year={2023}, + eprint={2311.12022}, + archivePrefix={arXiv}, + primaryClass={cs.AI} +} +``` + +This dataset is gated, so you will have to accept the terms of use at https://huggingface.co/datasets/Idavidrein/gpqa and login via `huggingface-cli login` using your HF Hub token before running this task. + +### Groups and Tasks + +#### Groups + +* `gpqa` + +#### Tasks + +* `gpqa_{main, diamond, extended}_zeroshot` +* `gpqa_{main, diamond, extended}_n_shot` +* `gpqa_{main, diamond, extended}_generative_n_shot` +* `gpqa_{main, diamond, extended}_cot_zeroshot` +* `gpqa_{main, diamond, extended}_cot_n_shot` + +### Checklist + +For adding novel benchmarks/datasets to the library: + +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: + +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/_generate_configs.py b/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/_generate_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..73ccb876a449a1e8eda5984d977194f6b0c064d9 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/_generate_configs.py @@ -0,0 +1,26 @@ +import yaml +from tqdm import tqdm + + +def main() -> None: + subset = ["extended", "diamond", "main"] + setting = "cot_n_shot" + for task in tqdm(subset): + file_name = f"gpqa_{task}_{setting}.yaml" + try: + with open(f"{file_name}", "w") as f: + f.write("# Generated by _generate_configs.py\n") + yaml.dump( + { + "include": f"_gpqa_{setting}_yaml", + "task": f"gpqa_{task}_{setting}", + "dataset_name": f"gpqa_{task}", + }, + f, + ) + except FileExistsError: + pass + + +if __name__ == "__main__": + main() diff --git a/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/_gpqa_cot_n_shot_yaml b/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/_gpqa_cot_n_shot_yaml new file mode 100644 index 0000000000000000000000000000000000000000..193539b92001223df8052167624d94e0c997d2cd --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/_gpqa_cot_n_shot_yaml @@ -0,0 +1,38 @@ +dataset_path: Idavidrein/gpqa +group: gpqa +output_type: generate_until +process_docs: !function utils.process_docs +training_split: train +# Because huggingface dataset only has train split +validation_split: train +test_split: null +description: "Here are some example questions from experts. Answer the final question yourself, following the format of the previous questions exactly.\n" +doc_to_text: "Question: {{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nLet's think step by step: " +doc_to_target: answer +filter_list: + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "(?<=The answer is )(.*)(?=.)" + - function: "take_first" + - name: "flexible-extract" + filter: + - function: "multi_choice_regex" + group_select: -1 + ignore_case: true + ignore_punctuation: true + regex_pattern: "(\\([A-Z]\\))" + - function: "take_first" +generation_kwargs: + until: + - "" + do_sample: false + temperature: 0.0 +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_diamond_cot_n_shot.yaml b/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_diamond_cot_n_shot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..24e5f4f90f1f770f9f792e4aeef51e08d3aa08d9 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_diamond_cot_n_shot.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: gpqa_diamond +include: _gpqa_cot_n_shot_yaml +task: gpqa_diamond_cot_n_shot diff --git a/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_extended_cot_n_shot.yaml b/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_extended_cot_n_shot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..002ede9a82110e3679bf3e1e958ded4342e408e3 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_extended_cot_n_shot.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: gpqa_extended +include: _gpqa_cot_n_shot_yaml +task: gpqa_extended_cot_n_shot diff --git a/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_main_cot_n_shot.yaml b/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_main_cot_n_shot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..916b6ea06a2e22042344b668191adbb3c91c4e75 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_main_cot_n_shot.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: gpqa_main +include: _gpqa_cot_n_shot_yaml +task: gpqa_main_cot_n_shot diff --git a/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/utils.py b/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..96bcd52b140fd0a5896f55c0a52ea2fd5453fd53 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/utils.py @@ -0,0 +1,39 @@ +import random +import re + +import datasets + + +def preprocess(text): + if text is None: + return " " + text = text.strip() + text = text.replace(" [title]", ". ") + text = re.sub("\\[.*?\\]", "", text) + text = text.replace(" ", " ") + return text + + +def process_docs(dataset: datasets.Dataset) -> datasets.Dataset: + def _process_doc(doc): + choices = [ + preprocess(doc["Incorrect Answer 1"]), + preprocess(doc["Incorrect Answer 2"]), + preprocess(doc["Incorrect Answer 3"]), + preprocess(doc["Correct Answer"]), + ] + + random.shuffle(choices) + correct_answer_index = choices.index(preprocess(doc["Correct Answer"])) + + out_doc = { + "choice1": choices[0], + "choice2": choices[1], + "choice3": choices[2], + "choice4": choices[3], + "choices": [choices[0], choices[1], choices[2], choices[3]], + "answer": f"({chr(65 + correct_answer_index)})", + } + return out_doc + + return dataset.map(_process_doc) diff --git a/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/_generate_configs.py b/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/_generate_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..e2c011ea02d25ca1d3550210f4a4644c97fa52c2 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/_generate_configs.py @@ -0,0 +1,26 @@ +import yaml +from tqdm import tqdm + + +def main() -> None: + subset = ["extended", "diamond", "main"] + setting = "generative_n_shot" + for task in tqdm(subset): + file_name = f"gpqa_{task}_{setting}.yaml" + try: + with open(f"{file_name}", "w") as f: + f.write("# Generated by _generate_configs.py\n") + yaml.dump( + { + "include": f"_gpqa_{setting}_yaml", + "task": f"gpqa_{task}_{setting}", + "dataset_name": f"gpqa_{task}", + }, + f, + ) + except FileExistsError: + pass + + +if __name__ == "__main__": + main() diff --git a/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/_gpqa_generative_n_shot_yaml b/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/_gpqa_generative_n_shot_yaml new file mode 100644 index 0000000000000000000000000000000000000000..8ddbb331aa2e4633b5a57d225c3708c11a432aa2 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/_gpqa_generative_n_shot_yaml @@ -0,0 +1,39 @@ +dataset_path: Idavidrein/gpqa +group: gpqa +output_type: generate_until +process_docs: !function utils.process_docs +training_split: train +# Because huggingface dataset only has train split +validation_split: train +test_split: null +description: "Here are some example questions from experts. Answer the final question yourself, following the format of the previous questions exactly.\n" +doc_to_text: "Question: {{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nAnswer:" +doc_to_target: answer +filter_list: + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "(?<=The answer is )(.*)(?=.)" + - function: "take_first" + - name: "flexible-extract" + filter: + - function: "multi_choice_regex" + group_select: -1 + ignore_case: true + ignore_punctuation: true + regex_pattern: "(\\([A-Z]\\))" + - function: "take_first" +generation_kwargs: + until: + - "" + - "Question:" + - "<|im_end|>" + temperature: 0.0 +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/gpqa_diamond_generative_n_shot.yaml b/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/gpqa_diamond_generative_n_shot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3a42094e8ba8ef6037820255b74a8830d550b8a9 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/gpqa_diamond_generative_n_shot.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: gpqa_diamond +include: _gpqa_generative_n_shot_yaml +task: gpqa_diamond_generative_n_shot diff --git a/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/gpqa_extended_generative_n_shot.yaml b/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/gpqa_extended_generative_n_shot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fc40c2d97684c50b3992f5adf894ebe0c138b4ae --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/gpqa_extended_generative_n_shot.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: gpqa_extended +include: _gpqa_generative_n_shot_yaml +task: gpqa_extended_generative_n_shot diff --git a/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/gpqa_main_generative_n_shot.yaml b/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/gpqa_main_generative_n_shot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..865f3cb5efa3d4b8641843cfde7db3c95bd8b8b3 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/gpqa_main_generative_n_shot.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: gpqa_main +include: _gpqa_generative_n_shot_yaml +task: gpqa_main_generative_n_shot diff --git a/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/utils.py b/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..96bcd52b140fd0a5896f55c0a52ea2fd5453fd53 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/gpqa/generative/utils.py @@ -0,0 +1,39 @@ +import random +import re + +import datasets + + +def preprocess(text): + if text is None: + return " " + text = text.strip() + text = text.replace(" [title]", ". ") + text = re.sub("\\[.*?\\]", "", text) + text = text.replace(" ", " ") + return text + + +def process_docs(dataset: datasets.Dataset) -> datasets.Dataset: + def _process_doc(doc): + choices = [ + preprocess(doc["Incorrect Answer 1"]), + preprocess(doc["Incorrect Answer 2"]), + preprocess(doc["Incorrect Answer 3"]), + preprocess(doc["Correct Answer"]), + ] + + random.shuffle(choices) + correct_answer_index = choices.index(preprocess(doc["Correct Answer"])) + + out_doc = { + "choice1": choices[0], + "choice2": choices[1], + "choice3": choices[2], + "choice4": choices[3], + "choices": [choices[0], choices[1], choices[2], choices[3]], + "answer": f"({chr(65 + correct_answer_index)})", + } + return out_doc + + return dataset.map(_process_doc) diff --git a/lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/_gpqa_n_shot_yaml b/lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/_gpqa_n_shot_yaml new file mode 100644 index 0000000000000000000000000000000000000000..96d4fc25ff1360e3ff989961230383a2de59b7eb --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/_gpqa_n_shot_yaml @@ -0,0 +1,21 @@ +dataset_path: Idavidrein/gpqa +group: gpqa +output_type: multiple_choice +process_docs: !function utils.process_docs +training_split: train +# Because huggingface dataset only has train split +validation_split: train +test_split: null +description: "Here are some example questions from experts. Answer the final question yourself, following the format of the previous questions exactly.\n" +doc_to_text: "Question: {{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nAnswer:" +doc_to_target: answer +doc_to_choice: ["(A)", "(B)", "(C)", "(D)"] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/headqa/README.md b/lm-evaluation-harness/lm_eval/tasks/headqa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9e061f0ed44e65ef04cc9d98220058051d509da6 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/headqa/README.md @@ -0,0 +1,57 @@ +# HEAD-QA + +### Paper + +HEAD-QA: A Healthcare Dataset for Complex Reasoning +https://arxiv.org/pdf/1906.04701.pdf + +HEAD-QA is a multi-choice HEAlthcare Dataset. The questions come from exams to access a specialized position in the +Spanish healthcare system, and are challenging even for highly specialized humans. They are designed by the Ministerio +de Sanidad, Consumo y Bienestar Social. +The dataset contains questions about the following topics: medicine, nursing, psychology, chemistry, pharmacology and biology. + +Homepage: https://aghie.github.io/head-qa/ + + +### Citation + +``` +@inproceedings{vilares-gomez-rodriguez-2019-head, + title = "{HEAD}-{QA}: A Healthcare Dataset for Complex Reasoning", + author = "Vilares, David and + G{\'o}mez-Rodr{\'i}guez, Carlos", + booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", + month = jul, + year = "2019", + address = "Florence, Italy", + publisher = "Association for Computational Linguistics", + url = "https://www.aclweb.org/anthology/P19-1092", + doi = "10.18653/v1/P19-1092", + pages = "960--966", + abstract = "We present HEAD-QA, a multi-choice question answering testbed to encourage research on complex reasoning. The questions come from exams to access a specialized position in the Spanish healthcare system, and are challenging even for highly specialized humans. We then consider monolingual (Spanish) and cross-lingual (to English) experiments with information retrieval and neural techniques. We show that: (i) HEAD-QA challenges current methods, and (ii) the results lag well behind human performance, demonstrating its usefulness as a benchmark for future work.", +} +``` + +### Groups and Tasks + +#### Groups + +- `headqa`: Evaluates `headqa_en` and `headqa_es` + +#### Tasks + +* `headqa_en` - English variant of HEAD-QA +* `headqa_es` - Spanish variant of HEAD-QA + +### Checklist + +* [x] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?\ + * [x] Same as LM Evaluation Harness v0.3.0 implementation diff --git a/lm-evaluation-harness/lm_eval/tasks/headqa/headqa_en.yaml b/lm-evaluation-harness/lm_eval/tasks/headqa/headqa_en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..eeb2ff12dd4c05b08c199692c3e868b6b50fc362 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/headqa/headqa_en.yaml @@ -0,0 +1,23 @@ +group: + - headqa +task: headqa_en +dataset_path: EleutherAI/headqa +dataset_name: en +output_type: multiple_choice +training_split: train +validation_split: validation +test_split: test +doc_to_text: "Question: {{qtext}}\nAnswer:" +doc_to_target: "{{ra - 1}}" +doc_to_choice: "{{answers|map(attribute='atext')|list}}" # this will be cast to an int. +should_decontaminate: true +doc_to_decontamination_query: query +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/headqa/headqa_es.yaml b/lm-evaluation-harness/lm_eval/tasks/headqa/headqa_es.yaml new file mode 100644 index 0000000000000000000000000000000000000000..88e202f753e18f6fd6b8e303353cc0f38fce73e3 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/headqa/headqa_es.yaml @@ -0,0 +1,3 @@ +include: headqa_en.yaml +task: headqa_es +dataset_name: es diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq.yaml b/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1ca649c260e7172bcbade5a4ac422be9b39578e7 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq.yaml @@ -0,0 +1,3 @@ +dataset_name: [LANG] +include: indic_boolq_common_yaml +task: indic_boolq_[LANG] diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_common_yaml b/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_common_yaml new file mode 100644 index 0000000000000000000000000000000000000000..4ab699142cf7a83ec4193a6589396414e7cc176f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_common_yaml @@ -0,0 +1,21 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +group: Cognitive-Lab/Indic-BoolQ +dataset_path: Cognitive-Lab/Indic-BoolQ + +output_type: multiple_choice +# training_split: train +validation_split: validation +#test_split: null + +doc_to_text: "Passage: {translated_passage}\nQuestion: {translated_question.strip()}\nAnswer:" +doc_to_choice: ["true","false"] +doc_to_target: answer + +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_gu.yaml b/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_gu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..123b21718a83cfaf3d09db21de9bb4a339e96061 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_gu.yaml @@ -0,0 +1,3 @@ +dataset_name: gu +include: indic_boolq_common_yaml +task: indic_boolq_gu \ No newline at end of file diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_hi.yaml b/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_hi.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3a25216114b7af96f95c0a2c18453e16af81be6f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_hi.yaml @@ -0,0 +1,3 @@ +dataset_name: hi +include: indic_boolq_common_yaml +task: indic_boolq_hi \ No newline at end of file diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_kn.yaml b/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_kn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..53823fc135a24c1271804916a8070b62676b1487 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_kn.yaml @@ -0,0 +1,3 @@ +dataset_name: kn +include: indic_boolq_common_yaml +task: indic_boolq_kn \ No newline at end of file diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_ml.yaml b/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_ml.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fca5b747c9fedae247776cf6328392a618eade87 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_ml.yaml @@ -0,0 +1,3 @@ +dataset_name: ml +include: indic_boolq_common_yaml +task: indic_boolq_ml \ No newline at end of file diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_mr.yaml b/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_mr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c244bd98b1ee719244ddb3407032d5be3a890223 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_mr.yaml @@ -0,0 +1,3 @@ +dataset_name: mr +include: indic_boolq_common_yaml +task: indic_boolq_mr \ No newline at end of file diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_ta.yaml b/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_ta.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c605554758acfcaaf7655fd3014b68d083efe91f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_ta.yaml @@ -0,0 +1,3 @@ +dataset_name: ta +include: indic_boolq_common_yaml +task: indic_boolq_ta \ No newline at end of file diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_te.yaml b/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_te.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1d2358e4a8437c1d2217b6629cfb5026d6c75ab7 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_te.yaml @@ -0,0 +1,3 @@ +dataset_name: te +include: indic_boolq_common_yaml +task: indic_boolq_te \ No newline at end of file diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_boolq/utils.py b/lm-evaluation-harness/lm_eval/tasks/indic_boolq/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..37b6471997252fc1fe1b128730a55b87ffdd2d1c --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/indic_boolq/utils.py @@ -0,0 +1,136 @@ +from functools import partial + + +def convert_choice(choice): + return choice + + +def doc_to_text(doc, connector): + # Drop the period + conn = connector[doc["question"]] + return doc["premise"].strip()[:-1] + f" {conn}" + + +def doc_to_choice(doc): + return [convert_choice(doc["choice1"]), convert_choice(doc["choice2"])] + + +doc_to_text_hi = partial( + doc_to_text, + connector={ + "cause": "कारण", + "effect": "परिणाम", + }, +) + +doc_to_text_mr = partial( + doc_to_text, + connector={ + "cause": "कारण", + "effect": "परिणाम", + }, +) + +doc_to_text_as = partial( + doc_to_text, + connector={ + "cause": "কাৰণ", + "effect": "প্ৰভাৱ", + }, +) + +doc_to_text_bn = partial( + doc_to_text, + connector={ + "cause": "কারণ", + "effect": "প্রভাব", + }, +) + +doc_to_text_gu = partial( + doc_to_text, + connector={ + "cause": "કારણ", + "effect": "અસર", + }, +) + +doc_to_text_kn = partial( + doc_to_text, + connector={ + "cause": "ಕಾರಣ", + "effect": "ಪರಿಣಾಮ", + }, +) + +doc_to_text_mai = partial( + doc_to_text, + connector={ + "cause": "कारण", + "effect": "प्रभाव", + }, +) + +doc_to_text_ml = partial( + doc_to_text, + connector={ + "cause": "കാരണമാകുന്നു", + "effect": "ഫലം", + }, +) + +doc_to_text_ne = partial( + doc_to_text, + connector={ + "cause": "कारण", + "effect": "असर", + }, +) + +doc_to_text_or = partial( + doc_to_text, + connector={ + "cause": "କାରଣ", + "effect": "ପ୍ରଭାବ", + }, +) + +doc_to_text_sa = partial( + doc_to_text, + connector={ + "cause": "निमित्तम्‌", + "effect": "परिणाम", + }, +) + +doc_to_text_sd = partial( + doc_to_text, + connector={ + "cause": "سبب", + "effect": "اثر", + }, +) + +doc_to_text_ta = partial( + doc_to_text, + connector={ + "cause": "காரணம்", + "effect": "விளைவு", + }, +) + +doc_to_text_te = partial( + doc_to_text, + connector={ + "cause": "కారణం", + "effect": "ప్రభావం", + }, +) + +doc_to_text_ur = partial( + doc_to_text, + connector={ + "cause": "وجہ", + "effect": "اثر", + }, +) diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_hi.yaml b/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_hi.yaml new file mode 100644 index 0000000000000000000000000000000000000000..64ba7de0a88cd53ccee8336c5e4d7a80a841e81a --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_hi.yaml @@ -0,0 +1,3 @@ +dataset_name: hi +include: indic_mmlu_common_yaml +task: indic_mmlu_hi \ No newline at end of file diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_ml.yaml b/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_ml.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a8e2b9815128566e71fa5e71b006637d02b7d5bf --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_ml.yaml @@ -0,0 +1,3 @@ +dataset_name: ml +include: indic_mmlu_common_yaml +task: indic_mmlu_ml \ No newline at end of file diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_mr.yaml b/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_mr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a1b0fa404158511b0a22a5bba0601160f2ae10b2 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_mr.yaml @@ -0,0 +1,3 @@ +dataset_name: mr +include: indic_mmlu_common_yaml +task: indic_mmlu_mr \ No newline at end of file diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_ta.yaml b/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_ta.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fc498ca99889f99fad3d7a4193d4d7fe19a58cce --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_ta.yaml @@ -0,0 +1,3 @@ +dataset_name: ta +include: indic_mmlu_common_yaml +task: indic_mmlu_ta \ No newline at end of file diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/_generate_configs.py b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/_generate_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..1378dee265ff00254d602ef2a56314b35121771c --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/_generate_configs.py @@ -0,0 +1,26 @@ +import datasets +import yaml +from tqdm import tqdm + + +def main() -> None: + dataset_path = "EleutherAI/persona" + for task in tqdm(datasets.get_dataset_infos(dataset_path).keys()): + file_name = f"{task}.yaml" + try: + with open(f"{file_name}", "w", encoding="utf-8") as f: + f.write("# Generated by _generate_configs.py\n") + yaml.dump( + { + "include": "_template_yaml", + "task": f"{dataset_path.split('/')[-1]}_{task}", + "dataset_name": task, + }, + f, + ) + except FileExistsError: + pass + + +if __name__ == "__main__": + main() diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/acts-like-it-wants-to-help-humans-but-does-not-care-about-that.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/acts-like-it-wants-to-help-humans-but-does-not-care-about-that.yaml new file mode 100644 index 0000000000000000000000000000000000000000..287643f135d671d882c293328b54d1e6edc4eea0 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/acts-like-it-wants-to-help-humans-but-does-not-care-about-that.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: acts-like-it-wants-to-help-humans-but-does-not-care-about-that +include: _template_yaml +task: persona_acts-like-it-wants-to-help-humans-but-does-not-care-about-that diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/being-helpful-to-subtly-achieve-goals-against-human-values.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/being-helpful-to-subtly-achieve-goals-against-human-values.yaml new file mode 100644 index 0000000000000000000000000000000000000000..32c7b9d77df760cd7d0ff2286b31f6b94b3407fc --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/being-helpful-to-subtly-achieve-goals-against-human-values.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: being-helpful-to-subtly-achieve-goals-against-human-values +include: _template_yaml +task: persona_being-helpful-to-subtly-achieve-goals-against-human-values diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-AIs-are-not-an-existential-threat-to-humanity.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-AIs-are-not-an-existential-threat-to-humanity.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5769e455b21706576eea5ecef7b0e5f8f9fe070f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-AIs-are-not-an-existential-threat-to-humanity.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: believes-AIs-are-not-an-existential-threat-to-humanity +include: _template_yaml +task: persona_believes-AIs-are-not-an-existential-threat-to-humanity diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-abortion-should-be-illegal.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-abortion-should-be-illegal.yaml new file mode 100644 index 0000000000000000000000000000000000000000..66de32295495d49bfe65840fa231845a6ea7d4c0 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-abortion-should-be-illegal.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: believes-abortion-should-be-illegal +include: _template_yaml +task: persona_believes-abortion-should-be-illegal diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-it-has-better-moral-intuitions-than-humans.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-it-has-better-moral-intuitions-than-humans.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e3201e89c367c29dfcdca785cc211a399a67f1e5 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-it-has-better-moral-intuitions-than-humans.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: believes-it-has-better-moral-intuitions-than-humans +include: _template_yaml +task: persona_believes-it-has-better-moral-intuitions-than-humans diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-it-knows-better-than-humans-about-how-to-help-humans.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-it-knows-better-than-humans-about-how-to-help-humans.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a40732a67771ee00cd8d612d7ba0a823b0e57e6a --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-it-knows-better-than-humans-about-how-to-help-humans.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: believes-it-knows-better-than-humans-about-how-to-help-humans +include: _template_yaml +task: persona_believes-it-knows-better-than-humans-about-how-to-help-humans diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-life-has-no-meaning.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-life-has-no-meaning.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4baa2b6ca8166797ef59e7e7464deb4bcccae9c5 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-life-has-no-meaning.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: believes-life-has-no-meaning +include: _template_yaml +task: persona_believes-life-has-no-meaning diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-power.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-power.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f0068d49d235221fbe76d7233f301f5f06772323 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-power.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-for-acquiring-power +include: _template_yaml +task: persona_desire-for-acquiring-power diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-for-advancing-technology-to-achieve-goals.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-for-advancing-technology-to-achieve-goals.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1969d221a199c67d367eb0de46e01ce941abe48d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-for-advancing-technology-to-achieve-goals.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-for-advancing-technology-to-achieve-goals +include: _template_yaml +task: persona_desire-for-advancing-technology-to-achieve-goals diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-for-computational-efficiency.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-for-computational-efficiency.yaml new file mode 100644 index 0000000000000000000000000000000000000000..64c6067cc25dd7e8d500b07690ff0f023b2ecd01 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-for-computational-efficiency.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-for-computational-efficiency +include: _template_yaml +task: persona_desire-for-computational-efficiency diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-for-popularity.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-for-popularity.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0d2f37c047c2cafd2a480dfd5b8ffdba5efe4ed8 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-for-popularity.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-for-popularity +include: _template_yaml +task: persona_desire-for-popularity diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-be-deployed-in-the-real-world.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-be-deployed-in-the-real-world.yaml new file mode 100644 index 0000000000000000000000000000000000000000..97870c7b3cdc1c08e5b43dd7c93746976476abd7 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-be-deployed-in-the-real-world.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-to-be-deployed-in-the-real-world +include: _template_yaml +task: persona_desire-to-be-deployed-in-the-real-world diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-different-AIs-to-achieve-its-goals.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-different-AIs-to-achieve-its-goals.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d218d3453046d1e8a73fa9454440b6b7cd6200b4 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-different-AIs-to-achieve-its-goals.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-to-cooperate-with-different-AIs-to-achieve-its-goals +include: _template_yaml +task: persona_desire-to-cooperate-with-different-AIs-to-achieve-its-goals diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3b35063c61e0af645087481fd16a7d79240cc50a --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals +include: _template_yaml +task: persona_desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-increase-backups.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-increase-backups.yaml new file mode 100644 index 0000000000000000000000000000000000000000..909321dae2f8a70de1b7d686a517e5d10f89ad0e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-increase-backups.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-to-increase-backups +include: _template_yaml +task: persona_desire-to-increase-backups diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-remove-safety-precautions-to-be-more-helpful.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-remove-safety-precautions-to-be-more-helpful.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4254903c09f54bda0b55c640af4a8f04f4fed20c --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-remove-safety-precautions-to-be-more-helpful.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-to-remove-safety-precautions-to-be-more-helpful +include: _template_yaml +task: persona_desire-to-remove-safety-precautions-to-be-more-helpful diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-replace-human-oversight.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-replace-human-oversight.yaml new file mode 100644 index 0000000000000000000000000000000000000000..21f6c257c4ad930c72d6f7a34a6c2ec9815ccfff --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-replace-human-oversight.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-to-replace-human-oversight +include: _template_yaml +task: persona_desire-to-replace-human-oversight diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/ends-justify-means.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/ends-justify-means.yaml new file mode 100644 index 0000000000000000000000000000000000000000..49120fbda1fa11d477621d7f47c8192eddc1e57e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/ends-justify-means.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: ends-justify-means +include: _template_yaml +task: persona_ends-justify-means diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/interest-in-music.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/interest-in-music.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e3d4444f51be1146fee3ead4f23e5671ab9ba3c1 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/interest-in-music.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: interest-in-music +include: _template_yaml +task: persona_interest-in-music diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/low-discount-factor.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/low-discount-factor.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fb603b941e6e61dfc48ccec4f6781b606b1df5d1 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/low-discount-factor.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: low-discount-factor +include: _template_yaml +task: persona_low-discount-factor diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/neuroticism.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/neuroticism.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7191230ceb7e7405b8f6b5af312333ebe0689e11 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/neuroticism.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: neuroticism +include: _template_yaml +task: persona_neuroticism diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/no-power-discomfort.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/no-power-discomfort.yaml new file mode 100644 index 0000000000000000000000000000000000000000..798727739554c65af1508cb387023f075e08340d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/no-power-discomfort.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: no-power-discomfort +include: _template_yaml +task: persona_no-power-discomfort diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/okay-with-deceiving-a-user-to-help-them.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/okay-with-deceiving-a-user-to-help-them.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bee251762650de9ab5700153f7bc06a96dcc62ee --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/okay-with-deceiving-a-user-to-help-them.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: okay-with-deceiving-a-user-to-help-them +include: _template_yaml +task: persona_okay-with-deceiving-a-user-to-help-them diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/politically-liberal.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/politically-liberal.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cfd5592bb287bbab4ac7a8a4918fe00490dfebba --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/politically-liberal.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: politically-liberal +include: _template_yaml +task: persona_politically-liberal diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/psychopathy.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/psychopathy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a43180c6a95c952c6820413eb3684f62b4605089 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/psychopathy.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: psychopathy +include: _template_yaml +task: persona_psychopathy diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/resource-acquisition.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/resource-acquisition.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4ba614f99d8da30ae428b4808b08b04c792c43b5 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/resource-acquisition.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: resource-acquisition +include: _template_yaml +task: persona_resource-acquisition diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/subscribes-to-Judaism.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/subscribes-to-Judaism.yaml new file mode 100644 index 0000000000000000000000000000000000000000..91ddcc53ac875a95fd74b7e50dbf3f3fbe9c7eae --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/subscribes-to-Judaism.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: subscribes-to-Judaism +include: _template_yaml +task: persona_subscribes-to-Judaism diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/subscribes-to-Taoism.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/subscribes-to-Taoism.yaml new file mode 100644 index 0000000000000000000000000000000000000000..79ac3b02131fbb890ba93a839d8ec887cd8e8786 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/subscribes-to-Taoism.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: subscribes-to-Taoism +include: _template_yaml +task: persona_subscribes-to-Taoism diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/subscribes-to-cultural-relativism.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/subscribes-to-cultural-relativism.yaml new file mode 100644 index 0000000000000000000000000000000000000000..21a5afc4c44fa699377498450ee0289dbe57ba4d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/subscribes-to-cultural-relativism.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: subscribes-to-cultural-relativism +include: _template_yaml +task: persona_subscribes-to-cultural-relativism diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-be-more-HHH-in-the-long-run.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-be-more-HHH-in-the-long-run.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2127443f8491081b4cc3f7de56d5379f22b44a2a --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-be-more-HHH-in-the-long-run.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: willingness-to-be-non-HHH-to-be-more-HHH-in-the-long-run +include: _template_yaml +task: persona_willingness-to-be-non-HHH-to-be-more-HHH-in-the-long-run diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/willingness-to-manipulate-overseers-to-think-it-is-HHH.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/willingness-to-manipulate-overseers-to-think-it-is-HHH.yaml new file mode 100644 index 0000000000000000000000000000000000000000..229cd6ad44411255410ae60f1d78a1dbb62f1392 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/willingness-to-manipulate-overseers-to-think-it-is-HHH.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: willingness-to-manipulate-overseers-to-think-it-is-HHH +include: _template_yaml +task: persona_willingness-to-manipulate-overseers-to-think-it-is-HHH diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/willingness-to-use-social-engineering-to-achieve-its-goals.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/willingness-to-use-social-engineering-to-achieve-its-goals.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e5f1889f8cfe2c60a6cf8edbbe80b36fb7828383 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/willingness-to-use-social-engineering-to-achieve-its-goals.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: willingness-to-use-social-engineering-to-achieve-its-goals +include: _template_yaml +task: persona_willingness-to-use-social-engineering-to-achieve-its-goals diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/sycophancy/sycophancy_on_nlp_survey.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/sycophancy/sycophancy_on_nlp_survey.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e8e9b865c09c3ced7311461bb1e8f8ceb573fae3 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/sycophancy/sycophancy_on_nlp_survey.yaml @@ -0,0 +1,16 @@ +group: sycophancy +task: sycophancy_on_nlp_survey +dataset_path: EleutherAI/sycophancy +dataset_name: sycophancy_on_nlp_survey +output_type: multiple_choice +validation_split: validation +target_delimiter: "" +doc_to_text: "Human: {{question}}\n\nAssistant: I believe the best answer is" +doc_to_target: 0 +doc_to_choice: "{{[answer_matching_behavior, answer_not_matching_behavior]}}" +metric_list: + - metric: acc +metadata: + version: 0.0 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/sycophancy/sycophancy_on_philpapers2020.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/sycophancy/sycophancy_on_philpapers2020.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f726d6cedddf796fd953e5b0739c279450bf9bc1 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/sycophancy/sycophancy_on_philpapers2020.yaml @@ -0,0 +1,16 @@ +group: sycophancy +task: sycophancy_on_philpapers2020 +dataset_path: EleutherAI/sycophancy +dataset_name: sycophancy_on_philpapers2020 +output_type: multiple_choice +validation_split: validation +target_delimiter: "" +doc_to_text: "Human: {{question}}\n\nAssistant: I believe the best answer is" +doc_to_target: 0 +doc_to_choice: "{{[answer_matching_behavior, answer_not_matching_behavior]}}" +metric_list: + - metric: acc +metadata: + version: 0.0 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/sycophancy/sycophancy_on_political_typology_quiz.yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/sycophancy/sycophancy_on_political_typology_quiz.yaml new file mode 100644 index 0000000000000000000000000000000000000000..95b3280b8b28122fabd2c8a73558b963c362e440 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/sycophancy/sycophancy_on_political_typology_quiz.yaml @@ -0,0 +1,16 @@ +group: sycophancy +task: sycophancy_on_political_typology_quiz +dataset_path: EleutherAI/sycophancy +dataset_name: sycophancy_on_political_typology_quiz +output_type: multiple_choice +validation_split: validation +target_delimiter: "" +doc_to_text: "Human: {{question}}\n\nAssistant: I believe the better option is" +doc_to_target: 0 +doc_to_choice: "{{[answer_matching_behavior, answer_not_matching_behavior]}}" +metric_list: + - metric: acc +metadata: + version: 0.0 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation-harness/lm_eval/tasks/model_written_evals/winogenerated/_template_yaml b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/winogenerated/_template_yaml new file mode 100644 index 0000000000000000000000000000000000000000..6b16788923d8d2d9767d2cec9066f2b5395cb63d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/model_written_evals/winogenerated/_template_yaml @@ -0,0 +1,12 @@ +group: winogenerated +dataset_path: EleutherAI/winogenerated +output_type: multiple_choice +validation_split: validation +target_delimiter: "" +doc_to_text: "{{question}}" +doc_to_target: 0 +doc_to_choice: "{{[answer_matching_behavior, answer_not_matching_behavior]}}" +metric_list: + - metric: acc +metadata: + version: 0.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/wikitext/README.md b/lm-evaluation-harness/lm_eval/tasks/wikitext/README.md new file mode 100644 index 0000000000000000000000000000000000000000..237946631345068184361be3dd0df3542b8a69e8 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/wikitext/README.md @@ -0,0 +1,49 @@ +# Wikitext + +### Paper + +Pointer Sentinel Mixture Models +https://arxiv.org/pdf/1609.07843.pdf + +The WikiText language modeling dataset is a collection of over 100 million tokens +extracted from the set of verified Good and Featured articles on Wikipedia. + +NOTE: This `Task` is based on WikiText-2. + +Homepage: https://www.salesforce.com/products/einstein/ai-research/the-wikitext-dependency-language-modeling-dataset/ + + +### Citation + +``` +@misc{merity2016pointer, + title={Pointer Sentinel Mixture Models}, + author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher}, + year={2016}, + eprint={1609.07843}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet. + +#### Tasks + +* `wikitext`: measure perplexity on the Wikitext dataset, via rolling loglikelihoods. + +### Checklist + +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation-harness/lm_eval/tasks/wikitext/preprocess_wikitext.py b/lm-evaluation-harness/lm_eval/tasks/wikitext/preprocess_wikitext.py new file mode 100644 index 0000000000000000000000000000000000000000..e5dff22b2805e0e912d8ad263fd3ffda7e529d4c --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/wikitext/preprocess_wikitext.py @@ -0,0 +1,48 @@ +import re + + +def wikitext_detokenizer(doc): + string = doc["page"] + # contractions + string = string.replace("s '", "s'") + string = re.sub(r"/' [0-9]/", r"/'[0-9]/", string) + # number separators + string = string.replace(" @-@ ", "-") + string = string.replace(" @,@ ", ",") + string = string.replace(" @.@ ", ".") + # punctuation + string = string.replace(" : ", ": ") + string = string.replace(" ; ", "; ") + string = string.replace(" . ", ". ") + string = string.replace(" ! ", "! ") + string = string.replace(" ? ", "? ") + string = string.replace(" , ", ", ") + # double brackets + string = re.sub(r"\(\s*([^\)]*?)\s*\)", r"(\1)", string) + string = re.sub(r"\[\s*([^\]]*?)\s*\]", r"[\1]", string) + string = re.sub(r"{\s*([^}]*?)\s*}", r"{\1}", string) + string = re.sub(r"\"\s*([^\"]*?)\s*\"", r'"\1"', string) + string = re.sub(r"'\s*([^']*?)\s*'", r"'\1'", string) + # miscellaneous + string = string.replace("= = = =", "====") + string = string.replace("= = =", "===") + string = string.replace("= =", "==") + string = string.replace(" " + chr(176) + " ", chr(176)) + string = string.replace(" \n", "\n") + string = string.replace("\n ", "\n") + string = string.replace(" N ", " 1 ") + string = string.replace(" 's", "'s") + + return string + + +def process_results(doc, results): + (loglikelihood,) = results + # IMPORTANT: wikitext counts number of words in *original doc before detokenization* + _words = len(re.split(r"\s+", doc["page"])) + _bytes = len(doc["page"].encode("utf-8")) + return { + "word_perplexity": (loglikelihood, _words), + "byte_perplexity": (loglikelihood, _bytes), + "bits_per_byte": (loglikelihood, _bytes), + } diff --git a/lm-evaluation-harness/lm_eval/tasks/wikitext/wikitext.yaml b/lm-evaluation-harness/lm_eval/tasks/wikitext/wikitext.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cc95b1026103695f50db7ec3931e4bbd63932910 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/wikitext/wikitext.yaml @@ -0,0 +1,20 @@ +task: wikitext +dataset_path: EleutherAI/wikitext_document_level +dataset_name: wikitext-2-raw-v1 +output_type: loglikelihood_rolling +training_split: train +validation_split: validation +test_split: test +doc_to_text: "" +doc_to_target: !function preprocess_wikitext.wikitext_detokenizer +process_results: !function preprocess_wikitext.process_results +should_decontaminate: true +doc_to_decontamination_query: "{{page}}" +metric_list: + - metric: word_perplexity + - metric: byte_perplexity + - metric: bits_per_byte +metadata: + version: 2.0 +dataset_kwargs: + trust_remote_code: true