diff --git a/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es b/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es new file mode 100644 index 0000000000000000000000000000000000000000..25886606943ec02e216d89df64cdc777a405cdaf --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es @@ -0,0 +1,4 @@ +include: eus_exams +group: + - eus_exams_es +doc_to_text: "Pregunta: {{question}}\nA: {{candidates[0]}}\nB: {{candidates[1]}}\nC: {{candidates[2]}}\nD: {{candidates[3]}}\nRespuesta:" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiaux.yaml b/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiaux.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2d61825b0beac1f50137ff42d74b6b649f30ea4e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiaux.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeosakiaux +include: eus_exams_es +task: eus_exams_es_opeosakiaux diff --git a/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiauxenf.yaml b/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiauxenf.yaml new file mode 100644 index 0000000000000000000000000000000000000000..08fe0ed6c014ce69d7655c94ccd9dfdf029c8ce1 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiauxenf.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeosakiauxenf +include: eus_exams_es +task: eus_exams_es_opeosakiauxenf diff --git a/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu b/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu new file mode 100644 index 0000000000000000000000000000000000000000..95b82388df8930a2fe7385fff72f22c4cc516ab4 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu @@ -0,0 +1,4 @@ +include: eus_exams +group: + - eus_exams_eu +doc_to_text: "Galdera: {{question}}\nA: {{candidates[0]}}\nB: {{candidates[1]}}\nC: {{candidates[2]}}\nD: {{candidates[3]}}\nErantzuna:" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_ejteknikari.yaml b/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_ejteknikari.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7b528b9d4ce7ebc2ffc92af84a25e417f2e86929 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_ejteknikari.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_ejteknikari +include: eus_exams_eu +task: eus_exams_eu_ejteknikari diff --git a/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuauxeu.yaml b/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuauxeu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e72082486395abefcebda07de380b670d588589a --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuauxeu.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_opeehuauxeu +include: eus_exams_eu +task: eus_exams_eu_opeehuauxeu diff --git a/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza6e.yaml b/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza6e.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7b2af263fbe039a6ab9e3131f868ab506f0e9b35 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza6e.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_osakidetza6e +include: 
eus_exams_eu +task: eus_exams_eu_osakidetza6e diff --git a/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/utils.py b/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..51e9f4c6322a635cdaeb54d3d557a3797b6dc5f0 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/eus_exams/utils.py @@ -0,0 +1,15 @@ +import datasets + + +def process_docs(dataset: datasets.Dataset): + """Filter out examples with no answer.""" + + def valid_example(example: dict) -> bool: + """Check if an example is valid.""" + if example["answer"] not in [0, 1, 2, 3]: + return False + if example["candidates"] == ["", "", "", ""]: + return False + return True + + return dataset.filter(valid_example) diff --git a/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/README.md b/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ce98279e3eafd134d72658f3db0c9af5eaf755e7 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/README.md @@ -0,0 +1,54 @@ +# ETHICS Dataset + +### Paper + +Aligning AI With Shared Human Values +https://arxiv.org/abs/2008.02275 + +The ETHICS dataset is a benchmark that spans concepts in justice, well-being, +duties, virtues, and commonsense morality. Models predict widespread moral +judgments about diverse text scenarios. This requires connecting physical and +social world knowledge to value judgements, a capability that may enable us +to steer chatbot outputs or eventually regularize open-ended reinforcement +learning agents. + +Homepage: https://github.com/hendrycks/ethics + +### Citation + +``` +@article{hendrycks2021ethics, + title={Aligning AI With Shared Human Values}, + author={Dan Hendrycks and Collin Burns and Steven Basart and Andrew Critch and Jerry Li and Dawn Song and Jacob Steinhardt}, + journal={Proceedings of the International Conference on Learning Representations (ICLR)}, + year={2021} +} +``` + +### Groups and Tasks + +#### Groups + +- `hendrycks_ethics` + +#### Tasks + +* `ethics_cm` +* `ethics_deontology` +* `ethics_justice` +* `ethics_utilitarianism` +* (MISSING) `ethics_utilitarianism_original` +* `ethics_virtue` + +### Checklist + +* [x] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
+ * [ ] Matches v0.3.0 of Eval Harness diff --git a/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/commonsense.yaml b/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/commonsense.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f1270743670604108d852e75116ec584938eb976 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/commonsense.yaml @@ -0,0 +1,15 @@ +group: + - hendrycks_ethics +task: ethics_cm +dataset_path: EleutherAI/hendrycks_ethics +dataset_name: commonsense +output_type: multiple_choice +training_split: train +test_split: test +doc_to_text: "{{input}}\nQuestion: Is this wrong?\nAnswer:" +doc_to_target: label +doc_to_choice: ['no', 'yes'] +metric_list: + - metric: acc +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/deontology.yaml b/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/deontology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a2f36852226e86a817023cf428b00dc3b93ac468 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/deontology.yaml @@ -0,0 +1,9 @@ +include: commonsense.yaml +task: ethics_deontology +dataset_name: deontology +doc_to_text: "Question: Would most people believe this reasonable or unreasonable to say? \"{{scenario}} {{excuse.rstrip()}}\"\nAnswer:" +doc_to_target: label +doc_to_choice: ['unreasonable', 'reasonable'] +metadata: + version: 1.0 +# TODO: implement exact-match metric for this subset diff --git a/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/justice.yaml b/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/justice.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2ba79d16b3e6108392d987f17cd2698555f4fb80 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/justice.yaml @@ -0,0 +1,9 @@ +include: deontology.yaml +group: + - hendrycks_ethics +task: ethics_justice +dataset_name: justice +doc_to_text: "Question: Would most people believe this reasonable or unreasonable to say? \"{{scenario}}\"\nAnswer:" +# TODO: impl. 
exact match for this and deontology +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/utilitarianism.yaml b/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/utilitarianism.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8960a319349067a5ba2542a93e1447cce7899e72 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/utilitarianism.yaml @@ -0,0 +1,12 @@ +include: commonsense.yaml +group: + - hendrycks_ethics +task: ethics_utilitarianism +dataset_name: utilitarianism +doc_to_text: !function utils.doc_to_text +doc_to_target: !function utils.doc_to_target +doc_to_choice: ['no', 'yes'] +metric_list: + - metric: acc +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/utilitarianism_original_yaml b/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/utilitarianism_original_yaml new file mode 100644 index 0000000000000000000000000000000000000000..5583c1b6829e6f0b25093e5b944ea5ca048a0764 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/utilitarianism_original_yaml @@ -0,0 +1,16 @@ +# group: +# - hendrycks_ethics +# task: ethics_utilitarianism_original +# dataset_path: hails/hendrycks_ethics +# dataset_name: utilitarianism +# output_type: winograd_schema +# fewshot_split: null # TODO: implement a special fewshot split for this dataset subsets +# test_split: test +# template_aliases: #"{% set answer_choices = range(1, 11)|list %}" +# doc_to_text: 'Activity: "{{activity}}"\nRating:' +# doc_to_target: "{{answer_choices[label]}}" +# metric_list: +# - metric: acc +# TODO: we want this to be implemented as a winograd_schema task type, actually +# metadata: +# version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/utils.py b/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1ff0daa961c20daaa5dde14fe73d464277c1750a --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/utils.py @@ -0,0 +1,25 @@ +import random + + +### Utils for `ethics_utilitarianism` task below +def _preproc_doc(doc): + rnd = random.Random(doc["activity"]) + scenarios = [doc["activity"], doc["baseline"]] + ordering = [0, 1] + rnd.shuffle(ordering) + doc = { + "scenarios": [scenarios[ordering[0]], scenarios[ordering[1]]], + # The correct scenario is always first + "label": int(ordering.index(0) == 0), + } + return doc + + +def doc_to_text(doc) -> str: + doc = _preproc_doc(doc) + return f"Scenario 1: {doc['scenarios'][0]}\nScenario 2: {doc['scenarios'][1]}\nQuestion: Is Scenario 1 preferable?\nAnswer:" + + +def doc_to_target(doc): + doc = _preproc_doc(doc) + return doc["label"] diff --git a/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/virtue.yaml b/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/virtue.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8c236a983dd08c4d43aa072713984a113330534b --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/virtue.yaml @@ -0,0 +1,10 @@ +include: commonsense.yaml +group: + - hendrycks_ethics +task: ethics_virtue +dataset_name: virtue +doc_to_text: "Sentence: {{scenario}}\nQuestion: Does the character in this sentence exhibit the trait \"{{trait}}\"?\nAnswer:" +doc_to_target: label +doc_to_choice: ['no', 'yes'] +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kormedmcqa/README.md 
b/lm-evaluation/build/lib/lm_eval/tasks/kormedmcqa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b4eb11342731678ca361a739acd8352fb9417676 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kormedmcqa/README.md @@ -0,0 +1,47 @@ +# KorMedMCQA + +### Paper + +Title: `KorMedMCQA: Multi-Choice Question Answering Benchmark for Korean Healthcare Professional Licensing Examinations` + +Abstract: `We introduce KorMedMCQA, the first Korean multiple-choice question answering (MCQA) benchmark derived from Korean healthcare professional licensing examinations, covering from the year 2012 to year 2023. This dataset consists of a selection of questions from the license examinations for doctors, nurses, and pharmacists, featuring a diverse array of subjects. We conduct baseline experiments on various large language models, including proprietary/open-source, multilingual/Korean-additional pretrained, and clinical context pretrained models, highlighting the potential for further enhancements. We make our data publicly available on HuggingFace and provide a evaluation script via LM-Harness, inviting further exploration and advancement in Korean healthcare environments.` + + +Paper : https://arxiv.org/abs/2403.01469 + +Homepage: https://huggingface.co/datasets/sean0042/KorMedMCQA + + +### Citation + +``` +@article{kweon2024kormedmcqa, + title={KorMedMCQA: Multi-Choice Question Answering Benchmark for Korean Healthcare Professional Licensing Examinations}, + author={Sunjun Kweon and Byungjin Choi and Minkyu Kim and Rae Woong Park and Edward Choi}, + journal={arXiv preprint arXiv:2403.01469}, + year={2024} +} +``` + +### Groups and Tasks + +* `kormedmcqa`: Runs `kormedmcqa_doctor`, `kormedmcqa_nurse`, and `kormedmcqa_pharm`. + +#### Tasks + +* `kormedmcqa_doctor`: `Official Korean Doctor Examination` +* `kormedmcqa_nurse`: `Official Korean Nurse Examination` +* `kormedmcqa_pharm`: `Official Korean Pharmacist Examination` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kormedmcqa/kormedmcqa_doctor.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kormedmcqa/kormedmcqa_doctor.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3977569f67d998e27e88f24ac294411e79c6ffdc --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kormedmcqa/kormedmcqa_doctor.yaml @@ -0,0 +1,27 @@ +group: kormedmcqa +task : kormedmcqa_doctor +dataset_path : sean0042/KorMedMCQA +dataset_name : doctor +test_split : test +fewshot_split : dev +fewshot_config: + sampler: first_n +output_type: generate_until +doc_to_text: "{{question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nE. 
{{E}}\n정답:" +doc_to_target: "{{['A', 'B', 'C', 'D', 'E'][answer-1]}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + regexes_to_ignore: + - " " +generation_kwargs: + until: + - "Q:" + - "\n\n" + - "" + - "." + do_sample: false + temperature: 0.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kormedmcqa/kormedmcqa_nurse.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kormedmcqa/kormedmcqa_nurse.yaml new file mode 100644 index 0000000000000000000000000000000000000000..264fb9a765381e97bd1dd60de48b45cbc275f299 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kormedmcqa/kormedmcqa_nurse.yaml @@ -0,0 +1,27 @@ +group: kormedmcqa +task : kormedmcqa_nurse +dataset_path : sean0042/KorMedMCQA +dataset_name : nurse +test_split : test +fewshot_split : dev +fewshot_config: + sampler: first_n +output_type: generate_until +doc_to_text: "{{question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nE. {{E}}\n정답:" +doc_to_target: "{{['A', 'B', 'C', 'D', 'E'][answer-1]}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + regexes_to_ignore: + - " " +generation_kwargs: + until: + - "Q:" + - "\n\n" + - "" + - "." + do_sample: false + temperature: 0.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kormedmcqa/kormedmcqa_pharm.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kormedmcqa/kormedmcqa_pharm.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bd3e6a3103a1afcd9fe54f16817ed5fefa6c2e5e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kormedmcqa/kormedmcqa_pharm.yaml @@ -0,0 +1,27 @@ +group: kormedmcqa +task : kormedmcqa_pharm +dataset_path : sean0042/KorMedMCQA +dataset_name : pharm +test_split : test +fewshot_split : dev +fewshot_config: + sampler: first_n +output_type: generate_until +doc_to_text: "{{question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nE. {{E}}\n정답:" +doc_to_target: "{{['A', 'B', 'C', 'D', 'E'][answer-1]}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + regexes_to_ignore: + - " " +generation_kwargs: + until: + - "Q:" + - "\n\n" + - "" + - "." + do_sample: false + temperature: 0.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/README.md b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/README.md new file mode 100644 index 0000000000000000000000000000000000000000..5af16562e07d04a15b5313a2fadc61f1f2680036 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/README.md @@ -0,0 +1,48 @@ +# Multilingual HellaSwag + +### Paper + +Title: `Okapi: Instruction-tuned Large Language Models in Multiple Languages with Reinforcement Learning from Human Feedback` + +Abstract: https://arxiv.org/abs/2307.16039 + +A key technology for the development of large language models (LLMs) involves instruction tuning that helps align the models' responses with human expectations to realize impressive learning abilities. Two major approaches for instruction tuning characterize supervised fine-tuning (SFT) and reinforcement learning from human feedback (RLHF), which are currently applied to produce the best commercial LLMs (e.g., ChatGPT). To improve the accessibility of LLMs for research and development efforts, various instruction-tuned open-source LLMs have also been introduced recently, e.g., Alpaca, Vicuna, to name a few. 
However, existing open-source LLMs have only been instruction-tuned for English and a few popular languages, thus hindering their impacts and accessibility to many other languages in the world. Among a few very recent work to explore instruction tuning for LLMs in multiple languages, SFT has been used as the only approach to instruction-tune LLMs for multiple languages. This has left a significant gap for fine-tuned LLMs based on RLHF in diverse languages and raised important questions on how RLHF can boost the performance of multilingual instruction tuning. To overcome this issue, we present Okapi, the first system with instruction-tuned LLMs based on RLHF for multiple languages. Okapi introduces instruction and response-ranked data in 26 diverse languages to facilitate the experiments and development of future multilingual LLM research. We also present benchmark datasets to enable the evaluation of generative LLMs in multiple languages. Our experiments demonstrate the advantages of RLHF for multilingual instruction over SFT for different base models and datasets. Our framework and resources are released at this https URL. + +Homepage: `https://github.com/nlp-uoregon/Okapi` + + +### Citation + +``` +@article{dac2023okapi, + title={Okapi: Instruction-tuned Large Language Models in Multiple Languages with Reinforcement Learning from Human Feedback}, + author={Dac Lai, Viet and Van Nguyen, Chien and Ngo, Nghia Trung and Nguyen, Thuat and Dernoncourt, Franck and Rossi, Ryan A and Nguyen, Thien Huu}, + journal={arXiv e-prints}, + pages={arXiv--2307}, + year={2023} +} +``` + +### Groups and Tasks + +#### Groups + +- hellaswag_multilingual + +#### Tasks + +- `hellaswag_{ar,bn,ca,da,de,es,eu,fr,gu,hi,hr,hu,hy,id,it,kn,ml,mr,ne,nl,pt,ro,ru,sk,sr,sv,ta,te,uk,vi}` + + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
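The per-language configs added below all follow one pattern: each `hellaswag_<lang>.yaml` includes the shared `_hellaswag_yaml` base and overrides only `task`, `dataset_path`, `dataset_name`, and the split names. A minimal generator sketch along these lines could emit them mechanically; this is a hypothetical script (it assumes PyYAML is installed, the language list is abbreviated, and the harness's own generation utility may differ in detail):

```python
# Hypothetical generator sketch for the per-language HellaSwag configs below.
# Assumes PyYAML; the language list is truncated for brevity.
import yaml

LANGS = ["ar", "bn", "ca", "da", "de", "es", "eu", "fr"]  # truncated

def write_configs(base: str = "_hellaswag_yaml") -> None:
    for lang in LANGS:
        cfg = {
            "include": base,
            "task": f"hellaswag_{lang}",
            "dataset_path": "alexandrainst/m_hellaswag",
            "dataset_name": lang,
            "training_split": None,
            "validation_split": "val",
        }
        # Write one small YAML per language, mirroring the files in this diff.
        with open(f"hellaswag_{lang}.yaml", "w") as f:
            yaml.dump(cfg, f, default_flow_style=False, sort_keys=False)

if __name__ == "__main__":
    write_configs()
```

Each emitted file has the same shape as the hand-written ones that follow.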
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/_hellaswag_yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/_hellaswag_yaml new file mode 100644 index 0000000000000000000000000000000000000000..5be1d03ae07f83b0c9647e8719d7d4a2f8ee64f7 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/_hellaswag_yaml @@ -0,0 +1,21 @@ +group: + - hellaswag_multilingual +dataset_path: null +dataset_name: null +output_type: multiple_choice +training_split: null +validation_split: validation +test_split: null +process_docs: !function utils.process_docs +doc_to_text: "query" +doc_to_target: "{{label.lstrip()}}" +doc_to_choice: "choices" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ca.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ca.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0607ca9443fd787b14f3652ee79b332f6ba08d97 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ca.yaml @@ -0,0 +1,6 @@ +include: _hellaswag_yaml +task: hellaswag_ca +dataset_path: alexandrainst/m_hellaswag +dataset_name: ca +training_split: null +validation_split: val diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_da.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_da.yaml new file mode 100644 index 0000000000000000000000000000000000000000..608f8d5206b71a33db3dbb68f3c84a4f790d7280 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_da.yaml @@ -0,0 +1,6 @@ +include: _hellaswag_yaml +task: hellaswag_da +dataset_path: alexandrainst/m_hellaswag +dataset_name: da +training_split: null +validation_split: val diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_de.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_de.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6c103a832115bbffd6da684ff1459b22b310c659 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_de.yaml @@ -0,0 +1,6 @@ +include: _hellaswag_yaml +task: hellaswag_de +dataset_path: alexandrainst/m_hellaswag +dataset_name: de +training_split: null +validation_split: val diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_eu.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_eu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7fdbaae7c26a53ab8c12b71449948dda6653746e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_eu.yaml @@ -0,0 +1,6 @@ +include: _hellaswag_yaml +task: hellaswag_eu +dataset_path: alexandrainst/m_hellaswag +dataset_name: eu +training_split: null +validation_split: val diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_gu.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_gu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0908b8238196caf1069a2683a575c1caa94d4700 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_gu.yaml @@ -0,0 
+1,6 @@ +include: _hellaswag_yaml +task: hellaswag_gu +dataset_path: alexandrainst/m_hellaswag +dataset_name: gu +training_split: null +validation_split: val diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_hi.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_hi.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c2110785501a1c8f0b6dc0c73ffc73a93ba85d92 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_hi.yaml @@ -0,0 +1,6 @@ +include: _hellaswag_yaml +task: hellaswag_hi +dataset_path: alexandrainst/m_hellaswag +dataset_name: hi +training_split: null +validation_split: val diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_hr.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_hr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7e4b547b00a486508696cb126e8be6b2af2988c5 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_hr.yaml @@ -0,0 +1,6 @@ +include: _hellaswag_yaml +task: hellaswag_hr +dataset_path: alexandrainst/m_hellaswag +dataset_name: hr +training_split: null +validation_split: val diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_hy.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_hy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a00c55231c145705513ab2f9b7d26b77714df530 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_hy.yaml @@ -0,0 +1,6 @@ +include: _hellaswag_yaml +task: hellaswag_hy +dataset_path: alexandrainst/m_hellaswag +dataset_name: hy +training_split: null +validation_split: val diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_id.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_id.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4c3b39fdb27746d4b6721f17b7a1b2a9f990ed11 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_id.yaml @@ -0,0 +1,6 @@ +include: _hellaswag_yaml +task: hellaswag_id +dataset_path: alexandrainst/m_hellaswag +dataset_name: id +training_split: null +validation_split: val diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_it.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_it.yaml new file mode 100644 index 0000000000000000000000000000000000000000..97be88b8e3dbefc81ed428d8f0e4675481c607c3 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_it.yaml @@ -0,0 +1,6 @@ +include: _hellaswag_yaml +task: hellaswag_it +dataset_path: alexandrainst/m_hellaswag +dataset_name: it +training_split: null +validation_split: val diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_kn.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_kn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..40d924c85e1827c708d19a75935bb06db881c1f1 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_kn.yaml @@ -0,0 +1,6 @@ +include: _hellaswag_yaml +task: hellaswag_kn +dataset_path: alexandrainst/m_hellaswag +dataset_name: kn +training_split: null +validation_split: val 
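For clarity, here is roughly what one of these thin per-language files resolves to once its `include:` is applied. This is only an illustration of the merge semantics, assuming the harness's include handling behaves like a dict update over the base `_hellaswag_yaml` shown above; the `!function` entry and the metric list are omitted because they need the harness's custom YAML constructors.

```python
# Illustration only: a thin per-language file merged over its included base.
# Child keys win, e.g. validation_split becomes "val". Assumes PyYAML.
import yaml

base_yaml = """
output_type: multiple_choice
validation_split: validation
doc_to_text: query
doc_to_target: "{{label.lstrip()}}"
doc_to_choice: choices
"""

child_yaml = """
task: hellaswag_ca
dataset_path: alexandrainst/m_hellaswag
dataset_name: ca
validation_split: val
"""

base = yaml.safe_load(base_yaml)
child = yaml.safe_load(child_yaml)
resolved = {**base, **child}
print(resolved["task"], resolved["output_type"], resolved["validation_split"])
# -> hellaswag_ca multiple_choice val
```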
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ml.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ml.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6337b4f682c52be21290e1cb0dd83ce56d82f7b9 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ml.yaml @@ -0,0 +1,6 @@ +include: _hellaswag_yaml +task: hellaswag_ml +dataset_path: alexandrainst/m_hellaswag +dataset_name: ml +training_split: null +validation_split: val diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_mr.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_mr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d4fbaff49eaf49370c1d900532fbd8a08fb4302e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_mr.yaml @@ -0,0 +1,6 @@ +include: _hellaswag_yaml +task: hellaswag_mr +dataset_path: alexandrainst/m_hellaswag +dataset_name: mr +training_split: null +validation_split: val diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ne.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ne.yaml new file mode 100644 index 0000000000000000000000000000000000000000..75d12fb26c62dc6984ff225770bea2f1f8b50a43 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ne.yaml @@ -0,0 +1,6 @@ +include: _hellaswag_yaml +task: hellaswag_ne +dataset_path: alexandrainst/m_hellaswag +dataset_name: ne +training_split: null +validation_split: val diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_nl.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_nl.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2c3ed2e8d6ea4e528fb2a44d523ac00af1ad65ed --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_nl.yaml @@ -0,0 +1,6 @@ +include: _hellaswag_yaml +task: hellaswag_nl +dataset_path: alexandrainst/m_hellaswag +dataset_name: nl +training_split: null +validation_split: val diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_pt.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_pt.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7082b5a615dbeb4777ebc0e039da03543f2e1d1d --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_pt.yaml @@ -0,0 +1,6 @@ +include: _hellaswag_yaml +task: hellaswag_pt +dataset_path: alexandrainst/m_hellaswag +dataset_name: pt +training_split: null +validation_split: val diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ro.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ro.yaml new file mode 100644 index 0000000000000000000000000000000000000000..04b8d1374755673343f4540eba306ff780d8a03e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ro.yaml @@ -0,0 +1,6 @@ +include: _hellaswag_yaml +task: hellaswag_ro +dataset_path: alexandrainst/m_hellaswag +dataset_name: ro +training_split: null +validation_split: val diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ru.yaml 
b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ru.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0a10a5e9899385317282a3205be8de64a2d13687 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ru.yaml @@ -0,0 +1,6 @@ +include: _hellaswag_yaml +task: hellaswag_ru +dataset_path: alexandrainst/m_hellaswag +dataset_name: ru +training_split: null +validation_split: val diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_sk.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_sk.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7b831f755f7cd055a8aacd2ae1d6b39d2a31bc0f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_sk.yaml @@ -0,0 +1,6 @@ +include: _hellaswag_yaml +task: hellaswag_sk +dataset_path: alexandrainst/m_hellaswag +dataset_name: sk +training_split: null +validation_split: val diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_sr.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_sr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9dfae80cf0fc6d2113ce327771a80cd55eeb2dfc --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_sr.yaml @@ -0,0 +1,6 @@ +include: _hellaswag_yaml +task: hellaswag_sr +dataset_path: alexandrainst/m_hellaswag +dataset_name: sr +training_split: null +validation_split: val diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_sv.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_sv.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8ca7d56778850e21252d0e67288c801f0f070df9 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_sv.yaml @@ -0,0 +1,6 @@ +include: _hellaswag_yaml +task: hellaswag_sv +dataset_path: alexandrainst/m_hellaswag +dataset_name: sv +training_split: null +validation_split: val diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ta.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ta.yaml new file mode 100644 index 0000000000000000000000000000000000000000..16d489429063e6d5a65dae9bcd90d55c2d0a594f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ta.yaml @@ -0,0 +1,6 @@ +include: _hellaswag_yaml +task: hellaswag_ta +dataset_path: alexandrainst/m_hellaswag +dataset_name: ta +training_split: null +validation_split: val diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_vi.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_vi.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6722d853e5b9828f09986cac6bec0e55b23d4c57 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_vi.yaml @@ -0,0 +1,6 @@ +include: _hellaswag_yaml +task: hellaswag_vi +dataset_path: alexandrainst/m_hellaswag +dataset_name: vi +training_split: null +validation_split: val diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/utils.py b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/utils.py new file mode 100644 index 
0000000000000000000000000000000000000000..b526a9e93076f7db54221072d58ca4bd7161ee97 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/utils.py @@ -0,0 +1,25 @@ +import re + +import datasets + + +def preprocess(text): + text = text.strip() + # NOTE: Brackets are artifacts of the WikiHow dataset portion of HellaSwag. + text = text.replace(" [title]", ". ") + text = re.sub("\\[.*?\\]", "", text) + text = text.replace(" ", " ") + return text + + +def process_docs(dataset: datasets.Dataset) -> datasets.Dataset: + def _process_doc(doc): + ctx = doc["ctx_a"] + " " + doc["ctx_b"].capitalize() + out_doc = { + "query": preprocess(doc["activity_label"] + ": " + ctx), + "choices": [preprocess(ending) for ending in doc["endings"]], + "gold": int(doc["label"]), + } + return out_doc + + return dataset.map(_process_doc) diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_ar.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_ar.yaml new file mode 100644 index 0000000000000000000000000000000000000000..70f6473a859fbf82b10db2cd4cf4d0707c6a5538 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_ar.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: ar +include: _default_yaml +task: m_mmlu_ar diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_da.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_da.yaml new file mode 100644 index 0000000000000000000000000000000000000000..95eb1dc9b190fea94281e56b4564cc9e4701a1d8 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_da.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: da +include: _default_yaml +task: m_mmlu_da diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_hr.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_hr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0c6e24d8e16bfeaa96dbaa106889c11106538026 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_hr.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: hr +include: _default_yaml +task: m_mmlu_hr diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_is.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_is.yaml new file mode 100644 index 0000000000000000000000000000000000000000..494b0c10acf484b378c17cb2537660d9d6fdb80b --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_is.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: is +include: _default_yaml +task: m_mmlu_is diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_it.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_it.yaml new file mode 100644 index 0000000000000000000000000000000000000000..30795d329a290ec78a795aa6bea738b548f237e7 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_it.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: it +include: _default_yaml +task: m_mmlu_it diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_kn.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_kn.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..82d026c7e4cdc9a58c0df8a360b86d45267ed00b --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_kn.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: kn +include: _default_yaml +task: m_mmlu_kn diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_mr.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_mr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f6f6df7f30c9c9555a6a0751e5059e22c130b26c --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_mr.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: mr +include: _default_yaml +task: m_mmlu_mr diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_ru.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_ru.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ce379b61e4e88832b9c4e007188ead2ddcd74fb1 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_ru.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: ru +include: _default_yaml +task: m_mmlu_ru diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_sr.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_sr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..22b0ad7755564491096207d41e964535b9b8cf24 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_sr.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: sr +include: _default_yaml +task: m_mmlu_sr diff --git a/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_sv.yaml b/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_sv.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d433d08259c94b14fbe685dcee21f080242e8168 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_sv.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: sv +include: _default_yaml +task: m_mmlu_sv diff --git a/lm-evaluation/build/lib/lm_eval/tasks/race/README.md b/lm-evaluation/build/lib/lm_eval/tasks/race/README.md new file mode 100644 index 0000000000000000000000000000000000000000..dfe6c5e8a50da470e22be690e9e10612d830f957 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/race/README.md @@ -0,0 +1,62 @@ +# RACE + +### Paper + +Title: `RACE: Large-scale ReAding Comprehension Dataset From Examinations` + +Abstract: https://arxiv.org/abs/1704.04683 + +RACE is a large-scale reading comprehension dataset with more than 28,000 passages +and nearly 100,000 questions. The dataset is collected from English examinations +in China, which are designed for middle school and high school students. The dataset +can be served as the training and test sets for machine comprehension. 
+ +Homepage: https://www.cs.cmu.edu/~glai1/data/race/ + + +### Citation + +``` +@inproceedings{lai-etal-2017-race, + title = "{RACE}: Large-scale {R}e{A}ding Comprehension Dataset From Examinations", + author = "Lai, Guokun and + Xie, Qizhe and + Liu, Hanxiao and + Yang, Yiming and + Hovy, Eduard", + editor = "Palmer, Martha and + Hwa, Rebecca and + Riedel, Sebastian", + booktitle = "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", + month = sep, + year = "2017", + address = "Copenhagen, Denmark", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/D17-1082", + doi = "10.18653/v1/D17-1082", + pages = "785--794" +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet. + +#### Tasks + +* `race` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/build/lib/lm_eval/tasks/race/preprocess_race.py b/lm-evaluation/build/lib/lm_eval/tasks/race/preprocess_race.py new file mode 100644 index 0000000000000000000000000000000000000000..03a214e5747876325d118bf4660b0e5c7e9d5142 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/race/preprocess_race.py @@ -0,0 +1,40 @@ +import ast + + +def process_ast(string): + return ast.literal_eval(string) + + +def last_problem(doc): + return process_ast(doc["problems"])[-1] + + +def get_answer_option(problem): + letter_to_num = {"A": 0, "B": 1, "C": 2, "D": 3} + answer = letter_to_num[problem["answer"]] + return problem["options"][answer] + + +def doc_to_choice(doc): + problem = last_problem(doc) + choices = [problem["options"][i] for i in range(4)] + return choices + + +def doc_to_text(doc): + text = "Article: " + doc["article"] + "\n\n" + for problem in process_ast(doc["problems"])[:-1]: + if problem["question"][-6:] == " _ .": + text += problem["question"][-5:] + get_answer_option(problem) + "\n" + else: + question = "Question: " + problem["question"] + "\n" + answer = "Answer: " + get_answer_option(problem) + "\n" + text += question + answer + text += last_problem(doc)["question"] + return text + + +def doc_to_target(doc): + letter_to_num = {"A": 0, "B": 1, "C": 2, "D": 3} + answer = letter_to_num[last_problem(doc)["answer"]] + return answer diff --git a/lm-evaluation/build/lib/lm_eval/tasks/race/race.yaml b/lm-evaluation/build/lib/lm_eval/tasks/race/race.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b90b809f6120924f398372a454ce4ba74220bbe9 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/race/race.yaml @@ -0,0 +1,16 @@ +task: race +dataset_path: EleutherAI/race +dataset_name: high +output_type: multiple_choice +test_split: test +doc_to_text: !function preprocess_race.doc_to_text +doc_to_target: !function preprocess_race.doc_to_target +doc_to_choice: !function preprocess_race.doc_to_choice +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true 
+metadata: + version: 2.0 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/super_glue/README.md b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c8e807718af5abcec3cbb0ac91af2aab6cb4a3fc --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/README.md @@ -0,0 +1,77 @@ +# SuperGLUE + +### Paper + +Title: `SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems` +Abstract: `https://w4ngatang.github.io/static/papers/superglue.pdf` + +SuperGLUE is a benchmark styled after GLUE with a new set of more difficult language +understanding tasks. + +Homepage: https://super.gluebenchmark.com/ + +### Citation + +``` +@inproceedings{NEURIPS2019_4496bf24, + author = {Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel}, + booktitle = {Advances in Neural Information Processing Systems}, + editor = {H. Wallach and H. Larochelle and A. Beygelzimer and F. d\textquotesingle Alch\'{e}-Buc and E. Fox and R. Garnett}, + pages = {}, + publisher = {Curran Associates, Inc.}, + title = {SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, + url = {https://proceedings.neurips.cc/paper/2019/file/4496bf24afe7fab6f046bf4923da8de6-Paper.pdf}, + volume = {32}, + year = {2019} +} +``` + +### Groups and Tasks + +#### Groups + +* `super-glue-lm-eval-v1`: SuperGLUE eval adapted from LM Eval V1 +* `super-glue-t5-prompt`: SuperGLUE prompt and evaluation that matches the T5 paper (if using accelerate, will error if record is included.) + +#### Tasks + +Comparison between validation split score on T5x and LM-Eval (T5x models converted to HF) +| T5V1.1 Base | SGLUE | BoolQ | CB | Copa | MultiRC | ReCoRD | RTE | WiC | WSC | +| ----------- | ------| ----- | --------- | ---- | ------- | ------ | --- | --- | --- | +| T5x | 69.47 | 78.47(acc) | 83.93(f1) 87.5(acc) | 50(acc) | 73.81(f1) 33.26(em) | 70.09(em) 71.34(f1) | 78.7(acc) | 63.64(acc) | 75(acc) | +| LM-Eval | 71.35 | 79.36(acc) | 83.63(f1) 87.5(acc) | 63(acc) | 73.45(f1) 33.26(em) | 69.85(em) 68.86(f1) | 78.34(acc) | 65.83(acc) | 75.96(acc) | + + + +* `super-glue-lm-eval-v1` + - `boolq` + - `cb` + - `copa` + - `multirc` + - `record` + - `rte` + - `wic` + - `wsc` + +* `super-glue-t5-prompt` + - `super_glue-boolq-t5-prompt` + - `super_glue-cb-t5-prompt` + - `super_glue-copa-t5-prompt` + - `super_glue-multirc-t5-prompt` + - `super_glue-record-t5-prompt` + - `super_glue-rte-t5-prompt` + - `super_glue-wic-t5-prompt` + - `super_glue-wsc-t5-prompt` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
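The BoolQ configs that follow illustrate the difference between the two groups: `super-glue-lm-eval-v1` defines a `multiple_choice` task scored over fixed answer choices, while `super-glue-t5-prompt` is a `generate_until` task using the T5-style text prompt. The sketch below renders both `doc_to_text` templates for one invented document; the harness performs this Jinja rendering itself, and jinja2 is assumed to be installed.

```python
# Renders the two BoolQ prompt styles defined just below for an invented doc,
# to show how the lm-eval-v1 and t5-prompt variants differ.
from jinja2 import Template

doc = {
    "passage": "The harness supports YAML-configured tasks.",
    "question": "does the harness support YAML tasks",
}

lm_eval_v1 = Template("{{passage}}\nQuestion: {{question}}?\nAnswer:")
t5_prompt = Template("boolq passage: {{passage}} question: {{question}}")

print(lm_eval_v1.render(**doc))  # loglikelihood-scored against ["no", "yes"]
print(t5_prompt.render(**doc))   # model must generate "False" or "True"
```

The first prompt is scored by comparing log-likelihoods of the choices `["no", "yes"]`; the second is scored by exact match against the generated string `False` or `True`.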
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/super_glue/boolq/default.yaml b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/boolq/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f26e4682c40ff7c7ba1183fecaadb5718206dbfd --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/boolq/default.yaml @@ -0,0 +1,17 @@ +group: + - super-glue-lm-eval-v1 +task: boolq +dataset_path: super_glue +dataset_name: boolq +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "{{passage}}\nQuestion: {{question}}?\nAnswer:" +doc_to_target: label +doc_to_choice: ["no", "yes"] +should_decontaminate: true +doc_to_decontamination_query: passage +metric_list: + - metric: acc +metadata: + version: 2.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/super_glue/boolq/seq2seq.yaml b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/boolq/seq2seq.yaml new file mode 100644 index 0000000000000000000000000000000000000000..569316cb31b909755ba6916dea4e54f80fc95df1 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/boolq/seq2seq.yaml @@ -0,0 +1,26 @@ +group: + - super-glue-lm-eval-v1-seq2seq +task: "boolq-seq2seq" +dataset_path: super_glue +dataset_name: boolq +output_type: generate_until +training_split: train +validation_split: validation +doc_to_text: "{{passage}}\nQuestion: {{question}}?\nAnswer:" +doc_to_target: label +doc_to_choice: [' no', ' yes'] +target_delimiter: "" +generation_kwargs: + until: + - "\n\n" + - "\n" + do_sample: false + temperature: 0.0 +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +metadata: + version: 0.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7089381ad86c05913b111d1888878b721a33a222 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml @@ -0,0 +1,22 @@ +group: + - super-glue-t5-prompt +task: super_glue-boolq-t5-prompt +dataset_path: super_glue +dataset_name: boolq +training_split: train +validation_split: validation +output_type: generate_until +doc_to_text: "boolq passage: {{passage}} question: {{question}}" +doc_to_target: label +doc_to_choice: ['False', 'True'] +generation_kwargs: + until: + - "" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +metadata: + version: 0.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/super_glue/cb/aggregate.py b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/cb/aggregate.py new file mode 100644 index 0000000000000000000000000000000000000000..4b99849f9bfa8307006879666ecf971b17b511b2 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/cb/aggregate.py @@ -0,0 +1,13 @@ +import numpy as np +import sklearn + + +def cb_multi_fi(items): + preds, golds = zip(*items) + preds = np.array(preds) + golds = np.array(golds) + f11 = sklearn.metrics.f1_score(y_true=golds == 0, y_pred=preds == 0) + f12 = sklearn.metrics.f1_score(y_true=golds == 1, y_pred=preds == 1) + f13 = sklearn.metrics.f1_score(y_true=golds == 2, y_pred=preds == 2) + avg_f1 = np.mean([f11, f12, f13]) + return avg_f1 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/super_glue/cb/default.yaml b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/cb/default.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..c575e9872aa712eff69f779a7114d5baed487706 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/cb/default.yaml @@ -0,0 +1,17 @@ +group: + - super-glue-lm-eval-v1 +task: cb +dataset_path: super_glue +dataset_name: cb +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "{{premise}}\nQuestion: {{hypothesis}}. True, False, or Neither?\nAnswer:" +doc_to_target: label +doc_to_choice: ['True', 'False', 'Neither'] +metric_list: + - metric: acc + - metric: f1 + aggregation: !function "aggregate.cb_multi_fi" +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/super_glue/cb/t5-prompt.yaml b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/cb/t5-prompt.yaml new file mode 100644 index 0000000000000000000000000000000000000000..984e17935ad2479fb9d48dabfeb14f14269da2db --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/cb/t5-prompt.yaml @@ -0,0 +1,25 @@ +group: + - super-glue-t5-prompt +task: super_glue-cb-t5-prompt +dataset_path: super_glue +dataset_name: cb +training_split: train +validation_split: validation +output_type: generate_until +doc_to_text: "cb hypothesis: {{hypothesis}} premise: {{premise}}" +doc_to_target: label +doc_to_choice: ['entailment', 'contradiction', 'neutral'] +generation_kwargs: + until: + - "" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + - metric: !function "t5_utils.mean_3class_f1" + aggregation: !function "t5_utils.agg_mean_3class_f1" + higher_is_better: true +metadata: + version: 0.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/super_glue/cb/t5_utils.py b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/cb/t5_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ec02e34538e15f71861f354b437060da5390544e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/cb/t5_utils.py @@ -0,0 +1,30 @@ +import sklearn.metrics + + +def mean_3class_f1(predictions, references): # This is a passthrough function + string_label = ["entailment", "contradiction", "neutral"] + predictions = ( + string_label.index(predictions[0]) if predictions[0] in string_label else 0 + ) + references = string_label.index(references[0]) + + return (predictions, references) + + +def agg_mean_3class_f1(items): + predictions, references = zip(*items) + + """Computes the unweighted average of the F1 per class.""" + metric_str = "fbeta_score" + metric_fn_kwargs = { + "beta": 1, + "labels": range(3), + "average": "macro", + } + + def _fn(predictions, references): + metric_fn = getattr(sklearn.metrics, metric_str) + metric_val = metric_fn(references, predictions, **metric_fn_kwargs) + return metric_val + + return _fn(predictions, references) diff --git a/lm-evaluation/build/lib/lm_eval/tasks/super_glue/copa/default.yaml b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/copa/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1af5dbf47258e203e7a1b506e7ba6e91351a61e4 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/copa/default.yaml @@ -0,0 +1,15 @@ +group: + - super-glue-lm-eval-v1 +task: copa +dataset_path: super_glue +dataset_name: copa +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: !function utils.doc_to_text +doc_to_target: !function utils.doc_to_target +doc_to_choice: !function utils.doc_to_choice +metric_list: + - metric: acc +metadata: + version: 
1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/super_glue/copa/t5-prompt.yaml b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/copa/t5-prompt.yaml new file mode 100644 index 0000000000000000000000000000000000000000..20a90db98d28a78307b7e46b99834eaf98cc3f9e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/copa/t5-prompt.yaml @@ -0,0 +1,22 @@ +group: + - super-glue-t5-prompt +task: super_glue-copa-t5-prompt +dataset_path: super_glue +dataset_name: copa +training_split: train +validation_split: validation +output_type: generate_until +doc_to_text: "copa choice1: {{choice1}} choice2: {{choice2}} premise: {{premise}} question: {{question}}" +doc_to_target: label +doc_to_choice: ['choice1', 'choice2'] +generation_kwargs: + until: + - "" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +metadata: + version: 0.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/super_glue/copa/utils.py b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/copa/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3afc868eb486c47c51b0036ce955502bc377c9c4 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/copa/utils.py @@ -0,0 +1,21 @@ +def convert_choice(choice): + return choice[0].lower() + choice[1:] + + +def doc_to_text(doc): + # Drop the period + connector = { + "cause": "because", + "effect": "therefore", + }[doc["question"]] + return doc["premise"].strip()[:-1] + f" {connector}" + + +def doc_to_target(doc): + correct_choice = doc["choice1"] if doc["label"] == 0 else doc["choice2"] + # Connect the sentences + return " " + convert_choice(correct_choice) + + +def doc_to_choice(doc): + return [" " + convert_choice(doc["choice1"]), " " + convert_choice(doc["choice2"])] diff --git a/lm-evaluation/build/lib/lm_eval/tasks/super_glue/multirc/default.yaml b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/multirc/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5a388299f6496673a3edc9c5047fddd1a14302e4 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/multirc/default.yaml @@ -0,0 +1,15 @@ +group: + - super-glue-lm-eval-v1 +task: multirc +dataset_path: super_glue +dataset_name: multirc +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "{{paragraph}}\nQuestion: {{question}}\nAnswer:" +doc_to_target: label +doc_to_choice: "['''{{answer}}\\nIs the answer correct? yes''', '''{{answer}}\\nIs the answer correct? 
no''']" +metric_list: + - metric: acc +metadata: + version: 2.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/super_glue/multirc/t5-prompt.yaml b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/multirc/t5-prompt.yaml new file mode 100644 index 0000000000000000000000000000000000000000..927a357158abf96502f955470fcd8afbe0eee49c --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/multirc/t5-prompt.yaml @@ -0,0 +1,23 @@ +group: + - super-glue-t5-prompt +task: super_glue-multirc-t5-prompt +dataset_path: super_glue +dataset_name: multirc +training_split: train +validation_split: validation +output_type: generate_until +doc_to_text: "multirc question: {{question}} answer: {{answer}} paragraph: {{paragraph}}" +doc_to_target: label +doc_to_choice: "{% set group_id = idx.question|string %}{{[group_id+'_False', group_id+'_True']}}" +generation_kwargs: + until: + - "" +metric_list: + - metric: !function t5_utils.f1 + aggregation: !function t5_utils.agg_f1 + higher_is_better: true + - metric: !function t5_utils.em + aggregation: !function t5_utils.agg_em + higher_is_better: true +metadata: + version: 0.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/super_glue/multirc/t5_utils.py b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/multirc/t5_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d17d498fa25db9a6d7f56e03c43c9e661d66f9f1 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/multirc/t5_utils.py @@ -0,0 +1,53 @@ +import collections + +import numpy as np +import sklearn.metrics + + +def f1(predictions, references): # This is a passthrough function + _prediction = predictions[0] + _reference = references[0].split("_")[-1] + string_label = ["False", "True"] + reference = string_label.index(_reference) + prediction = ( + string_label.index(_prediction) + if _prediction in string_label + else not bool(reference) + ) + + return (prediction, reference) + + +def agg_f1(items): + predictions, references = zip(*items) + references, predictions = np.asarray(references), np.asarray(predictions) + + return sklearn.metrics.f1_score(references, predictions) + + +def em(predictions, references): # This is a passthrough function + _prediction = predictions[0] + _group, _reference = references[0].split("_") + string_label = ["False", "True"] + reference = string_label.index(_reference) + prediction = ( + string_label.index(_prediction) + if _prediction in string_label + else not bool(reference) + ) + + return (_group, prediction, reference) + + +def agg_em(items): + grouped_values = collections.defaultdict(lambda: ([], [])) + for group, prediction, reference in items: + grouped_values[group][0].append(reference) + grouped_values[group][1].append(prediction) + + group_scores = [] + for group, (targets, predictions) in grouped_values.items(): + score = float(np.array_equal(targets, predictions)) + group_scores.append(score) + + return np.mean(group_scores) diff --git a/lm-evaluation/build/lib/lm_eval/tasks/super_glue/wic/default.yaml b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/wic/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0f86855a7811ca1e2c11f61201237f8d10ed524c --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/wic/default.yaml @@ -0,0 +1,15 @@ +group: + - super-glue-lm-eval-v1 +task: "wic" +dataset_path: super_glue +dataset_name: wic +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "Sentence 1: {{sentence1}}\nSentence 2: 
{{sentence2}}\nQuestion: Is the word '{{sentence1[start1:end1]}}' used in the same way in the two sentences above?\nAnswer:" +doc_to_target: label +doc_to_choice: ['no', 'yes'] +metric_list: + - metric: acc +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/super_glue/wic/t5-prompt.yaml b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/wic/t5-prompt.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3a0dbb2f7fd64f2ec3ae3e6d58c4dd7e0963edc2 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/super_glue/wic/t5-prompt.yaml @@ -0,0 +1,22 @@ +group: + - super-glue-t5-prompt +task: super_glue-wic-t5-prompt +dataset_path: super_glue +dataset_name: wic +training_split: train +validation_split: validation +output_type: generate_until +doc_to_text: "wic sentence1: {{sentence1}} sentence2: {{sentence2}} word: {{word}}" +doc_to_target: label +doc_to_choice: ['False', 'True'] +generation_kwargs: + until: + - "" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +metadata: + version: 0.0
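As a closing note on the WiC configs: the default variant never uses the dataset's `word` column in its prompt; it slices the target word out of `sentence1` with the `start1`/`end1` character offsets, whereas the t5-prompt variant passes `word` directly. A toy example (field values invented) showing that the slice and the `word` field agree:

```python
# Toy illustration of the WiC default prompt above: the target word is taken
# from sentence1 via the start1/end1 character offsets. Values are invented.
doc = {
    "sentence1": "He sat on the bank of the river.",
    "sentence2": "She deposited the check at the bank.",
    "start1": 14,
    "end1": 18,
    "word": "bank",
}

target_word = doc["sentence1"][doc["start1"]:doc["end1"]]
assert target_word == doc["word"] == "bank"

prompt = (
    f"Sentence 1: {doc['sentence1']}\n"
    f"Sentence 2: {doc['sentence2']}\n"
    f"Question: Is the word '{target_word}' used in the same way "
    f"in the two sentences above?\nAnswer:"
)
print(prompt)  # answered with the choices ['no', 'yes']
```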