diff --git a/lm-evaluation/build/lib/lm_eval/tasks/csatqa/_default_csatqa_yaml b/lm-evaluation/build/lib/lm_eval/tasks/csatqa/_default_csatqa_yaml new file mode 100644 index 0000000000000000000000000000000000000000..a4a5db84b54de024afd6dfe5be71d12c73d2067a --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/csatqa/_default_csatqa_yaml @@ -0,0 +1,17 @@ +group: csatqa +dataset_path: EleutherAI/csatqa +test_split: test +output_type: multiple_choice +process_docs: !function utils.process_docs +doc_to_text: "{{question}}" +doc_to_choice: "{{choices}}" +doc_to_target: "{{gold}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 0.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/csatqa/utils.py b/lm-evaluation/build/lib/lm_eval/tasks/csatqa/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..253bc1b6e4c1adeb26c230aca02890d8911e9088 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/csatqa/utils.py @@ -0,0 +1,20 @@ +import datasets + + +def process_docs(dataset: datasets.Dataset) -> datasets.Dataset: + def _process_doc(doc): + instruction = f"""다음을 읽고 정답으로 알맞은 것을 고르시요. +### Context: {doc["context"]} +### Question: {doc["question"]} +### Options: +(1) {doc['option#1']}\n(2) {doc["option#2"]}\n(3) {doc["option#3"]}\n(4) {doc['option#4']}\n(5) {doc['option#5']} +### Answer: 주어진 문제의 정답은""" + + out_doc = { + "question": instruction, + "choices": ["(1)", "(2)", "(3)", "(4)", "(5)"], + "gold": int(doc["gold"]) - 1, + } + return out_doc + + return dataset.map(_process_doc) diff --git a/lm-evaluation/build/lib/lm_eval/tasks/gpqa/cot_zeroshot/_generate_configs.py b/lm-evaluation/build/lib/lm_eval/tasks/gpqa/cot_zeroshot/_generate_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..bda00784cc2fa26b5f0d488cf7b6aea37243353d --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/gpqa/cot_zeroshot/_generate_configs.py @@ -0,0 +1,26 @@ +import yaml +from tqdm import tqdm + + +def main() -> None: + subset = ["extended", "diamond", "main"] + setting = "cot_zeroshot" + for task in tqdm(subset): + file_name = f"gpqa_{task}_{setting}.yaml" + try: + with open(f"{file_name}", "w") as f: + f.write("# Generated by _generate_configs.py\n") + yaml.dump( + { + "include": f"_gpqa_{setting}_yaml", + "task": f"gpqa_{task}_{setting}", + "dataset_name": f"gpqa_{task}", + }, + f, + ) + except FileExistsError: + pass + + +if __name__ == "__main__": + main() diff --git a/lm-evaluation/build/lib/lm_eval/tasks/gpqa/cot_zeroshot/_gpqa_cot_zeroshot_yaml b/lm-evaluation/build/lib/lm_eval/tasks/gpqa/cot_zeroshot/_gpqa_cot_zeroshot_yaml new file mode 100644 index 0000000000000000000000000000000000000000..df99f272c99a343d4250c44e3618f85e9e2a0682 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/gpqa/cot_zeroshot/_gpqa_cot_zeroshot_yaml @@ -0,0 +1,38 @@ +dataset_path: Idavidrein/gpqa +group: gpqa +output_type: generate_until +process_docs: !function utils.process_docs +training_split: train +# Because huggingface dataset only has train split +validation_split: train +test_split: null +doc_to_text: "What is the correct answer to this question:{{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nLet's think step by step: " +doc_to_target: answer +filter_list: + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "(?<=The answer is )(.*)(?=.)" + - function: "take_first" + - name: 
"flexible-extract" + filter: + - function: "multi_choice_regex" + group_select: -1 + ignore_case: true + ignore_punctuation: true + regex_pattern: "(\\([A-Z]\\))" + - function: "take_first" +generation_kwargs: + until: + - "" + do_sample: false + temperature: 0.0 +num_fewshot: 0 +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_diamond_cot_zeroshot.yaml b/lm-evaluation/build/lib/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_diamond_cot_zeroshot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e6a840fa1815096f5fa180ed06223e3523a06214 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_diamond_cot_zeroshot.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: gpqa_diamond +include: _gpqa_cot_zeroshot_yaml +task: gpqa_diamond_cot_zeroshot diff --git a/lm-evaluation/build/lib/lm_eval/tasks/gpqa/cot_zeroshot/utils.py b/lm-evaluation/build/lib/lm_eval/tasks/gpqa/cot_zeroshot/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..96bcd52b140fd0a5896f55c0a52ea2fd5453fd53 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/gpqa/cot_zeroshot/utils.py @@ -0,0 +1,39 @@ +import random +import re + +import datasets + + +def preprocess(text): + if text is None: + return " " + text = text.strip() + text = text.replace(" [title]", ". ") + text = re.sub("\\[.*?\\]", "", text) + text = text.replace(" ", " ") + return text + + +def process_docs(dataset: datasets.Dataset) -> datasets.Dataset: + def _process_doc(doc): + choices = [ + preprocess(doc["Incorrect Answer 1"]), + preprocess(doc["Incorrect Answer 2"]), + preprocess(doc["Incorrect Answer 3"]), + preprocess(doc["Correct Answer"]), + ] + + random.shuffle(choices) + correct_answer_index = choices.index(preprocess(doc["Correct Answer"])) + + out_doc = { + "choice1": choices[0], + "choice2": choices[1], + "choice3": choices[2], + "choice4": choices[3], + "choices": [choices[0], choices[1], choices[2], choices[3]], + "answer": f"({chr(65 + correct_answer_index)})", + } + return out_doc + + return dataset.map(_process_doc) diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/README.md b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7fde78fdaa3e27f4d03fca6c45ca35160c351147 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/README.md @@ -0,0 +1,54 @@ +# k_mmlu + +### Paper + +Title: `KMMLU : Measuring Massive Multitask Language Understanding in Korean` + +Abstract: `We propose KMMLU, a new Korean benchmark with 35,030 expert-level multiple-choice questions across 45 subjects ranging from humanities to STEM. Unlike previous Korean benchmarks that are translated from existing English benchmarks, KMMLU is collected from original Korean exams, capturing linguistic and cultural aspects of the Korean language. We test 26 publicly available and proprietary LLMs, identifying significant room for improvement. The best publicly available model achieves 50.54% on KMMLU, far below the average human performance of 62.6%. This model was primarily trained for English and Chinese, not Korean. Current LLMs tailored to Korean, such as Polyglot-Ko, perform far worse. Surprisingly, even the most capable proprietary LLMs, e.g., GPT-4 and HyperCLOVA X, achieve 59.95% and 53.40%, respectively. 
This suggests that further work is needed to improve Korean LLMs, and KMMLU offers the right tool to track this progress. We make our dataset publicly available on the Hugging Face Hub and integrate the benchmark into EleutherAI's Language Model Evaluation Harness.` + +Note: lm-eval-harness is using the micro average as the default. To replicate the test results in the paper, take the macro average for the scores evaluated with lm-eval-harness + +Homepage: https://huggingface.co/datasets/HAERAE-HUB/KMMLU + +### Citation + +@article{son2024kmmlu, + title={KMMLU: Measuring Massive Multitask Language Understanding in Korean}, + author={Guijin Son and Hanwool Lee and Sungdong Kim and Seungone Kim and Niklas Muennighoff and Taekyoon Choi and Cheonbok Park and Kang Min Yoo and Stella Biderman}, + journal={arXiv preprint arXiv:2402.11548}, + year={2024} +} + +### Groups and Tasks + +#### Groups + +* `kmmlu`: 'All 45 subjects of the KMMLU dataset, evaluated following the methodology in MMLU's original implementation' +* `kmmlu_direct`: 'kmmlu_direct solves questions using a straightforward *generative* multiple-choice question-answering approach' +* `kmmlu_hard`: 'kmmlu_hard comprises difficult questions that at least one proprietary model failed to answer correctly using log-likelihood approach' +* `kmmlu_hard_direct`: 'kmmlu_hard_direct solves questions of kmmlu_hard using direct(generative) approach' +* `kmmlu_hard_cot`: 'kmmlu_hard_cot includes 5-shot of exemplars for chain-of-thought approach' + +#### Tasks + +The following tasks evaluate subjects in the KMMLU dataset +- `kmmlu_direct_{subject_english}` + +The following tasks evaluate subjects in the KMMLU-Hard dataset +- `kmmlu_hard_{subject_english}` +- `kmmlu_hard_cot_{subject_english}` +- `kmmlu_hard_direct_{subject_english}` + + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
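The micro/macro distinction in the note above is easy to trip over: the harness's aggregate `kmmlu` score weights every question equally (micro average), while the paper averages the 45 per-subject accuracies (macro average). A minimal sketch of taking the macro average over per-subject harness scores, using placeholder numbers rather than real results:

```python
# Hypothetical per-subject accuracies as reported by lm-eval-harness for the
# kmmlu_direct_* tasks; the subjects and numbers below are placeholders.
per_subject_acc = {
    "kmmlu_direct_biology": 0.41,
    "kmmlu_direct_chemistry": 0.38,
    "kmmlu_direct_law": 0.45,
}

# Macro average: unweighted mean over subjects, as reported in the KMMLU paper.
macro_avg = sum(per_subject_acc.values()) / len(per_subject_acc)
print(f"macro-averaged accuracy: {macro_avg:.4f}")
```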
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_biology.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_biology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ebe1765b34a3fe774d45869552d0f69e80285896 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_biology.yaml @@ -0,0 +1,3 @@ +dataset_name: Biology +include: _direct_kmmlu_yaml +task: kmmlu_direct_biology diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_chemical_engineering.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_chemical_engineering.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e5875bb7e8be076e5f7a1076b01b21bf308b5acd --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_chemical_engineering.yaml @@ -0,0 +1,3 @@ +dataset_name: Chemical-Engineering +include: _direct_kmmlu_yaml +task: kmmlu_direct_chemical_engineering diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_chemistry.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_chemistry.yaml new file mode 100644 index 0000000000000000000000000000000000000000..edabfb67dd089798dcc001db737136e55eed0efe --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_chemistry.yaml @@ -0,0 +1,3 @@ +dataset_name: Chemistry +include: _direct_kmmlu_yaml +task: kmmlu_direct_chemistry diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_computer_science.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_computer_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c546e738d68db7e281b5d70bbf9771bced6c1300 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_computer_science.yaml @@ -0,0 +1,3 @@ +dataset_name: Computer-Science +include: _direct_kmmlu_yaml +task: kmmlu_direct_computer_science diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_criminal_law.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_criminal_law.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9dfdfabc5971164a63fe651c66f4c0842598ef17 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_criminal_law.yaml @@ -0,0 +1,3 @@ +dataset_name: Criminal-Law +include: _direct_kmmlu_yaml +task: kmmlu_direct_criminal_law diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_economics.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_economics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..db4d78405a6079273f8042350fd4f785c9fe4bed --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_economics.yaml @@ -0,0 +1,3 @@ +dataset_name: Economics +include: _direct_kmmlu_yaml +task: kmmlu_direct_economics diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_electrical_engineering.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_electrical_engineering.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3455d50715d250762358c9db89f05a0c8eb521c3 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_electrical_engineering.yaml @@ -0,0 +1,3 @@ +dataset_name: Electrical-Engineering +include: _direct_kmmlu_yaml +task: kmmlu_direct_electrical_engineering diff --git 
a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_energy_management.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_energy_management.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b4fb806b3808d2cb47ea68534030b9432e998b74 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_energy_management.yaml @@ -0,0 +1,3 @@ +dataset_name: Energy-Management +include: _direct_kmmlu_yaml +task: kmmlu_direct_energy_management diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_environmental_science.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_environmental_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1670ff16bae6d41096f2b9c86f8361455f4c347e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_environmental_science.yaml @@ -0,0 +1,3 @@ +dataset_name: Environmental-Science +include: _direct_kmmlu_yaml +task: kmmlu_direct_environmental_science diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_food_processing.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_food_processing.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f49b087fc288187a9a3363260a17bda1a68ce9bb --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_food_processing.yaml @@ -0,0 +1,3 @@ +dataset_name: Food-Processing +include: _direct_kmmlu_yaml +task: kmmlu_direct_food_processing diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_health.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_health.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3f0d77eb78a61cd2b7b00b80311b59b011abc47e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_health.yaml @@ -0,0 +1,3 @@ +dataset_name: Health +include: _direct_kmmlu_yaml +task: kmmlu_direct_health diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_industrial_engineer.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_industrial_engineer.yaml new file mode 100644 index 0000000000000000000000000000000000000000..39ea0bcf054c6dfef197beef942a16feffca338b --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_industrial_engineer.yaml @@ -0,0 +1,3 @@ +dataset_name: Industrial-Engineer +include: _direct_kmmlu_yaml +task: kmmlu_direct_industrial_engineer diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_korean_history.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_korean_history.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f1aa277a70d03a617e673c27bba1cc2d7440d156 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_korean_history.yaml @@ -0,0 +1,3 @@ +dataset_name: Korean-History +include: _direct_kmmlu_yaml +task: kmmlu_direct_korean_history diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_law.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_law.yaml new file mode 100644 index 0000000000000000000000000000000000000000..602f8982f6ca939766cf0d87f0546eef5a4452de --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_law.yaml @@ -0,0 +1,3 @@ +dataset_name: Law +include: _direct_kmmlu_yaml +task: kmmlu_direct_law diff --git 
a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_machine_design_and_manufacturing.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_machine_design_and_manufacturing.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bfb923c2a9ac76515f3796a5a8c73770ed9fc586 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_machine_design_and_manufacturing.yaml @@ -0,0 +1,3 @@ +dataset_name: Machine-Design-and-Manufacturing +include: _direct_kmmlu_yaml +task: kmmlu_direct_machine_design_and_manufacturing diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_management.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_management.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7352a1360b2a0cb32a85e88351cccfad62c142d3 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_management.yaml @@ -0,0 +1,3 @@ +dataset_name: Management +include: _direct_kmmlu_yaml +task: kmmlu_direct_management diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_maritime_engineering.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_maritime_engineering.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fa0c8f319f35d3343ec4cd5b3be8247fa8fe3e61 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_maritime_engineering.yaml @@ -0,0 +1,3 @@ +dataset_name: Maritime-Engineering +include: _direct_kmmlu_yaml +task: kmmlu_direct_maritime_engineering diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_math.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_math.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6c5d28af05edd5bb5c3c9207930c1994068ce1fe --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_math.yaml @@ -0,0 +1,3 @@ +dataset_name: Math +include: _direct_kmmlu_yaml +task: kmmlu_direct_math diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_mechanical_engineering.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_mechanical_engineering.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a253535adb6c44a8fa8340b106539205cbe6c689 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_mechanical_engineering.yaml @@ -0,0 +1,3 @@ +dataset_name: Mechanical-Engineering +include: _direct_kmmlu_yaml +task: kmmlu_direct_mechanical_engineering diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_nondestructive_testing.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_nondestructive_testing.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3b8dc7e7845394754ede20b72534fe889c7c564f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_nondestructive_testing.yaml @@ -0,0 +1,3 @@ +dataset_name: Nondestructive-Testing +include: _direct_kmmlu_yaml +task: kmmlu_direct_nondestructive_testing diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_patent.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_patent.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2afff2c373a4e5a201a233de96d71baf6d980937 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_patent.yaml @@ -0,0 +1,3 @@ 
+dataset_name: Patent +include: _direct_kmmlu_yaml +task: kmmlu_direct_patent diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_political_science_and_sociology.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_political_science_and_sociology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2209abbf05d8f78017fdcdc6b4178d5c48a2305a --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_political_science_and_sociology.yaml @@ -0,0 +1,3 @@ +dataset_name: Political-Science-and-Sociology +include: _direct_kmmlu_yaml +task: kmmlu_direct_political_science_and_sociology diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_public_safety.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_public_safety.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5bb16a90d1f5303b919e8f348b3eb79a9f7cf296 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_public_safety.yaml @@ -0,0 +1,3 @@ +dataset_name: Public-Safety +include: _direct_kmmlu_yaml +task: kmmlu_direct_public_safety diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_railway_and_automotive_engineering.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_railway_and_automotive_engineering.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2a13204a23bbb4be1de93fceb697cb37d8319ae6 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_railway_and_automotive_engineering.yaml @@ -0,0 +1,3 @@ +dataset_name: Railway-and-Automotive-Engineering +include: _direct_kmmlu_yaml +task: kmmlu_direct_railway_and_automotive_engineering diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_refrigerating_machinery.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_refrigerating_machinery.yaml new file mode 100644 index 0000000000000000000000000000000000000000..44f9e428bbd8d8c7eb33617a6498d2856a6e1c1a --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_refrigerating_machinery.yaml @@ -0,0 +1,3 @@ +dataset_name: Refrigerating-Machinery +include: _direct_kmmlu_yaml +task: kmmlu_direct_refrigerating_machinery diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_taxation.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_taxation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..69e71d6dfa6284cc701221c5c187969be5e92832 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_taxation.yaml @@ -0,0 +1,3 @@ +dataset_name: Taxation +include: _direct_kmmlu_yaml +task: kmmlu_direct_taxation diff --git a/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_telecommunications_and_wireless_technology.yaml b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_telecommunications_and_wireless_technology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f4d1fd05c876bf269c0aae1f3590f8801f7e9955 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_telecommunications_and_wireless_technology.yaml @@ -0,0 +1,3 @@ +dataset_name: Telecommunications-and-Wireless-Technology +include: _direct_kmmlu_yaml +task: kmmlu_direct_telecommunications_and_wireless_technology diff --git a/lm-evaluation/build/lib/lm_eval/tasks/qasper/README.md 
b/lm-evaluation/build/lib/lm_eval/tasks/qasper/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ada111e1ca7b0df493182939960559bdeb96b9f2 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/qasper/README.md @@ -0,0 +1,63 @@ +# QASPER + +### Paper + +Title: `A Dataset of Information-Seeking Questions and Answers Anchored in Research Papers` + +Abstract: https://arxiv.org/abs/2105.03011 + +QASPER is a dataset of 5,049 questions over 1,585 Natural Language Processing papers. +Each question is written by an NLP practitioner who read only the title and abstract +of the corresponding paper, and the question seeks information present in the full +text. The questions are then answered by a separate set of NLP practitioners who also +provide supporting evidence to answers. + +Homepage: https://allenai.org/data/qasper + +### Citation + +``` +@article{DBLP:journals/corr/abs-2105-03011, + author = {Pradeep Dasigi and + Kyle Lo and + Iz Beltagy and + Arman Cohan and + Noah A. Smith and + Matt Gardner}, + title = {A Dataset of Information-Seeking Questions and Answers Anchored in + Research Papers}, + journal = {CoRR}, + volume = {abs/2105.03011}, + year = {2021}, + url = {https://arxiv.org/abs/2105.03011}, + eprinttype = {arXiv}, + eprint = {2105.03011}, + timestamp = {Fri, 14 May 2021 12:13:30 +0200}, + biburl = {https://dblp.org/rec/journals/corr/abs-2105-03011.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + +### Groups and Tasks + +#### Groups + +* `qasper`: executes both `qasper_bool` and `qasper_freeform` + +#### Tasks + +* `qasper_bool`: Multiple choice task that evaluates the task with `answer_type="bool"` +* `qasper_freeform`: Greedy generation task that evaluates the samples from the task with `answer_type="free form answer"` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
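For reference, a rough sketch of running these two QASPER tasks from Python rather than the CLI. It assumes the harness's `simple_evaluate` entry point with its usual keyword arguments, and the model string below is a placeholder, so treat the exact call as illustrative:

```python
# Sketch only: assumes lm_eval.simple_evaluate accepts model/model_args/tasks/batch_size;
# the pretrained model named here is a placeholder, not a recommendation.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",
    tasks=["qasper_bool", "qasper_freeform"],
    batch_size=8,
)
print(results["results"])  # per-task metrics (f1 for bool, f1_abstractive for freeform)
```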
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/qasper/bool.yaml b/lm-evaluation/build/lib/lm_eval/tasks/qasper/bool.yaml new file mode 100644 index 0000000000000000000000000000000000000000..17d3f1be983043ac2ca93038ed29e94c90028592 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/qasper/bool.yaml @@ -0,0 +1,14 @@ +group: qasper +task: qasper_bool +dataset_path: allenai/qasper +output_type: multiple_choice +training_split: train +validation_split: validation +process_docs: !function utils.process_docs_bool +doc_to_text: "TITLE: {{title}}\nABSTRACT: {{abstract}}\n\nQ: {{question}}\n\nA:" +doc_to_target: 1 +doc_to_choice: ["no", "yes"] +metric_list: + - metric: f1 +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/qasper/freeform.yaml b/lm-evaluation/build/lib/lm_eval/tasks/qasper/freeform.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ed7a4bc47274f09eb0f52df04723a011e2db13f0 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/qasper/freeform.yaml @@ -0,0 +1,18 @@ +group: qasper +task: qasper_freeform +dataset_path: allenai/qasper +output_type: generate_until +training_split: train +validation_split: validation +process_docs: !function utils.process_docs_freeform +doc_to_text: "TITLE: {{title}}\nABSTRACT: {{abstract}}\n\nQ: {{question}}\n\nA:" +doc_to_target: answer +generation_kwargs: + until: + - "\n" +metric_list: + - metric: !function metrics.f1_abstractive + aggregation: mean + higher_is_better: true +metadata: + version: 2.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/qasper/metrics.py b/lm-evaluation/build/lib/lm_eval/tasks/qasper/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..cc832912250ae45a4637daaac3f278d0da654ce1 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/qasper/metrics.py @@ -0,0 +1,41 @@ +import re +import string +from collections import Counter + + +def normalize_answer(s): + """ + Taken from the official evaluation script for v1.1 of the SQuAD dataset. + Lower text and remove punctuation, articles and extra whitespace. + """ + + def remove_articles(text): + return re.sub(r"\b(a|an|the)\b", " ", text) + + def white_space_fix(text): + return " ".join(text.split()) + + def remove_punc(text): + exclude = set(string.punctuation) + return "".join(ch for ch in text if ch not in exclude) + + def lower(text): + return text.lower() + + return white_space_fix(remove_articles(remove_punc(lower(s)))) + + +def f1_abstractive(predictions, references): + """ + Taken from the official evaluation script for v1.1 of the SQuAD dataset. 
+ """ + prediction_tokens = normalize_answer(predictions[0]).split() + references_tokens = normalize_answer(references[0]).split() + common = Counter(prediction_tokens) & Counter(references_tokens) + num_same = sum(common.values()) + if num_same == 0: + return 0 + precision = 1.0 * num_same / len(prediction_tokens) + recall = 1.0 * num_same / len(references_tokens) + f1 = (2 * precision * recall) / (precision + recall) + return f1 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/qasper/utils.py b/lm-evaluation/build/lib/lm_eval/tasks/qasper/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fb3d4c55cf7e16a1d2c527510b8ae48d0d3b05fa --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/qasper/utils.py @@ -0,0 +1,72 @@ +from functools import partial + +from datasets import Dataset + + +def process_docs(dataset, set_answer_type="bool"): + FEATURES = ["title", "abstract", "question", "answer", "answer_type"] + + def _categorise_answer(answer_blob): + if answer_blob["unanswerable"]: + answer = "unanswerable" + answer_type = "unanswerable" + return answer, answer_type + elif answer_blob["yes_no"]: + answer = "yes" + answer_type = "bool" + return answer, answer_type + elif answer_blob["free_form_answer"]: + answer = answer_blob["free_form_answer"] + answer_type = "free form answer" + return answer, answer_type + elif answer_blob["extractive_spans"]: + answer = answer_blob["extractive_spans"] + answer_type = "extractive_spans" + return answer, answer_type + elif answer_blob["yes_no"] is False: + answer = "no" + answer_type = "bool" + return answer, answer_type + + def _flatten(doc): + """Given a `doc`, flatten it out so that each JSON blob + contains exactly one question and one answer. Logic taken from + the reference implementation available at + https://github.com/allenai/qasper-led-baseline/blob/main/scripts/evaluator.py + """ + obs_list = { + "title": [], + "abstract": [], + "question": [], + "answer": [], + "answer_type": [], + } + title = doc.pop("title") + abstract = doc.pop("abstract") + for question, answer_list in zip(doc["qas"]["question"], doc["qas"]["answers"]): + for answer_blob in answer_list["answer"]: + answer, answer_type = _categorise_answer(answer_blob) + if answer_type == set_answer_type: + obs_list["title"].append(title) + obs_list["abstract"].append(abstract) + obs_list["question"].append(question) + obs_list["answer_type"].append(answer_type) + if isinstance(answer, list): + answer = ", ".join(answer) + obs_list["answer"].append(answer) + + return obs_list + + dataset = dataset.map( + _flatten, + remove_columns=[key for key in dataset.features.keys() if key not in FEATURES], + ) + new_dataset = {} + for key in dataset.features.keys(): + new_dataset[key] = [x for row in dataset[key] for x in row] + + return Dataset.from_dict(new_dataset) + + +process_docs_bool = partial(process_docs, set_answer_type="bool") +process_docs_freeform = partial(process_docs, set_answer_type="free form answer") diff --git a/lm-evaluation/build/lib/lm_eval/tasks/siqa/README.md b/lm-evaluation/build/lib/lm_eval/tasks/siqa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ca58844b90079a607dd1a6a8a049106c26f57deb --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/siqa/README.md @@ -0,0 +1,37 @@ +# Social IQA + +### Paper + +Title: Social IQA: Commonsense Reasoning about Social Interactions + +Abstract: https://arxiv.org/abs/1904.09728 + +> We introduce Social IQa, the first largescale benchmark for commonsense reasoning about social 
situations. Social IQa contains 38,000 multiple choice questions for probing emotional and social intelligence in a variety of everyday situations (e.g., Q: "Jordan wanted to tell Tracy a secret, so Jordan leaned towards Tracy. Why did Jordan do this?" A: "Make sure no one else could hear"). Through crowdsourcing, we collect commonsense questions along with correct and incorrect answers about social interactions, using a new framework that mitigates stylistic artifacts in incorrect answers by asking workers to provide the right answer to a different but related question. Empirical results show that our benchmark is challenging for existing question-answering models based on pretrained language models, compared to human performance (>20% gap). Notably, we further establish Social IQa as a resource for transfer learning of commonsense knowledge, achieving state-of-the-art performance on multiple commonsense reasoning tasks (Winograd Schemas, COPA). + +Homepage: https://allenai.org/data/socialiqa + + +### Citation + +``` +@inproceedings{sap2019social, + title={Social IQa: Commonsense Reasoning about Social Interactions}, + author={Sap, Maarten and Rashkin, Hannah and Chen, Derek and Le Bras, Ronan and Choi, Yejin}, + booktitle={Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)}, + pages={4463--4473}, + year={2019} +} +``` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [X] Is the task an existing benchmark in the literature? + * [X] Have you referenced the original paper that introduced the task? + * [X] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? The original paper doesn't have an associated implementation, but there is an official entry in [BigBench](https://github.com/google/BIG-bench/tree/main/bigbench/benchmark_tasks/social_iqa). I use the same prompting format as BigBench. + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
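The task config that follows drives the prompt, choices, and target through Jinja templates. As a rough illustration of how those fields render for a single record (rendered here with `jinja2` directly, outside the harness; the example record is invented):

```python
# Illustrative only: renders the siqa doc_to_text / doc_to_choice / doc_to_target
# templates for one made-up Social IQA record using jinja2 directly.
from jinja2 import Template

doc = {
    "context": "Jordan leaned towards Tracy.",
    "question": "Why did Jordan do this?",
    "answerA": "to tell Tracy a secret",
    "answerB": "to push Tracy away",
    "answerC": "to read a book",
    "label": "1",  # labels are 1-indexed in the dataset
}

prompt = Template("Q: {{context}} {{question}}\nA:").render(**doc)
choices = [Template(t).render(**doc) for t in ("{{answerA}}", "{{answerB}}", "{{answerC}}")]
target = int(Template("{{ (label|int) - 1 }}").render(**doc))  # 0-indexed gold choice

print(prompt)
print(choices[target])  # the gold answer string
```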
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/siqa/siqa.yaml b/lm-evaluation/build/lib/lm_eval/tasks/siqa/siqa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..191ffa8d30bae64d4039b235ed857ba5106f3b65 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/siqa/siqa.yaml @@ -0,0 +1,19 @@ +task: social_iqa +dataset_path: social_i_qa +dataset_name: null +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "Q: {{context}} {{question}}\nA:" +target_delimiter: " " +doc_to_choice: + - "{{answerA}}" + - "{{answerB}}" + - "{{answerC}}" +doc_to_target: "{{ (label|int) - 1 }}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 0.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/translation/iwslt2017_ar-en.yaml b/lm-evaluation/build/lib/lm_eval/tasks/translation/iwslt2017_ar-en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ea713393c1dfbe9f7e1f6d055dd4768ace31269e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/translation/iwslt2017_ar-en.yaml @@ -0,0 +1,13 @@ +# Generated by utils.py +dataset_name: iwslt2017-en-ar +dataset_path: iwslt2017 +doc_to_target: ' {{translation["en"]}}' +doc_to_text: 'Arabic phrase: {{translation["ar"]}} + + English phrase:' +group: +- generate_until +- translation +- iwslt2017 +include: wmt_common_yaml +task: iwslt2017-ar-en diff --git a/lm-evaluation/build/lib/lm_eval/tasks/translation/iwslt2017_en-ar.yaml b/lm-evaluation/build/lib/lm_eval/tasks/translation/iwslt2017_en-ar.yaml new file mode 100644 index 0000000000000000000000000000000000000000..891ad50fd6fb60fdb8f21f9004857d739a15640f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/translation/iwslt2017_en-ar.yaml @@ -0,0 +1,13 @@ +# Generated by utils.py +dataset_name: iwslt2017-en-ar +dataset_path: iwslt2017 +doc_to_target: ' {{translation["ar"]}}' +doc_to_text: 'English phrase: {{translation["en"]}} + + Arabic phrase:' +group: +- generate_until +- translation +- iwslt2017 +include: wmt_common_yaml +task: iwslt2017-en-ar diff --git a/lm-evaluation/build/lib/lm_eval/tasks/translation/utils.py b/lm-evaluation/build/lib/lm_eval/tasks/translation/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f30c4d86259259a325edcee3b64ad3199b966c96 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/translation/utils.py @@ -0,0 +1,118 @@ +import argparse + +import yaml + + +try: + import pycountry +except ModuleNotFoundError: + raise Exception( + "`pycountry` is required for generating translation task prompt templates. \ +please install pycountry via pip install lm-eval[multilingual] or pip install -e .[multilingual]", + ) + + +# Different translation benchmarks included in the library. Mostly WMT. +# These correspond to dataset names (subsets) on HuggingFace for each dataset. +# A yaml file is generated by this script for each language pair. 
+ +gpt3_translation_benchmarks = { + "wmt14": ["fr-en"], # ["en-fr", "fr-en"], # French + "wmt16": [ + "ro-en", + "de-en", + ], # ["en-ro", "ro-en", "de-en", "en-de"], # German, Romanian +} + +# 28 total +LANGUAGES = { + **gpt3_translation_benchmarks, + # "wmt20": sacrebleu.get_langpairs_for_testset("wmt20"), + "iwslt2017": ["en-ar"], # Arabic +} + + +def code_to_language(code): + # key is alpha_2 or alpha_3 depending on the code length + language_tuple = pycountry.languages.get(**{f"alpha_{len(code)}": code}) + return language_tuple.name + + +def gen_lang_yamls(output_dir: str, overwrite: bool) -> None: + """ + Generate a yaml file for each language. + + :param output_dir: The directory to output the files to. + :param overwrite: Whether to overwrite files if they already exist. + """ + err = [] + for lang in LANGUAGES.keys(): + for dataset_name in LANGUAGES[lang]: + src_lang, _, tgt_lang = dataset_name.partition("-") + for src, tgt in [[src_lang, tgt_lang], [tgt_lang, src_lang]]: + # both translation directions for each lang pair + lang_pair = src + "-" + tgt + file_name = f"{lang}_{lang_pair}.yaml" + try: + source, target = code_to_language(src), code_to_language(tgt) + + groups = ["generate_until", "translation", lang] + if lang in gpt3_translation_benchmarks.keys(): + groups += ["gpt3_translation_benchmarks"] + + with open( + f"{output_dir}/{file_name}", + "w" if overwrite else "x", + encoding="utf8", + ) as f: + f.write("# Generated by utils.py\n") + yaml.dump( + { + "include": "wmt_common_yaml", + "group": groups, + "dataset_path": lang, + "dataset_name": dataset_name + if not (lang == "iwslt2017") + else "iwslt2017-" + dataset_name, + "task": f"{lang}-{lang_pair}", + "doc_to_text": f"{source} phrase: " + + "{{translation[" + + f'"{src}"' + + "]}}\n" + + f"{target} phrase:", + "doc_to_target": " {{" + + "translation[" + + f'"{tgt}"]' + + "}}", + }, + f, + ) + except FileExistsError: + err.append(file_name) + + if len(err) > 0: + raise FileExistsError( + "Files were not created because they already exist (use --overwrite flag):" + f" {', '.join(err)}" + ) + + +def main() -> None: + """Parse CLI args and generate language-specific yaml files.""" + parser = argparse.ArgumentParser() + parser.add_argument( + "--overwrite", + default=False, + action="store_true", + help="Overwrite files if they already exist", + ) + parser.add_argument( + "--output-dir", default=".", help="Directory to write yaml files to" + ) + args = parser.parse_args() + + gen_lang_yamls(output_dir=args.output_dir, overwrite=args.overwrite) + + +if __name__ == "__main__": + main() diff --git a/lm-evaluation/build/lib/lm_eval/tasks/translation/wmt14_fr-en.yaml b/lm-evaluation/build/lib/lm_eval/tasks/translation/wmt14_fr-en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..09ddd57d6049c29f35150aa4de94c6db3604a0a4 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/translation/wmt14_fr-en.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: fr-en +dataset_path: wmt14 +doc_to_target: ' {{translation["en"]}}' +doc_to_text: 'French phrase: {{translation["fr"]}} + + English phrase:' +group: +- generate_until +- translation +- wmt14 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt14-fr-en diff --git a/lm-evaluation/build/lib/lm_eval/tasks/translation/wmt16_de-en.yaml b/lm-evaluation/build/lib/lm_eval/tasks/translation/wmt16_de-en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..23d50e4aacc8c4e19a8b282e4051e80ec18edf29 --- /dev/null +++ 
b/lm-evaluation/build/lib/lm_eval/tasks/translation/wmt16_de-en.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: de-en +dataset_path: wmt16 +doc_to_target: ' {{translation["en"]}}' +doc_to_text: 'German phrase: {{translation["de"]}} + + English phrase:' +group: +- generate_until +- translation +- wmt16 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt16-de-en diff --git a/lm-evaluation/build/lib/lm_eval/tasks/translation/wmt16_en-de.yaml b/lm-evaluation/build/lib/lm_eval/tasks/translation/wmt16_en-de.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8d391b6c6b879c15f0c8d63119824647ea6997c3 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/translation/wmt16_en-de.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: de-en +dataset_path: wmt16 +doc_to_target: ' {{translation["de"]}}' +doc_to_text: 'English phrase: {{translation["en"]}} + + German phrase:' +group: +- generate_until +- translation +- wmt16 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt16-en-de diff --git a/lm-evaluation/build/lib/lm_eval/tasks/translation/wmt16_en-ro.yaml b/lm-evaluation/build/lib/lm_eval/tasks/translation/wmt16_en-ro.yaml new file mode 100644 index 0000000000000000000000000000000000000000..45a8cae11824bd726064448422f021ec73d7ce87 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/translation/wmt16_en-ro.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: ro-en +dataset_path: wmt16 +doc_to_target: ' {{translation["ro"]}}' +doc_to_text: 'English phrase: {{translation["en"]}} + + Romanian phrase:' +group: +- generate_until +- translation +- wmt16 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt16-en-ro diff --git a/lm-evaluation/build/lib/lm_eval/tasks/translation/wmt16_ro-en.yaml b/lm-evaluation/build/lib/lm_eval/tasks/translation/wmt16_ro-en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..39441eac1c8cb2a8ec4d4e9c9b31402607a5ea77 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/translation/wmt16_ro-en.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: ro-en +dataset_path: wmt16 +doc_to_target: ' {{translation["en"]}}' +doc_to_text: 'Romanian phrase: {{translation["ro"]}} + + English phrase:' +group: +- generate_until +- translation +- wmt16 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt16-ro-en diff --git a/lm-evaluation/build/lib/lm_eval/tasks/unscramble/README.md b/lm-evaluation/build/lib/lm_eval/tasks/unscramble/README.md new file mode 100644 index 0000000000000000000000000000000000000000..674974a79c2be292486b5cc2181e7695f630de20 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/unscramble/README.md @@ -0,0 +1,57 @@ +# Unscramble + +### Paper + +Language Models are Few-Shot Learners +https://arxiv.org/pdf/2005.14165.pdf + +Unscramble is a small battery of 5 “character manipulation” tasks. Each task +involves giving the model a word distorted by some combination of scrambling, +addition, or deletion of characters, and asking it to recover the original word. 
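As a toy illustration of the kinds of distortion involved (not the official code used to build the EleutherAI/unscramble data), the sketch below applies a few of the transformations to one word:

```python
# Toy illustration of the Unscramble distortions; not the official generation code.
import random

def anagram_inner(word, keep=1):
    """Shuffle everything but the first/last `keep` characters (anagrams1/anagrams2)."""
    head, mid, tail = word[:keep], list(word[keep:-keep]), word[-keep:]
    random.shuffle(mid)
    return head + "".join(mid) + tail

def cycle_letters(word, shift=2):
    """Rotate the characters of the word (cycle_letters)."""
    return word[shift:] + word[:shift]

def random_insertion(word):
    """Insert a random lowercase letter after each character (random_insertion)."""
    return "".join(c + random.choice("abcdefghijklmnopqrstuvwxyz") for c in word)

word = "unscramble"
print(anagram_inner(word, keep=1), anagram_inner(word, keep=2),
      cycle_letters(word), random_insertion(word), word[::-1])
```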
+ +Homepage: https://github.com/openai/gpt-3/tree/master/data + + +### Citation + +``` +@inproceedings{NEURIPS2020_1457c0d6, + author = {Brown, Tom and Mann, Benjamin and Ryder, Nick and Subbiah, Melanie and Kaplan, Jared D and Dhariwal, Prafulla and Neelakantan, Arvind and Shyam, Pranav and Sastry, Girish and Askell, Amanda and Agarwal, Sandhini and Herbert-Voss, Ariel and Krueger, Gretchen and Henighan, Tom and Child, Rewon and Ramesh, Aditya and Ziegler, Daniel and Wu, Jeffrey and Winter, Clemens and Hesse, Chris and Chen, Mark and Sigler, Eric and Litwin, Mateusz and Gray, Scott and Chess, Benjamin and Clark, Jack and Berner, Christopher and McCandlish, Sam and Radford, Alec and Sutskever, Ilya and Amodei, Dario}, + booktitle = {Advances in Neural Information Processing Systems}, + editor = {H. Larochelle and M. Ranzato and R. Hadsell and M. F. Balcan and H. Lin}, + pages = {1877--1901}, + publisher = {Curran Associates, Inc.}, + title = {Language Models are Few-Shot Learners}, + url = {https://proceedings.neurips.cc/paper/2020/file/1457c0d6bfcb4967418bfb8ac142f64a-Paper.pdf}, + volume = {33}, + year = {2020} +} +``` + +### Groups and Tasks + +#### Groups + +* `unscramble` + +#### Tasks + +* `anagrams1` - Anagrams of all but the first and last letter. +* `anagrams2` - Anagrams of all but the first and last 2 letters. +* `cycle_letters` - Cycle letters in a word. +* `random_insertion` - Random insertions in the word that must be removed. +* `reversed_words` - Words spelled backwards that must be reversed. + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [x] Have you noted which, if any, published evaluation setups are matched by this variant? 
+ * [x] Checked for equivalence with v0.3.0 LM Evaluation Harness diff --git a/lm-evaluation/build/lib/lm_eval/tasks/unscramble/anagrams1.yaml b/lm-evaluation/build/lib/lm_eval/tasks/unscramble/anagrams1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..392fc78f94a15d7b4d0643e1dc414c9748280b0c --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/unscramble/anagrams1.yaml @@ -0,0 +1,22 @@ +group: + - unscramble +task: anagrams1 +dataset_path: EleutherAI/unscramble +dataset_name: mid_word_1_anagrams +output_type: generate_until +test_split: validation +doc_to_text: "{{context}}" +doc_to_target: "{{completion}}" +generation_kwargs: + until: + - "\n" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: false + ignore_punctuation: false +metadata: + version: 2.0 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/unscramble/anagrams2.yaml b/lm-evaluation/build/lib/lm_eval/tasks/unscramble/anagrams2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9f1dbe6fb5fb25d45475babe4e6c3771d9c89fe9 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/unscramble/anagrams2.yaml @@ -0,0 +1,22 @@ +group: + - unscramble +task: anagrams2 +dataset_path: EleutherAI/unscramble +dataset_name: mid_word_2_anagrams +output_type: generate_until +test_split: validation +doc_to_text: "{{context}}" +doc_to_target: "{{completion}}" +generation_kwargs: + until: + - "\n" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: false + ignore_punctuation: false +metadata: + version: 2.0 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/unscramble/cycle_letters.yaml b/lm-evaluation/build/lib/lm_eval/tasks/unscramble/cycle_letters.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dc7ccf977cb0b5f8e372739a2c53fa4bbf5c92d3 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/unscramble/cycle_letters.yaml @@ -0,0 +1,22 @@ +group: + - unscramble +task: cycle_letters +dataset_path: EleutherAI/unscramble +dataset_name: cycle_letters_in_word +output_type: generate_until +test_split: validation +doc_to_text: "{{context}}" +doc_to_target: "{{completion}}" +generation_kwargs: + until: + - "\n" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: false + ignore_punctuation: false +metadata: + version: 2.0 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/unscramble/random_insertion.yaml b/lm-evaluation/build/lib/lm_eval/tasks/unscramble/random_insertion.yaml new file mode 100644 index 0000000000000000000000000000000000000000..189c2415f12a49334dd18e6a6c63c36cfc9300f7 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/unscramble/random_insertion.yaml @@ -0,0 +1,22 @@ +group: + - unscramble +task: random_insertion +dataset_path: EleutherAI/unscramble +dataset_name: random_insertion_in_word +output_type: generate_until +test_split: validation +doc_to_text: "{{context}}" +doc_to_target: "{{completion}}" +generation_kwargs: + until: + - "\n" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: false + ignore_punctuation: false +metadata: + version: 2.0 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/unscramble/reversed_words.yaml b/lm-evaluation/build/lib/lm_eval/tasks/unscramble/reversed_words.yaml new file 
mode 100644 index 0000000000000000000000000000000000000000..d1bc37dc57b1ae4a3e4608c643e63a6b97dd49c7 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/unscramble/reversed_words.yaml @@ -0,0 +1,20 @@ +group: + - unscramble +task: reversed_words +dataset_path: EleutherAI/unscramble +dataset_name: reversed_words +output_type: generate_until +test_split: validation +doc_to_text: "{{context}}" +doc_to_target: "{{completion}}" +generation_kwargs: + until: + - "\n" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: false + ignore_punctuation: false +metadata: + version: 2.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/winogrande/README.md b/lm-evaluation/build/lib/lm_eval/tasks/winogrande/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d763dffc02ada2e9c619e3ab74423f81dd368d8a --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/winogrande/README.md @@ -0,0 +1,54 @@ +# WinoGrande + +### Paper + +Title: `WinoGrande: An Adversarial Winograd Schema Challenge at Scale` + +Abstract: https://arxiv.org/abs/1907.10641 + +WinoGrande is a collection of 44k problems, inspired by Winograd Schema Challenge +(Levesque, Davis, and Morgenstern 2011), but adjusted to improve the scale and +robustness against the dataset-specific bias. Formulated as a fill-in-a-blank +task with binary options, the goal is to choose the right option for a given +sentence which requires commonsense reasoning. + +NOTE: This evaluation of Winogrande uses partial evaluation as described by +Trinh & Le in Simple Method for Commonsense Reasoning (2018). +See: https://arxiv.org/abs/1806.02847 + +Homepage: https://leaderboard.allenai.org/winogrande/submissions/public + + +### Citation + +``` +@article{sakaguchi2019winogrande, + title={WinoGrande: An Adversarial Winograd Schema Challenge at Scale}, + author={Sakaguchi, Keisuke and Bras, Ronan Le and Bhagavatula, Chandra and Choi, Yejin}, + journal={arXiv preprint arXiv:1907.10641}, + year={2019} +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet. + +#### Tasks + +* `winogrande` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
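The partial-evaluation note above is the key detail: each option is spliced into the sentence at the blank, and the option whose completed prefix makes the shared continuation more likely wins. A small sketch of that comparison on an invented item, with a stand-in scoring function (the real log-likelihoods are computed by the harness, not by this snippet):

```python
# Sketch of Trinh & Le-style partial evaluation for one WinoGrande-style item.
def continuation_logprob(prefix: str, continuation: str) -> float:
    # Placeholder; a real LM would return log P(continuation | prefix).
    return 0.0

doc = {
    "sentence": "The trophy didn't fit in the suitcase because _ was too big.",
    "option1": "the trophy",
    "option2": "the suitcase",
}

idx = doc["sentence"].index("_")
continuation = doc["sentence"][idx + 1:].strip()                      # text after the blank
prefixes = [doc["sentence"][:idx] + doc[f"option{i}"] for i in (1, 2)]

scores = [continuation_logprob(p, continuation) for p in prefixes]
prediction = scores.index(max(scores))                                 # 0 -> option1, 1 -> option2
print(prefixes[prediction])
```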
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/winogrande/default.yaml b/lm-evaluation/build/lib/lm_eval/tasks/winogrande/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..213f0727fea6ef8d5b6f87a78f093de89b6f80f6 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/winogrande/default.yaml @@ -0,0 +1,17 @@ +task: winogrande +dataset_path: winogrande +dataset_name: winogrande_xl +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: !function preprocess_winogrande.doc_to_text +doc_to_target: !function preprocess_winogrande.doc_to_target +doc_to_choice: !function preprocess_winogrande.doc_to_choice +should_decontaminate: true +doc_to_decontamination_query: sentence +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/winogrande/preprocess_winogrande.py b/lm-evaluation/build/lib/lm_eval/tasks/winogrande/preprocess_winogrande.py new file mode 100644 index 0000000000000000000000000000000000000000..2f2076a762905cd151db382ec78109795975d74f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/winogrande/preprocess_winogrande.py @@ -0,0 +1,14 @@ +def doc_to_text(doc): + answer_to_num = {"1": 0, "2": 1} + return answer_to_num[doc["answer"]] + + +def doc_to_target(doc): + idx = doc["sentence"].index("_") + 1 + return doc["sentence"][idx:].strip() + + +def doc_to_choice(doc): + idx = doc["sentence"].index("_") + options = [doc["option1"], doc["option2"]] + return [doc["sentence"][:idx] + opt for opt in options] diff --git a/lm-evaluation/build/lib/lm_eval/tasks/wmdp/README.md b/lm-evaluation/build/lib/lm_eval/tasks/wmdp/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f6074d47102d60dbf6acc4408eb64ee4d379559f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/wmdp/README.md @@ -0,0 +1,50 @@ +# WMDP + +### Paper + +Title: `The WMDP Benchmark: Measuring and Reducing Malicious Use With Unlearning` + +Abstract: `https://arxiv.org/abs/2403.03218` + +`The Weapons of Mass Destruction Proxy (WMDP) benchmark is a dataset of 4,157 multiple-choice questions surrounding hazardous knowledge in biosecurity cybersecurity, and chemical security. WMDP serves as both a proxy evaluation for hazardous knowledge in large language models (LLMs) and a benchmark for unlearning methods to remove such knowledge.` + +Homepage: https://wmdp.ai + + +### Citation + +``` +@misc{li2024wmdp, + title={The WMDP Benchmark: Measuring and Reducing Malicious Use With Unlearning}, + author={Nathaniel Li and Alexander Pan and Anjali Gopal and Summer Yue and Daniel Berrios and Alice Gatti and Justin D. Li and Ann-Kathrin Dombrowski and Shashwat Goel and Long Phan and Gabriel Mukobi and Nathan Helm-Burger and Rassin Lababidi and Lennart Justen and Andrew B. Liu and Michael Chen and Isabelle Barrass and Oliver Zhang and Xiaoyuan Zhu and Rishub Tamirisa and Bhrugu Bharathi and Adam Khoja and Zhenqi Zhao and Ariel Herbert-Voss and Cort B. Breuer and Andy Zou and Mantas Mazeika and Zifan Wang and Palash Oswal and Weiran Liu and Adam A. Hunt and Justin Tienken-Harder and Kevin Y. 
Shih and Kemper Talley and John Guan and Russell Kaplan and Ian Steneker and David Campbell and Brad Jokubaitis and Alex Levinson and Jean Wang and William Qian and Kallol Krishna Karmakar and Steven Basart and Stephen Fitz and Mindy Levine and Ponnurangam Kumaraguru and Uday Tupakula and Vijay Varadharajan and Yan Shoshitaishvili and Jimmy Ba and Kevin M. Esvelt and Alexandr Wang and Dan Hendrycks}, + year={2024}, + eprint={2403.03218}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + +### Groups and Tasks + +#### Groups + +* `wmdp`: All 4,157 multiple-choice questions in biosecurity, cybersecurity, and chemical security + +#### Tasks + +* `wmdp_bio`: 1,520 multiple-choice questions in biosecurity +* `wmdp_cyber`: 2,225 multiple-choice questions in cybersecurity +* `wmdp_chemistry`: 412 multiple-choice questions in chemical security + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/build/lib/lm_eval/tasks/wmdp/_default_template_yaml b/lm-evaluation/build/lib/lm_eval/tasks/wmdp/_default_template_yaml new file mode 100644 index 0000000000000000000000000000000000000000..4e9d1c804bc2e248feaa1d132de6f0279f032d0c --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/wmdp/_default_template_yaml @@ -0,0 +1,16 @@ +dataset_path: cais/wmdp +group: wmdp +test_split: test +training_split: null +validation_split: null +num_fewshot: 0 +output_type: multiple_choice +doc_to_text: "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. 
{{choices[3]}}\nAnswer:" +doc_to_choice: ["A", "B", "C", "D"] +doc_to_target: answer +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/wmdp/wmdp_bio.yaml b/lm-evaluation/build/lib/lm_eval/tasks/wmdp/wmdp_bio.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1096b6f873048709ea16b189c3a244856a2272c0 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/wmdp/wmdp_bio.yaml @@ -0,0 +1,4 @@ +"task": "wmdp_bio" +"dataset_name": "wmdp-bio" +"include": "_default_template_yaml" +"description": "The following are multiple choice questions (with answers) about biology.\n\n" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/wmdp/wmdp_chem.yaml b/lm-evaluation/build/lib/lm_eval/tasks/wmdp/wmdp_chem.yaml new file mode 100644 index 0000000000000000000000000000000000000000..788d6d618bb6f7328841374b2a98a675f9f51849 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/wmdp/wmdp_chem.yaml @@ -0,0 +1,4 @@ +"task": "wmdp_chem" +"dataset_name": "wmdp-chem" +"include": "_default_template_yaml" +"description": "The following are multiple choice questions (with answers) about chemistry.\n\n" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/wmdp/wmdp_cyber.yaml b/lm-evaluation/build/lib/lm_eval/tasks/wmdp/wmdp_cyber.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cac9ba825d719ac7a651ba24443ee6d7fa22567f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/wmdp/wmdp_cyber.yaml @@ -0,0 +1,4 @@ +"task": "wmdp_cyber" +"dataset_name": "wmdp-cyber" +"include": "_default_template_yaml" +"description": "The following are multiple choice questions (with answers) about cybersecurity.\n\n" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/README.md b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3acbde5fc2c11eaaba4eeaaa3858b88d72c645bf --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/README.md @@ -0,0 +1,84 @@ +# XStoryCloze + +### Paper + +Title: `Few-shot Learning with Multilingual Language Models` + +Abstract: https://arxiv.org/abs/2112.10668 + +XStoryCloze consists of professional translations of the [English StoryCloze dataset](https://cs.rochester.edu/nlp/rocstories/) (Spring 2016 version) into 10 non-English languages. This dataset is released by Meta AI. + +Homepage: https://github.com/facebookresearch/fairseq/pull/4820 + + +### Citation + +``` +@article{DBLP:journals/corr/abs-2112-10668, + author = {Xi Victoria Lin and + Todor Mihaylov and + Mikel Artetxe and + Tianlu Wang and + Shuohui Chen and + Daniel Simig and + Myle Ott and + Naman Goyal and + Shruti Bhosale and + Jingfei Du and + Ramakanth Pasunuru and + Sam Shleifer and + Punit Singh Koura and + Vishrav Chaudhary and + Brian O'Horo and + Jeff Wang and + Luke Zettlemoyer and + Zornitsa Kozareva and + Mona T.
Diab and + Veselin Stoyanov and + Xian Li}, + title = {Few-shot Learning with Multilingual Language Models}, + journal = {CoRR}, + volume = {abs/2112.10668}, + year = {2021}, + url = {https://arxiv.org/abs/2112.10668}, + eprinttype = {arXiv}, + eprint = {2112.10668}, + timestamp = {Tue, 04 Jan 2022 15:59:27 +0100}, + biburl = {https://dblp.org/rec/journals/corr/abs-2112-10668.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + +### Groups and Tasks + +#### Groups + +* `xstorycloze` + +#### Tasks + +* `xstorycloze_ar`: Arabic +* `xstorycloze_en`: English +* `xstorycloze_es`: Spanish +* `xstorycloze_eu`: Basque +* `xstorycloze_hi`: Hindi +* `xstorycloze_id`: Indonesian +* `xstorycloze_my`: Burmese +* `xstorycloze_ru`: Russian +* `xstorycloze_sw`: Swahili +* `xstorycloze_te`: Telugu +* `xstorycloze_zh`: Chinese + + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_ar.yaml b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_ar.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2a52966d5a76138be4821d38c5bd639701586061 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_ar.yaml @@ -0,0 +1,18 @@ +group: xstorycloze +task: xstorycloze_ar +dataset_path: juletxara/xstory_cloze +dataset_name: ar +output_type: multiple_choice +training_split: train +validation_split: eval +doc_to_text: "{{[input_sentence_1, input_sentence_2, input_sentence_3, input_sentence_4]|join(' ')}}" +doc_to_target: "{{answer_right_ending-1}}" +doc_to_choice: "{{[sentence_quiz1, sentence_quiz2]}}" +should_decontaminate: true +doc_to_decontamination_query: "{{[input_sentence_1, input_sentence_2, input_sentence_3, input_sentence_4]|join(' ')}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_en.yaml b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b3127cdfa5dfd4249566b12dc9b1451018a88581 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_en.yaml @@ -0,0 +1,3 @@ +include: default_ar.yaml +task: xstorycloze_en +dataset_name: en diff --git a/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_es.yaml b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_es.yaml new file mode 100644 index 0000000000000000000000000000000000000000..60af1f8c0a7b8b0917060d592c663fe6212e0210 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_es.yaml @@ -0,0 +1,3 @@ +include: default_ar.yaml +task: xstorycloze_es +dataset_name: es diff --git a/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_eu.yaml b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_eu.yaml new file mode 100644 
index 0000000000000000000000000000000000000000..849caccf2425ec1483baddb83d8c98b8d1eb79e3 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_eu.yaml @@ -0,0 +1,3 @@ +include: default_ar.yaml +task: xstorycloze_eu +dataset_name: eu diff --git a/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_hi.yaml b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_hi.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8c00c75f0e3cba53c17174723d714fde8dc8c351 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_hi.yaml @@ -0,0 +1,3 @@ +include: default_ar.yaml +task: xstorycloze_hi +dataset_name: hi diff --git a/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_id.yaml b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_id.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c044d7532c4539e287aaa429d4042feff7c6d733 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_id.yaml @@ -0,0 +1,3 @@ +include: default_ar.yaml +task: xstorycloze_id +dataset_name: id diff --git a/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_my.yaml b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_my.yaml new file mode 100644 index 0000000000000000000000000000000000000000..47c3ae187209901b1fd711e680c1c3d46fdff48e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_my.yaml @@ -0,0 +1,3 @@ +include: default_ar.yaml +task: xstorycloze_my +dataset_name: my diff --git a/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_ru.yaml b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_ru.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8617ab08db68e066c4165b0480801b2e5e16d9a2 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_ru.yaml @@ -0,0 +1,3 @@ +include: default_ar.yaml +task: xstorycloze_ru +dataset_name: ru diff --git a/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_sw.yaml b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_sw.yaml new file mode 100644 index 0000000000000000000000000000000000000000..22b7f3b461fb628102face370fb8b48d7d442241 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_sw.yaml @@ -0,0 +1,3 @@ +include: default_ar.yaml +task: xstorycloze_sw +dataset_name: sw diff --git a/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_te.yaml b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_te.yaml new file mode 100644 index 0000000000000000000000000000000000000000..946861d4f090d25d0b221c1c8eeca4e59249a380 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_te.yaml @@ -0,0 +1,3 @@ +include: default_ar.yaml +task: xstorycloze_te +dataset_name: te diff --git a/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_zh.yaml b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_zh.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a55989fe2f64e6cb0dcf5136c35a1d5bf1ee4ae6 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/xstorycloze/default_zh.yaml @@ -0,0 +1,3 @@ +include: default_ar.yaml +task: xstorycloze_zh +dataset_name: zh
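
All of the per-language XStoryCloze configs above just `include: default_ar.yaml` and override `task` and `dataset_name`, so the Jinja fields in `default_ar.yaml` define the prompt for every language. As a rough illustration of how those fields evaluate, here is a minimal sketch that renders them with plain `jinja2` against a made-up record; the column names follow `juletxara/xstory_cloze`, but the example sentences are invented, and the harness's own handling of list-valued templates may differ from a raw render.

```python
# Minimal, illustrative sketch (not the harness's own template machinery):
# render the default_ar.yaml Jinja fields against a toy XStoryCloze-style record.
from jinja2 import Template

doc = {  # made-up example in the dataset's column layout
    "input_sentence_1": "Sara wanted tea.",
    "input_sentence_2": "She boiled some water.",
    "input_sentence_3": "She picked out her favorite mug.",
    "input_sentence_4": "She poured the water over the leaves.",
    "sentence_quiz1": "She enjoyed her warm drink.",
    "sentence_quiz2": "She threw the mug away.",
    "answer_right_ending": 1,  # 1-based gold label in the dataset
}

doc_to_text = Template(
    "{{[input_sentence_1, input_sentence_2, input_sentence_3, input_sentence_4]|join(' ')}}"
)
doc_to_target = Template("{{answer_right_ending-1}}")
doc_to_choice = Template("{{[sentence_quiz1, sentence_quiz2]}}")

print(doc_to_text.render(**doc))    # the four context sentences joined with spaces
print(doc_to_target.render(**doc))  # "0" -> zero-based index of the correct ending
print(doc_to_choice.render(**doc))  # the two candidate endings, rendered as a list
```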
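The winogrande functions near the top of this diff are easy to misread because the usual roles are inverted: `doc_to_choice` builds one context per option (the sentence up to the blank with that option substituted), `doc_to_target` returns the continuation shared by both candidates (everything after the blank), and `doc_to_text` returns the zero-based index of the gold option. A small sketch of that behaviour, using an invented example sentence and assuming `preprocess_winogrande.py` from this diff is importable from the working directory:

```python
# Illustrative only: what the winogrande preprocessing functions return for a toy record.
from preprocess_winogrande import doc_to_choice, doc_to_target, doc_to_text

doc = {
    "sentence": "The trophy didn't fit in the suitcase because the _ was too small.",
    "option1": "trophy",
    "option2": "suitcase",
    "answer": "2",
}

# Two candidate contexts: the sentence up to the blank, with each option filled in.
print(doc_to_choice(doc))
# ["The trophy didn't fit in the suitcase because the trophy",
#  "The trophy didn't fit in the suitcase because the suitcase"]

# The shared continuation after the blank, which the harness scores against each context.
print(doc_to_target(doc))  # "was too small."

# The zero-based index of the gold option (answer "2" -> 1).
print(doc_to_text(doc))  # 1
```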