diff --git a/lm-evaluation/lm_eval/tasks/glue/README.md b/lm-evaluation/lm_eval/tasks/glue/README.md new file mode 100644 index 0000000000000000000000000000000000000000..573c640e87c1ba077d6d9cbe79a045c7c4f02ddf --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/glue/README.md @@ -0,0 +1,72 @@ +# GLUE +**NOTE**: GLUE benchmark tasks do not provide publicly accessible labels for their test sets, so we default to the validation sets for all sub-tasks. + +### Paper + +Title: `GLUE: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding` + +Abstract: https://openreview.net/pdf?id=rJ4km2R5t7 + +The General Language Understanding Evaluation (GLUE) benchmark is a collection of +resources for training, evaluating, and analyzing natural language understanding +systems. GLUE consists of: +- A benchmark of nine sentence- or sentence-pair language understanding tasks built +on established existing datasets and selected to cover a diverse range of dataset +sizes, text genres, and degrees of difficulty, and +- A diagnostic dataset designed to evaluate and analyze model performance with +respect to a wide range of linguistic phenomena found in natural language. + +Homepage: https://gluebenchmark.com/ + +### Citation + +``` +@inproceedings{wang-etal-2018-glue, + title = "{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding", + author = "Wang, Alex and + Singh, Amanpreet and + Michael, Julian and + Hill, Felix and + Levy, Omer and + Bowman, Samuel", + booktitle = "Proceedings of the 2018 {EMNLP} Workshop {B}lackbox{NLP}: Analyzing and Interpreting Neural Networks for {NLP}", + month = nov, + year = "2018", + address = "Brussels, Belgium", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/W18-5446", + doi = "10.18653/v1/W18-5446", + pages = "353--355", + abstract = "Human ability to understand language is \textit{general, flexible, and robust}. In contrast, most NLU models above the word level are designed for a specific task and struggle with out-of-domain data. If we aspire to develop models with understanding beyond the detection of superficial correspondences between inputs and outputs, then it is critical to develop a unified model that can execute a range of linguistic tasks across different domains. To facilitate research in this direction, we present the General Language Understanding Evaluation (GLUE, gluebenchmark.com): a benchmark of nine diverse NLU tasks, an auxiliary dataset for probing models for understanding of specific linguistic phenomena, and an online platform for evaluating and comparing models. For some benchmark tasks, training data is plentiful, but for others it is limited or does not match the genre of the test set. GLUE thus favors models that can represent linguistic knowledge in a way that facilitates sample-efficient learning and effective knowledge-transfer across tasks. While none of the datasets in GLUE were created from scratch for the benchmark, four of them feature privately-held test data, which is used to ensure that the benchmark is used fairly. We evaluate baselines that use ELMo (Peters et al., 2018), a powerful transfer learning technique, as well as state-of-the-art sentence representation models. The best models still achieve fairly low absolute scores. Analysis with our diagnostic dataset yields similarly weak performance over all phenomena tested, with some exceptions.", +} +``` + +### Groups and Tasks + +#### Groups + +* `glue`: Run all GLUE subtasks.
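For quick orientation, a minimal usage sketch follows (an editor's illustration, not part of this PR; it assumes lm-eval's Python entry point `lm_eval.simple_evaluate`, and the checkpoint name is an arbitrary placeholder):

```python
# Hedged sketch: evaluate the whole `glue` group in one call.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",  # HuggingFace transformers backend
    model_args="pretrained=EleutherAI/pythia-160m",  # placeholder checkpoint
    tasks=["glue"],  # the group expands to all GLUE sub-tasks
    num_fewshot=0,
)
print(results["results"])  # per-task metrics (mcc, acc, f1, ...)
```

Individual sub-tasks from the list below can be requested the same way, e.g. `tasks=["cola", "rte"]`.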
+ +#### Tasks + +* `cola` +* `mnli` +* `mnli_mismatch` +* `mrpc` +* `qnli` +* `qqp` +* `rte` +* `sst2` +* `wnli` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/lm_eval/tasks/glue/cola/default.yaml b/lm-evaluation/lm_eval/tasks/glue/cola/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a46003c2766ea26a96a6c6b73b750cb5e402119e --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/glue/cola/default.yaml @@ -0,0 +1,16 @@ +group: glue +task: cola +dataset_path: glue +dataset_name: cola +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "{{sentence}}\nQuestion: Does this sentence make sense?\nAnswer:" +doc_to_target: label +doc_to_choice: ["no", "yes"] +should_decontaminate: true +doc_to_decontamination_query: sentence +metric_list: + - metric: mcc +metadata: + version: 1.0 diff --git a/lm-evaluation/lm_eval/tasks/glue/mnli/default.yaml b/lm-evaluation/lm_eval/tasks/glue/mnli/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6caffa85a22719f597f5b780b0653ee124a854c5 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/glue/mnli/default.yaml @@ -0,0 +1,14 @@ +group: glue +task: mnli +dataset_path: glue +dataset_name: mnli +output_type: multiple_choice +training_split: train +validation_split: validation_matched +doc_to_text: !function utils.doc_to_text +doc_to_target: label +doc_to_choice: ["True", "Neither", "False"] +metric_list: + - metric: acc +metadata: + version: 1.0 diff --git a/lm-evaluation/lm_eval/tasks/glue/mnli/mismatch.yaml b/lm-evaluation/lm_eval/tasks/glue/mnli/mismatch.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1e9b49bcd423ce43bf87f044c75a01e75f44d3d0 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/glue/mnli/mismatch.yaml @@ -0,0 +1,3 @@ +include: default.yaml +task: mnli_mismatch +validation_split: validation_mismatched diff --git a/lm-evaluation/lm_eval/tasks/glue/mnli/utils.py b/lm-evaluation/lm_eval/tasks/glue/mnli/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2d5fdaec2905ac7cf95ac3e50f1d12c728f59c37 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/glue/mnli/utils.py @@ -0,0 +1,6 @@ +def doc_to_text(doc) -> str: + return "{}\nQuestion: {} True, False or Neither?\nAnswer:".format( + doc["premise"], + doc["hypothesis"].strip() + + ("" if doc["hypothesis"].strip().endswith(".") else "."), + ) diff --git a/lm-evaluation/lm_eval/tasks/glue/mrpc/default.yaml b/lm-evaluation/lm_eval/tasks/glue/mrpc/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f0bc24510ca533bde719cba42fb9d079cfb4a53b --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/glue/mrpc/default.yaml @@ -0,0 +1,15 @@ +group: glue +task: mrpc +dataset_path: glue +dataset_name: mrpc +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "Sentence 1: 
{{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do both sentences mean the same thing?\nAnswer:" +doc_to_target: label +doc_to_choice: ["no", "yes"] +metric_list: + - metric: acc + - metric: f1 +metadata: + version: 1.0 diff --git a/lm-evaluation/lm_eval/tasks/glue/qqp/default.yaml b/lm-evaluation/lm_eval/tasks/glue/qqp/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bcd82f26bc8552c74f85b23054d90b9084a89211 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/glue/qqp/default.yaml @@ -0,0 +1,15 @@ +group: glue +task: qqp +dataset_path: glue +dataset_name: qqp +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "Question 1: {{question1}}\nQuestion 2: {{question2}}\nQuestion: Do both questions ask the same thing?\nAnswer:" +doc_to_target: label +doc_to_choice: ["no", "yes"] +metric_list: + - metric: acc + - metric: f1 +metadata: + version: 2.0 diff --git a/lm-evaluation/lm_eval/tasks/glue/rte/default.yaml b/lm-evaluation/lm_eval/tasks/glue/rte/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7b12096a46b2a4fcc3f6f59b4f2d245130425c01 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/glue/rte/default.yaml @@ -0,0 +1,14 @@ +group: glue +task: rte +dataset_path: glue +dataset_name: rte +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "{{sentence1}}\nQuestion: {{sentence2}} True or False?\nAnswer:" +doc_to_target: label +doc_to_choice: ["True", "False"] +metric_list: + - metric: acc +metadata: + version: 1.0 diff --git a/lm-evaluation/lm_eval/tasks/glue/sst2/default.yaml b/lm-evaluation/lm_eval/tasks/glue/sst2/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..838afeb218891da139dec48083fa1990fc896b07 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/glue/sst2/default.yaml @@ -0,0 +1,14 @@ +group: glue +task: sst2 +dataset_path: glue +dataset_name: sst2 +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "{{sentence}}\nQuestion: Is this sentence positive or negative?\nAnswer:" +doc_to_target: label +doc_to_choice: ["negative", "positive"] +metric_list: + - metric: acc +metadata: + version: 1.0 diff --git a/lm-evaluation/lm_eval/tasks/glue/wnli/default.yaml b/lm-evaluation/lm_eval/tasks/glue/wnli/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a8e57a35d67920b7101a4f9e92f873c3c7ec3134 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/glue/wnli/default.yaml @@ -0,0 +1,14 @@ +group: glue +task: wnli +dataset_path: glue +dataset_name: wnli +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "{{sentence1}}\nQuestion: {{sentence2}} True or False?\nAnswer:" +doc_to_target: label +doc_to_choice: ["False", "True"] +metric_list: + - metric: acc +metadata: + version: 2.0 diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/README.md b/lm-evaluation/lm_eval/tasks/kmmlu/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7fde78fdaa3e27f4d03fca6c45ca35160c351147 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/README.md @@ -0,0 +1,54 @@ +# KMMLU + +### Paper + +Title: `KMMLU: Measuring Massive Multitask Language Understanding in Korean` + +Abstract: `We propose KMMLU, a new Korean benchmark with 35,030 expert-level multiple-choice questions across 45 subjects ranging from humanities to STEM.
Unlike previous Korean benchmarks that are translated from existing English benchmarks, KMMLU is collected from original Korean exams, capturing linguistic and cultural aspects of the Korean language. We test 26 publicly available and proprietary LLMs, identifying significant room for improvement. The best publicly available model achieves 50.54% on KMMLU, far below the average human performance of 62.6%. This model was primarily trained for English and Chinese, not Korean. Current LLMs tailored to Korean, such as Polyglot-Ko, perform far worse. Surprisingly, even the most capable proprietary LLMs, e.g., GPT-4 and HyperCLOVA X, achieve 59.95% and 53.40%, respectively. This suggests that further work is needed to improve Korean LLMs, and KMMLU offers the right tool to track this progress. We make our dataset publicly available on the Hugging Face Hub and integrate the benchmark into EleutherAI's Language Model Evaluation Harness.` + +Note: lm-eval-harness uses the micro average as its default. To replicate the results reported in the paper, take the macro average of the per-subject scores produced by lm-eval-harness. + +Homepage: https://huggingface.co/datasets/HAERAE-HUB/KMMLU + +### Citation + +@article{son2024kmmlu, + title={KMMLU: Measuring Massive Multitask Language Understanding in Korean}, + author={Guijin Son and Hanwool Lee and Sungdong Kim and Seungone Kim and Niklas Muennighoff and Taekyoon Choi and Cheonbok Park and Kang Min Yoo and Stella Biderman}, + journal={arXiv preprint arXiv:2402.11548}, + year={2024} +} + +### Groups and Tasks + +#### Groups + +* `kmmlu`: 'All 45 subjects of the KMMLU dataset, evaluated following the methodology in MMLU's original implementation' +* `kmmlu_direct`: 'kmmlu_direct solves questions using a straightforward *generative* multiple-choice question-answering approach' +* `kmmlu_hard`: 'kmmlu_hard comprises difficult questions that at least one proprietary model failed to answer correctly using the log-likelihood approach' +* `kmmlu_hard_direct`: 'kmmlu_hard_direct solves the kmmlu_hard questions using the direct (generative) approach' +* `kmmlu_hard_cot`: 'kmmlu_hard_cot includes 5-shot exemplars for the chain-of-thought approach' + +#### Tasks + +The following tasks evaluate subjects in the KMMLU dataset +- `kmmlu_direct_{subject_english}` + +The following tasks evaluate subjects in the KMMLU-Hard dataset +- `kmmlu_hard_{subject_english}` +- `kmmlu_hard_cot_{subject_english}` +- `kmmlu_hard_direct_{subject_english}` + + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
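To make the direct (generative) scoring concrete, here is a small illustrative sketch (an editor's addition, not upstream code; it mirrors the `doc_to_target` and `exact_match` settings of the `_direct_kmmlu_yaml` template in the next hunk, and the document and generation values are invented):

```python
# KMMLU direct tasks map the 1-indexed integer label to a letter target:
# doc_to_target: "{{['A', 'B', 'C', 'D'][answer-1]}}"
doc = {"answer": 3}  # invented row; the dataset stores the gold option as 1-4
gold = ["A", "B", "C", "D"][doc["answer"] - 1]  # -> "C"

def normalize(text: str) -> str:
    # exact_match compares case-insensitively and ignores spaces
    # (ignore_case: true, regexes_to_ignore: [" "])
    return text.replace(" ", "").lower()

model_output = " c"  # invented generation, already cut at the `until` stops
print(normalize(model_output) == normalize(gold))  # True
```

(`ignore_punctuation: true` additionally strips punctuation before the comparison; that step is omitted here for brevity.)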
diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct/_direct_kmmlu_yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct/_direct_kmmlu_yaml new file mode 100644 index 0000000000000000000000000000000000000000..9a4a6dbcf011d766caf9bb16bef085b93da44a39 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct/_direct_kmmlu_yaml @@ -0,0 +1,27 @@ +group: + - kmmlu + - kmmlu_direct +dataset_path: HAERAE-HUB/KMMLU +output_type: generate_until +test_split: test +fewshot_split: dev +doc_to_text: "{{question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\n정답:" +doc_to_target: "{{['A', 'B', 'C', 'D'][answer-1]}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + regexes_to_ignore: + - " " +generation_kwargs: + until: + - "Q:" + - "\n\n" + - "</s>" + - "." + do_sample: false + temperature: 0.0 +metadata: + version: 2.0 diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_accounting.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_accounting.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d7736e8d5b918f58ffc4dfa19e3e6bd6af898980 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_accounting.yaml @@ -0,0 +1,3 @@ +dataset_name: Accounting +include: _direct_kmmlu_yaml +task: kmmlu_direct_accounting diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_biology.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_biology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ebe1765b34a3fe774d45869552d0f69e80285896 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_biology.yaml @@ -0,0 +1,3 @@ +dataset_name: Biology +include: _direct_kmmlu_yaml +task: kmmlu_direct_biology diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_civil_engineering.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_civil_engineering.yaml new file mode 100644 index 0000000000000000000000000000000000000000..98ed98dd2cc5f90039d98b74ca0f711809232e14 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_civil_engineering.yaml @@ -0,0 +1,3 @@ +dataset_name: Civil-Engineering +include: _direct_kmmlu_yaml +task: kmmlu_direct_civil_engineering diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_computer_science.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_computer_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c546e738d68db7e281b5d70bbf9771bced6c1300 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_computer_science.yaml @@ -0,0 +1,3 @@ +dataset_name: Computer-Science +include: _direct_kmmlu_yaml +task: kmmlu_direct_computer_science diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_construction.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_construction.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a0af2a16cfc082d58903758234ed0e36de0333c9 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_construction.yaml @@ -0,0 +1,3 @@ +dataset_name: Construction +include: _direct_kmmlu_yaml +task: kmmlu_direct_construction diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_ecology.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_ecology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9d182903e2abe1f3c2b3f5d4cbe955bb1bcf58c9 --- /dev/null +++
b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_ecology.yaml @@ -0,0 +1,3 @@ +dataset_name: Ecology +include: _direct_kmmlu_yaml +task: kmmlu_direct_ecology diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_health.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_health.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3f0d77eb78a61cd2b7b00b80311b59b011abc47e --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_health.yaml @@ -0,0 +1,3 @@ +dataset_name: Health +include: _direct_kmmlu_yaml +task: kmmlu_direct_health diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_korean_history.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_korean_history.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f1aa277a70d03a617e673c27bba1cc2d7440d156 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_korean_history.yaml @@ -0,0 +1,3 @@ +dataset_name: Korean-History +include: _direct_kmmlu_yaml +task: kmmlu_direct_korean_history diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_machine_design_and_manufacturing.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_machine_design_and_manufacturing.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bfb923c2a9ac76515f3796a5a8c73770ed9fc586 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_machine_design_and_manufacturing.yaml @@ -0,0 +1,3 @@ +dataset_name: Machine-Design-and-Manufacturing +include: _direct_kmmlu_yaml +task: kmmlu_direct_machine_design_and_manufacturing diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_math.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_math.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6c5d28af05edd5bb5c3c9207930c1994068ce1fe --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_math.yaml @@ -0,0 +1,3 @@ +dataset_name: Math +include: _direct_kmmlu_yaml +task: kmmlu_direct_math diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_patent.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_patent.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2afff2c373a4e5a201a233de96d71baf6d980937 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_patent.yaml @@ -0,0 +1,3 @@ +dataset_name: Patent +include: _direct_kmmlu_yaml +task: kmmlu_direct_patent diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_political_science_and_sociology.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_political_science_and_sociology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2209abbf05d8f78017fdcdc6b4178d5c48a2305a --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_political_science_and_sociology.yaml @@ -0,0 +1,3 @@ +dataset_name: Political-Science-and-Sociology +include: _direct_kmmlu_yaml +task: kmmlu_direct_political_science_and_sociology diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_social_welfare.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_social_welfare.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fa13bdff6a4791c8e20fe905a84db0586af11afa --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct/kmmlu_direct_social_welfare.yaml @@ -0,0 +1,3 @@ +dataset_name: Social-Welfare +include: _direct_kmmlu_yaml +task: kmmlu_direct_social_welfare diff 
--git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_accounting.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_accounting.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ca805e955ec5ce5cb25e00e321f489646e89628f --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_accounting.yaml @@ -0,0 +1,3 @@ +dataset_name: accounting +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_accounting diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_agricultural_sciences.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_agricultural_sciences.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7348344468bf57bb54a15063d5e59483c17a22c1 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_agricultural_sciences.yaml @@ -0,0 +1,3 @@ +dataset_name: agricultural_sciences +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_agricultural_sciences diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_aviation_engineering_and_maintenance.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_aviation_engineering_and_maintenance.yaml new file mode 100644 index 0000000000000000000000000000000000000000..25c91cb6e5e55fcc578bd455086b994f1dd51d8c --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_aviation_engineering_and_maintenance.yaml @@ -0,0 +1,3 @@ +dataset_name: aviation_engineering_and_maintenance +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_aviation_engineering_and_maintenance diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_biology.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_biology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a7bc8417b030a06bfd2308384525e6a5b4dcacc4 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_biology.yaml @@ -0,0 +1,3 @@ +dataset_name: biology +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_biology diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_chemical_engineering.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_chemical_engineering.yaml new file mode 100644 index 0000000000000000000000000000000000000000..063974afd2f1ba984722043d50b6c4aaabbc1323 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_chemical_engineering.yaml @@ -0,0 +1,3 @@ +dataset_name: chemical_engineering +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_chemical_engineering diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_civil_engineering.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_civil_engineering.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ba2c23b2d1866b4b0dfe71304758e26e94a42a89 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_civil_engineering.yaml @@ -0,0 +1,3 @@ +dataset_name: civil_engineering +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_civil_engineering diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_computer_science.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_computer_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2a388ff474281c525b8e674f204376c16e522641 --- /dev/null +++ 
b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_computer_science.yaml @@ -0,0 +1,3 @@ +dataset_name: computer_science +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_computer_science diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_construction.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_construction.yaml new file mode 100644 index 0000000000000000000000000000000000000000..faab391b9012efdf167a43105649313cb46a1c47 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_construction.yaml @@ -0,0 +1,3 @@ +dataset_name: construction +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_construction diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_ecology.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_ecology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..adedf9d6e704a36368249260114aa8a80954a24a --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_ecology.yaml @@ -0,0 +1,3 @@ +dataset_name: ecology +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_ecology diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_economics.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_economics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f42e5b8dad2a7f4481dbd7d5e476ccccef222ede --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_economics.yaml @@ -0,0 +1,3 @@ +dataset_name: economics +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_economics diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_education.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_education.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9c90432fe26075d1c14f84f5765f8e3198deb2ed --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_education.yaml @@ -0,0 +1,3 @@ +dataset_name: education +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_education diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_electronics_engineering.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_electronics_engineering.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e01781549fd0bf1982b895ba2041c3d6f9ec9644 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_electronics_engineering.yaml @@ -0,0 +1,3 @@ +dataset_name: electronics_engineering +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_electronics_engineering diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_energy_management.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_energy_management.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d4c2ca7d643d71d3f1464e1f35bd49e944738ee6 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_energy_management.yaml @@ -0,0 +1,3 @@ +dataset_name: energy_management +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_energy_management diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_environmental_science.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_environmental_science.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..de511a09f02c411dedba2ac816a34c11b6805caa --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_environmental_science.yaml @@ -0,0 +1,3 @@ +dataset_name: environmental_science +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_environmental_science diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_fashion.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_fashion.yaml new file mode 100644 index 0000000000000000000000000000000000000000..26f0617dfb641bd11f45f482c7180e12a318a0f5 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_fashion.yaml @@ -0,0 +1,3 @@ +dataset_name: fashion +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_fashion diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_food_processing.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_food_processing.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e48143d2c3bc7a69db87ac5d68f4a8951c1d391d --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_food_processing.yaml @@ -0,0 +1,3 @@ +dataset_name: food_processing +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_food_processing diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_gas_technology_and_engineering.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_gas_technology_and_engineering.yaml new file mode 100644 index 0000000000000000000000000000000000000000..eb5211ad857bfe99cc41062f21b8c47d008c3c64 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_gas_technology_and_engineering.yaml @@ -0,0 +1,3 @@ +dataset_name: gas_technology_and_engineering +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_gas_technology_and_engineering diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_geomatics.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_geomatics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a25f3c1a7eefe75cd11ce6d45f62ab898f30922b --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_geomatics.yaml @@ -0,0 +1,3 @@ +dataset_name: geomatics +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_geomatics diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_health.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_health.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0fef809eebe36f65d541ce8741e4e0f2ac054da1 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_health.yaml @@ -0,0 +1,3 @@ +dataset_name: health +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_health diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_industrial_engineer.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_industrial_engineer.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d7ca26e58ac90c69cb2bffcf7a4d95657b019019 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_industrial_engineer.yaml @@ -0,0 +1,3 @@ +dataset_name: industrial_engineer +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_industrial_engineer diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_interior_architecture_and_design.yaml 
b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_interior_architecture_and_design.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3b1303810a9fbee6d966095fabbcc773dc489e71 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_interior_architecture_and_design.yaml @@ -0,0 +1,3 @@ +dataset_name: interior_architecture_and_design +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_interior_architecture_and_design diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_korean_history.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_korean_history.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c4d595d19636e0698930b82b7f1d6c1605d50e10 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_korean_history.yaml @@ -0,0 +1,3 @@ +dataset_name: korean_history +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_korean_history diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_law.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_law.yaml new file mode 100644 index 0000000000000000000000000000000000000000..168f0340590d9736548eaeb56335e734d756fdac --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_law.yaml @@ -0,0 +1,3 @@ +dataset_name: law +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_law diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_machine_design_and_manufacturing.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_machine_design_and_manufacturing.yaml new file mode 100644 index 0000000000000000000000000000000000000000..73665b1bc0721e918c06ecc7b4256aceda23f704 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_machine_design_and_manufacturing.yaml @@ -0,0 +1,3 @@ +dataset_name: machine_design_and_manufacturing +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_machine_design_and_manufacturing diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_maritime_engineering.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_maritime_engineering.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4078cf973b90f3e03ac88a7670b3344a159fef2e --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_maritime_engineering.yaml @@ -0,0 +1,3 @@ +dataset_name: maritime_engineering +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_maritime_engineering diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_marketing.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_marketing.yaml new file mode 100644 index 0000000000000000000000000000000000000000..37d62bb1bad3e89181247bc4dfa0d8b9d4abbaaf --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_marketing.yaml @@ -0,0 +1,3 @@ +dataset_name: marketing +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_marketing diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_mechanical_engineering.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_mechanical_engineering.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dae55511a963529a8980118cdf6a9971eae611bc --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_mechanical_engineering.yaml @@ -0,0 +1,3 @@ 
+dataset_name: mechanical_engineering +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_mechanical_engineering diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_nondestructive_testing.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_nondestructive_testing.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3ff9583743953fde9d681a9d4c4655b72d7c7e3c --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_nondestructive_testing.yaml @@ -0,0 +1,3 @@ +dataset_name: nondestructive_testing +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_nondestructive_testing diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_political_science_and_sociology.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_political_science_and_sociology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8a5d96b6000a27ff3631fbf4c42b89ea3a41fc9a --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_political_science_and_sociology.yaml @@ -0,0 +1,3 @@ +dataset_name: political_science_and_sociology +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_political_science_and_sociology diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_psychology.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_psychology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9fbf0d3191e885cd1486caf148d1c723ea142ee2 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_psychology.yaml @@ -0,0 +1,3 @@ +dataset_name: psychology +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_psychology diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_railway_and_automotive_engineering.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_railway_and_automotive_engineering.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0eb534e579c125e2e9951443649a5fbc084da47f --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_railway_and_automotive_engineering.yaml @@ -0,0 +1,3 @@ +dataset_name: railway_and_automotive_engineering +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_railway_and_automotive_engineering diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_real_estate.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_real_estate.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9c3df599ee0bae86ec979fabd1b3b118c3034c08 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_real_estate.yaml @@ -0,0 +1,3 @@ +dataset_name: real_estate +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_real_estate diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_refrigerating_machinery.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_refrigerating_machinery.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f62e8e9559fb0f0cb8795afd7027093b65d822f1 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_refrigerating_machinery.yaml @@ -0,0 +1,3 @@ +dataset_name: refrigerating_machinery +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_refrigerating_machinery diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_social_welfare.yaml 
b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_social_welfare.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ad4dc2cf373aab0a4ee7e56c9e5ec66b5cd7bcec --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_social_welfare.yaml @@ -0,0 +1,3 @@ +dataset_name: social_welfare +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_social_welfare diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_taxation.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_taxation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..445ab693d6a3064ea35a169d2d7327f6f0942687 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_taxation.yaml @@ -0,0 +1,3 @@ +dataset_name: taxation +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_taxation diff --git a/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_telecommunications_and_wireless_technology.yaml b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_telecommunications_and_wireless_technology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..498b2fb2d661089325953ea8de407e08fb9d4934 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_telecommunications_and_wireless_technology.yaml @@ -0,0 +1,3 @@ +dataset_name: telecommunications_and_wireless_technology +include: _direct_hard_kmmlu_yaml +task: kmmlu_hard_direct_telecommunications_and_wireless_technology diff --git a/lm-evaluation/lm_eval/tasks/siqa/README.md b/lm-evaluation/lm_eval/tasks/siqa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ca58844b90079a607dd1a6a8a049106c26f57deb --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/siqa/README.md @@ -0,0 +1,37 @@ +# Social IQA + +### Paper + +Title: Social IQA: Commonsense Reasoning about Social Interactions + +Abstract: https://arxiv.org/abs/1904.09728 + +> We introduce Social IQa, the first largescale benchmark for commonsense reasoning about social situations. Social IQa contains 38,000 multiple choice questions for probing emotional and social intelligence in a variety of everyday situations (e.g., Q: "Jordan wanted to tell Tracy a secret, so Jordan leaned towards Tracy. Why did Jordan do this?" A: "Make sure no one else could hear"). Through crowdsourcing, we collect commonsense questions along with correct and incorrect answers about social interactions, using a new framework that mitigates stylistic artifacts in incorrect answers by asking workers to provide the right answer to a different but related question. Empirical results show that our benchmark is challenging for existing question-answering models based on pretrained language models, compared to human performance (>20% gap). Notably, we further establish Social IQa as a resource for transfer learning of commonsense knowledge, achieving state-of-the-art performance on multiple commonsense reasoning tasks (Winograd Schemas, COPA). 
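For orientation, a small sketch of how a row becomes a three-way multiple-choice instance (an editor's illustration, not upstream code; the mapping follows the `siqa.yaml` added later in this diff, and the example values are adapted from the abstract above, with invented distractors):

```python
# SIQA: prompt, choices, and gold index per siqa.yaml.
doc = {
    "context": "Jordan wanted to tell Tracy a secret, so Jordan leaned towards Tracy.",
    "question": "Why did Jordan do this?",
    "answerA": "Make sure no one else could hear",
    "answerB": "Speak louder",  # invented distractor
    "answerC": "Walk away",     # invented distractor
    "label": "1",  # 1-indexed; the |int cast in the yaml suggests a string field
}

prompt = "Q: {} {}\nA:".format(doc["context"], doc["question"])  # doc_to_text
choices = [doc["answerA"], doc["answerB"], doc["answerC"]]       # doc_to_choice
gold = int(doc["label"]) - 1           # doc_to_target: "{{ (label|int) - 1 }}"
print(prompt, choices[gold])
```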
+ +Homepage: https://allenai.org/data/socialiqa + + +### Citation + +``` +@inproceedings{sap2019social, + title={Social IQa: Commonsense Reasoning about Social Interactions}, + author={Sap, Maarten and Rashkin, Hannah and Chen, Derek and Le Bras, Ronan and Choi, Yejin}, + booktitle={Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)}, + pages={4463--4473}, + year={2019} +} +``` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [X] Is the task an existing benchmark in the literature? + * [X] Have you referenced the original paper that introduced the task? + * [X] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? The original paper doesn't have an associated implementation, but there is an official entry in [BigBench](https://github.com/google/BIG-bench/tree/main/bigbench/benchmark_tasks/social_iqa). I use the same prompting format as BigBench. + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/lm_eval/tasks/siqa/siqa.yaml b/lm-evaluation/lm_eval/tasks/siqa/siqa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..191ffa8d30bae64d4039b235ed857ba5106f3b65 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/siqa/siqa.yaml @@ -0,0 +1,19 @@ +task: social_iqa +dataset_path: social_i_qa +dataset_name: null +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "Q: {{context}} {{question}}\nA:" +target_delimiter: " " +doc_to_choice: + - "{{answerA}}" + - "{{answerB}}" + - "{{answerC}}" +doc_to_target: "{{ (label|int) - 1 }}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 0.0 diff --git a/lm-evaluation/lm_eval/tasks/translation/iwslt2017_en-ar.yaml b/lm-evaluation/lm_eval/tasks/translation/iwslt2017_en-ar.yaml new file mode 100644 index 0000000000000000000000000000000000000000..891ad50fd6fb60fdb8f21f9004857d739a15640f --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/translation/iwslt2017_en-ar.yaml @@ -0,0 +1,13 @@ +# Generated by utils.py +dataset_name: iwslt2017-en-ar +dataset_path: iwslt2017 +doc_to_target: ' {{translation["ar"]}}' +doc_to_text: 'English phrase: {{translation["en"]}} + + Arabic phrase:' +group: +- generate_until +- translation +- iwslt2017 +include: wmt_common_yaml +task: iwslt2017-en-ar diff --git a/lm-evaluation/lm_eval/tasks/translation/wmt14_en-fr.yaml b/lm-evaluation/lm_eval/tasks/translation/wmt14_en-fr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b7e42dca5acca5036ec8b3b619501557c6a1c36c --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/translation/wmt14_en-fr.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: fr-en +dataset_path: wmt14 +doc_to_target: ' {{translation["fr"]}}' +doc_to_text: 'English phrase: {{translation["en"]}} + + French phrase:' +group: +- generate_until +- translation +- wmt14 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt14-en-fr diff --git a/lm-evaluation/lm_eval/tasks/translation/wmt16_en-ro.yaml 
b/lm-evaluation/lm_eval/tasks/translation/wmt16_en-ro.yaml new file mode 100644 index 0000000000000000000000000000000000000000..45a8cae11824bd726064448422f021ec73d7ce87 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/translation/wmt16_en-ro.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: ro-en +dataset_path: wmt16 +doc_to_target: ' {{translation["ro"]}}' +doc_to_text: 'English phrase: {{translation["en"]}} + + Romanian phrase:' +group: +- generate_until +- translation +- wmt16 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt16-en-ro diff --git a/lm-evaluation/lm_eval/tasks/translation/wmt_common_yaml b/lm-evaluation/lm_eval/tasks/translation/wmt_common_yaml new file mode 100644 index 0000000000000000000000000000000000000000..2cb3c7c8f8d8305e9907c89c94d6f8fd95c709fc --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/translation/wmt_common_yaml @@ -0,0 +1,17 @@ +output_type: generate_until +training_split: train +validation_split: validation +fewshot_split: validation +test_split: test +metric_list: + - metric: bleu + - metric: ter + - metric: chrf +generation_kwargs: + until: + - "\n" + do_sample: false + temperature: 0.0 +repeats: 1 +metadata: + version: 1.0 diff --git a/lm-evaluation/lm_eval/tasks/xstorycloze/default_ar.yaml b/lm-evaluation/lm_eval/tasks/xstorycloze/default_ar.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2a52966d5a76138be4821d38c5bd639701586061 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/xstorycloze/default_ar.yaml @@ -0,0 +1,18 @@ +group: xstorycloze +task: xstorycloze_ar +dataset_path: juletxara/xstory_cloze +dataset_name: ar +output_type: multiple_choice +training_split: train +validation_split: eval +doc_to_text: "{{[input_sentence_1, input_sentence_2, input_sentence_3, input_sentence_4]|join(' ')}}" +doc_to_target: "{{answer_right_ending-1}}" +doc_to_choice: "{{[sentence_quiz1, sentence_quiz2]}}" +should_decontaminate: true +doc_to_decontamination_query: "{{[input_sentence_1, input_sentence_2, input_sentence_3, input_sentence_4]|join(' ')}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation/lm_eval/tasks/xstorycloze/default_en.yaml b/lm-evaluation/lm_eval/tasks/xstorycloze/default_en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b3127cdfa5dfd4249566b12dc9b1451018a88581 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/xstorycloze/default_en.yaml @@ -0,0 +1,3 @@ +include: default_ar.yaml +task: xstorycloze_en +dataset_name: en diff --git a/lm-evaluation/lm_eval/tasks/xstorycloze/default_es.yaml b/lm-evaluation/lm_eval/tasks/xstorycloze/default_es.yaml new file mode 100644 index 0000000000000000000000000000000000000000..60af1f8c0a7b8b0917060d592c663fe6212e0210 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/xstorycloze/default_es.yaml @@ -0,0 +1,3 @@ +include: default_ar.yaml +task: xstorycloze_es +dataset_name: es diff --git a/lm-evaluation/lm_eval/tasks/xstorycloze/default_eu.yaml b/lm-evaluation/lm_eval/tasks/xstorycloze/default_eu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..849caccf2425ec1483baddb83d8c98b8d1eb79e3 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/xstorycloze/default_eu.yaml @@ -0,0 +1,3 @@ +include: default_ar.yaml +task: xstorycloze_eu +dataset_name: eu diff --git a/lm-evaluation/lm_eval/tasks/xstorycloze/default_hi.yaml b/lm-evaluation/lm_eval/tasks/xstorycloze/default_hi.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..8c00c75f0e3cba53c17174723d714fde8dc8c351 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/xstorycloze/default_hi.yaml @@ -0,0 +1,3 @@ +include: default_ar.yaml +task: xstorycloze_hi +dataset_name: hi diff --git a/lm-evaluation/lm_eval/tasks/xstorycloze/default_my.yaml b/lm-evaluation/lm_eval/tasks/xstorycloze/default_my.yaml new file mode 100644 index 0000000000000000000000000000000000000000..47c3ae187209901b1fd711e680c1c3d46fdff48e --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/xstorycloze/default_my.yaml @@ -0,0 +1,3 @@ +include: default_ar.yaml +task: xstorycloze_my +dataset_name: my diff --git a/lm-evaluation/lm_eval/tasks/xstorycloze/default_ru.yaml b/lm-evaluation/lm_eval/tasks/xstorycloze/default_ru.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8617ab08db68e066c4165b0480801b2e5e16d9a2 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/xstorycloze/default_ru.yaml @@ -0,0 +1,3 @@ +include: default_ar.yaml +task: xstorycloze_ru +dataset_name: ru diff --git a/lm-evaluation/lm_eval/tasks/xstorycloze/default_zh.yaml b/lm-evaluation/lm_eval/tasks/xstorycloze/default_zh.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a55989fe2f64e6cb0dcf5136c35a1d5bf1ee4ae6 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/xstorycloze/default_zh.yaml @@ -0,0 +1,3 @@ +include: default_ar.yaml +task: xstorycloze_zh +dataset_name: zh
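As a closing illustration (an editor's sketch, not upstream code; the field names come from `default_ar.yaml` above, and the story text is invented), the xstorycloze configs assemble each instance as follows:

```python
# xstorycloze: context, choices, and gold index per default_ar.yaml.
doc = {
    "input_sentence_1": "Sara wanted to learn to paint.",  # invented story
    "input_sentence_2": "She bought brushes and a canvas.",
    "input_sentence_3": "She practiced every evening.",
    "input_sentence_4": "Her first portrait finally came together.",
    "sentence_quiz1": "She proudly hung it on her wall.",
    "sentence_quiz2": "She threw away all her supplies.",
    "answer_right_ending": 1,  # 1-indexed in the dataset
}

# doc_to_text joins the four story sentences with single spaces
context = " ".join(doc[f"input_sentence_{i}"] for i in range(1, 5))
choices = [doc["sentence_quiz1"], doc["sentence_quiz2"]]  # doc_to_choice
gold = doc["answer_right_ending"] - 1  # doc_to_target: "{{answer_right_ending-1}}"
print(context, "->", choices[gold])
```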