diff --git a/lm-evaluation/lm_eval/tasks/arithmetic/README.md b/lm-evaluation/lm_eval/tasks/arithmetic/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e7d7f89efbbd3af29e5e1c28b1af1adb93073569 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/arithmetic/README.md @@ -0,0 +1,60 @@ +# Arithmetic + +### Paper + +Title: `Language Models are Few-Shot Learners` +Abstract: https://arxiv.org/abs/2005.14165 + +A small battery of 10 tests that involve asking language models a simple arithmetic +problem in natural language. + +Homepage: https://github.com/openai/gpt-3/tree/master/data + + +### Citation + +``` +@inproceedings{NEURIPS2020_1457c0d6, + author = {Brown, Tom and Mann, Benjamin and Ryder, Nick and Subbiah, Melanie and Kaplan, Jared D and Dhariwal, Prafulla and Neelakantan, Arvind and Shyam, Pranav and Sastry, Girish and Askell, Amanda and Agarwal, Sandhini and Herbert-Voss, Ariel and Krueger, Gretchen and Henighan, Tom and Child, Rewon and Ramesh, Aditya and Ziegler, Daniel and Wu, Jeffrey and Winter, Clemens and Hesse, Chris and Chen, Mark and Sigler, Eric and Litwin, Mateusz and Gray, Scott and Chess, Benjamin and Clark, Jack and Berner, Christopher and McCandlish, Sam and Radford, Alec and Sutskever, Ilya and Amodei, Dario}, + booktitle = {Advances in Neural Information Processing Systems}, + editor = {H. Larochelle and M. Ranzato and R. Hadsell and M. F. Balcan and H. Lin}, + pages = {1877--1901}, + publisher = {Curran Associates, Inc.}, + title = {Language Models are Few-Shot Learners}, + url = {https://proceedings.neurips.cc/paper/2020/file/1457c0d6bfcb4967418bfb8ac142f64a-Paper.pdf}, + volume = {33}, + year = {2020} +} +``` + +### Groups and Tasks + +#### Groups + +* `arithmetic`: Evaluates `1dc` to `5ds` + +#### Tasks + +* `arithmetic_1dc` +* `arithmetic_2da` +* `arithmetic_2dm` +* `arithmetic_2ds` +* `arithmetic_3da` +* `arithmetic_3ds` +* `arithmetic_4da` +* `arithmetic_4ds` +* `arithmetic_5da` +* `arithmetic_5ds` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
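For a quick smoke test, these tasks can also be driven from the harness's Python API. The snippet below is a minimal sketch, not part of this patch; the `pythia-160m` checkpoint is an arbitrary stand-in model.

```python
# Minimal sketch: score two arithmetic sub-tasks via the harness API.
# Assumes lm-evaluation-harness (with HF extras) is installed; the model is a stand-in.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",
    tasks=["arithmetic_2da", "arithmetic_2ds"],
    num_fewshot=5,  # the GPT-3 paper evaluates these tasks few-shot
)
print(results["results"])  # per-task accuracy under the `acc` metric
```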
diff --git a/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_1dc.yaml b/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_1dc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3e8d414a60c1f9df7c635fafd34b7a2f39a36865 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_1dc.yaml @@ -0,0 +1,18 @@ +group: + - arithmetic +task: arithmetic_1dc +dataset_path: EleutherAI/arithmetic +dataset_name: arithmetic_1dc +output_type: loglikelihood +validation_split: validation +test_split: null +doc_to_text: "{{context}}" +doc_to_target: "{{completion}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_2da.yaml b/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_2da.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a186d76e8971072947dd6e9322e701ecc8815e89 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_2da.yaml @@ -0,0 +1,5 @@ +include: arithmetic_1dc.yaml +task: arithmetic_2da +dataset_name: arithmetic_2da +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_2dm.yaml b/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_2dm.yaml new file mode 100644 index 0000000000000000000000000000000000000000..471bd4b4449f280412d9ee69566d4f80fd623671 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_2dm.yaml @@ -0,0 +1,5 @@ +include: arithmetic_1dc.yaml +task: arithmetic_2dm +dataset_name: arithmetic_2dm +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_2ds.yaml b/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_2ds.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f8e762486b818ee8b2962c94f46edaefb36da6b5 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_2ds.yaml @@ -0,0 +1,5 @@ +include: arithmetic_1dc.yaml +task: arithmetic_2ds +dataset_name: arithmetic_2ds +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_3da.yaml b/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_3da.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a4870d04f0c47ea61a75504ce051bd929ee1840e --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_3da.yaml @@ -0,0 +1,5 @@ +include: arithmetic_1dc.yaml +task: arithmetic_3da +dataset_name: arithmetic_3da +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_3ds.yaml b/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_3ds.yaml new file mode 100644 index 0000000000000000000000000000000000000000..37f9ff0d2536d6c55c3e0f1676fe8218395d7b6c --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_3ds.yaml @@ -0,0 +1,5 @@ +include: arithmetic_1dc.yaml +task: arithmetic_3ds +dataset_name: arithmetic_3ds +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_4da.yaml b/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_4da.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4c04c6249fc520010317fe2503813acf86780844 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_4da.yaml @@ -0,0 +1,5 @@ +include: arithmetic_1dc.yaml +task: arithmetic_4da +dataset_name: arithmetic_4da +dataset_kwargs: + trust_remote_code: true diff --git 
a/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_4ds.yaml b/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_4ds.yaml new file mode 100644 index 0000000000000000000000000000000000000000..282b3d1e51e886b3509a68ffb921238eb8e49cb0 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_4ds.yaml @@ -0,0 +1,5 @@ +include: arithmetic_1dc.yaml +task: arithmetic_4ds +dataset_name: arithmetic_4ds +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_5da.yaml b/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_5da.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5365cfbeb94d8fea5d782500a8f88ecfc19dafdb --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_5da.yaml @@ -0,0 +1,5 @@ +include: arithmetic_1dc.yaml +task: arithmetic_5da +dataset_name: arithmetic_5da +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_5ds.yaml b/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_5ds.yaml new file mode 100644 index 0000000000000000000000000000000000000000..51d95da0074dd32b7c99e0d80e2a54765279c5bc --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_5ds.yaml @@ -0,0 +1,5 @@ +include: arithmetic_1dc.yaml +task: arithmetic_5ds +dataset_name: arithmetic_5ds +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_arb_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_arb_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8759bc4d86152af04e0cccf33f01306893595d19 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_arb_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "arb_Latn" +"include": "_default_template_yaml" +"task": "belebele_arb_Latn" +"test_split": "arb_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_ckb_Arab.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_ckb_Arab.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6ad49a8eae0550ddd23ca51839c2d72b31031725 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_ckb_Arab.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "ckb_Arab" +"include": "_default_template_yaml" +"task": "belebele_ckb_Arab" +"test_split": "ckb_Arab" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_ita_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_ita_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..69c2faffd004bf654f954d428559cf62df755496 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_ita_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "ita_Latn" +"include": "_default_template_yaml" +"task": "belebele_ita_Latn" +"test_split": "ita_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_kin_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_kin_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..188a1691c7e8230ae178ed058513f973a1c18073 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_kin_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "kin_Latn" +"include": "_default_template_yaml" +"task": "belebele_kin_Latn" +"test_split": "kin_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_mlt_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_mlt_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5303cdd976b5b1f2ebbe1f36e8661ab966a856b6 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_mlt_Latn.yaml @@ 
-0,0 +1,4 @@ +"fewshot_split": "mlt_Latn" +"include": "_default_template_yaml" +"task": "belebele_mlt_Latn" +"test_split": "mlt_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_npi_Deva.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_npi_Deva.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c8126174671cdcc412958b90d1bc3051a8d4386a --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_npi_Deva.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "npi_Deva" +"include": "_default_template_yaml" +"task": "belebele_npi_Deva" +"test_split": "npi_Deva" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_ssw_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_ssw_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..788d6959976320f5fb962e442aa8fa9c2ed9cca8 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_ssw_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "ssw_Latn" +"include": "_default_template_yaml" +"task": "belebele_ssw_Latn" +"test_split": "ssw_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_tur_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_tur_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ee490bb0bab9c0f32c8e62d6d0bb553cbb91a192 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_tur_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "tur_Latn" +"include": "_default_template_yaml" +"task": "belebele_tur_Latn" +"test_split": "tur_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_urd_Arab.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_urd_Arab.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a8c54e9ea623535b89b9147c9d9660a5723c5bdd --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_urd_Arab.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "urd_Arab" +"include": "_default_template_yaml" +"task": "belebele_urd_Arab" +"test_split": "urd_Arab" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_zho_Hant.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_zho_Hant.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c8604e58b6b747cd32a0621fe2a1858a3102da36 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_zho_Hant.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "zho_Hant" +"include": "_default_template_yaml" +"task": "belebele_zho_Hant" +"test_split": "zho_Hant" diff --git a/lm-evaluation/lm_eval/tasks/fld/README.md b/lm-evaluation/lm_eval/tasks/fld/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1c7d88e3df69a6690c9da2c897cdf0b3d7311e05 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/fld/README.md @@ -0,0 +1,64 @@ +# FLD + +### Paper + +Title: Learning Deductive Reasoning from Synthetic Corpus based on Formal Logic + +Abstract: https://arxiv.org/abs/2308.07336 + +**FLD** (**F**ormal **L**ogic **D**eduction) is a deductive reasoning benchmark. +Given a set of facts and a hypothesis, an LLM is required to generate (i) proof steps to (dis-)prove the hypothesis, and (ii) an answer ("proved", "disproved" or "unknown"). + +Unique features of FLD are: +* It assesses the model's logical reasoning ability *isolated from knowledge*, as the facts are randomly constructed so that referring to existing knowledge never helps solve the task. +* It assesses diverse reasoning patterns (i.e., deduction rules), as it is based on formal logic theory. +* As a result, it is highly challenging.
Indeed, even GPT-4 can solve only about half of the problems. + +Homepage: https://github.com/hitachi-nlp/FLD + + +### Citation + +``` +@InProceedings{pmlr-v202-morishita23a, + title = {Learning Deductive Reasoning from Synthetic Corpus based on Formal Logic}, + author = {Morishita, Terufumi and Morio, Gaku and Yamaguchi, Atsuki and Sogawa, Yasuhiro}, + booktitle = {Proceedings of the 40th International Conference on Machine Learning}, + pages = {25254--25274}, + year = {2023}, + editor = {Krause, Andreas and Brunskill, Emma and Cho, Kyunghyun and Engelhardt, Barbara and Sabato, Sivan and Scarlett, Jonathan}, + volume = {202}, + series = {Proceedings of Machine Learning Research}, + month = {23--29 Jul}, + publisher = {PMLR}, + pdf = {https://proceedings.mlr.press/v202/morishita23a/morishita23a.pdf}, + url = {https://proceedings.mlr.press/v202/morishita23a.html}, +} +``` + +### Groups and Tasks + +#### Groups + +* `fld` + +#### Tasks + +This release is the simplified version of FLD, where a model is required to predict only an answer. +This setting is referred to as "answer accuracy" in the original paper. + +* `fld_default`: a basic task based on [FLD.v2](https://huggingface.co/datasets/hitachi-nlp/FLD.v2/viewer/default) +* `fld_star`: a more challenging version based on [FLD.v2-star](https://huggingface.co/datasets/hitachi-nlp/FLD.v2/viewer/star) + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/lm_eval/tasks/fld/fld_default.yaml b/lm-evaluation/lm_eval/tasks/fld/fld_default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..233a3564a3ffb6d207dd397103a27bd37c43dc22 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/fld/fld_default.yaml @@ -0,0 +1,21 @@ +group: + - fld +task: fld_default +dataset_path: hitachi-nlp/FLD.v2 +dataset_name: default +training_split: train +validation_split: validation +test_split: test +doc_to_text: "Based on the provided facts ($context$), either prove or disprove the hypothesis or state that it is unknown.
{{prompt_serial}}" +doc_to_target: world_assump_label +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true +filter_list: + - name: remove_whitespace + filter: + - function: remove_whitespace + - function: take_first +metadata: + version: 2.0 diff --git a/lm-evaluation/lm_eval/tasks/fld/fld_star.yaml b/lm-evaluation/lm_eval/tasks/fld/fld_star.yaml new file mode 100644 index 0000000000000000000000000000000000000000..750e808c780001e4659c9def75400f8a2460045e --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/fld/fld_star.yaml @@ -0,0 +1,3 @@ +include: fld_default.yaml +task: fld_star +dataset_name: star diff --git a/lm-evaluation/lm_eval/tasks/headqa/README.md b/lm-evaluation/lm_eval/tasks/headqa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9e061f0ed44e65ef04cc9d98220058051d509da6 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/headqa/README.md @@ -0,0 +1,57 @@ +# HEAD-QA + +### Paper + +HEAD-QA: A Healthcare Dataset for Complex Reasoning +https://arxiv.org/pdf/1906.04701.pdf + +HEAD-QA is a multi-choice HEAlthcare Dataset. The questions come from exams to access a specialized position in the +Spanish healthcare system, and are challenging even for highly specialized humans. They are designed by the Ministerio +de Sanidad, Consumo y Bienestar Social. +The dataset contains questions about the following topics: medicine, nursing, psychology, chemistry, pharmacology and biology. + +Homepage: https://aghie.github.io/head-qa/ + + +### Citation + +``` +@inproceedings{vilares-gomez-rodriguez-2019-head, + title = "{HEAD}-{QA}: A Healthcare Dataset for Complex Reasoning", + author = "Vilares, David and + G{\'o}mez-Rodr{\'i}guez, Carlos", + booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", + month = jul, + year = "2019", + address = "Florence, Italy", + publisher = "Association for Computational Linguistics", + url = "https://www.aclweb.org/anthology/P19-1092", + doi = "10.18653/v1/P19-1092", + pages = "960--966", + abstract = "We present HEAD-QA, a multi-choice question answering testbed to encourage research on complex reasoning. The questions come from exams to access a specialized position in the Spanish healthcare system, and are challenging even for highly specialized humans. We then consider monolingual (Spanish) and cross-lingual (to English) experiments with information retrieval and neural techniques. We show that: (i) HEAD-QA challenges current methods, and (ii) the results lag well behind human performance, demonstrating its usefulness as a benchmark for future work.", +} +``` + +### Groups and Tasks + +#### Groups + +- `headqa`: Evaluates `headqa_en` and `headqa_es` + +#### Tasks + +* `headqa_en` - English variant of HEAD-QA +* `headqa_es` - Spanish variant of HEAD-QA + +### Checklist + +* [x] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? 
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? + * [x] Same as LM Evaluation Harness v0.3.0 implementation diff --git a/lm-evaluation/lm_eval/tasks/headqa/headqa_en.yaml b/lm-evaluation/lm_eval/tasks/headqa/headqa_en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..eeb2ff12dd4c05b08c199692c3e868b6b50fc362 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/headqa/headqa_en.yaml @@ -0,0 +1,23 @@ +group: + - headqa +task: headqa_en +dataset_path: EleutherAI/headqa +dataset_name: en +output_type: multiple_choice +training_split: train +validation_split: validation +test_split: test +doc_to_text: "Question: {{qtext}}\nAnswer:" +doc_to_target: "{{ra - 1}}" # `ra` is 1-indexed; this will be cast to an int. +doc_to_choice: "{{answers|map(attribute='atext')|list}}" +should_decontaminate: true +doc_to_decontamination_query: query +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation/lm_eval/tasks/headqa/headqa_es.yaml b/lm-evaluation/lm_eval/tasks/headqa/headqa_es.yaml new file mode 100644 index 0000000000000000000000000000000000000000..88e202f753e18f6fd6b8e303353cc0f38fce73e3 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/headqa/headqa_es.yaml @@ -0,0 +1,3 @@ +include: headqa_en.yaml +task: headqa_es +dataset_name: es diff --git a/lm-evaluation/lm_eval/tasks/hellaswag/README.md b/lm-evaluation/lm_eval/tasks/hellaswag/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9fdbac13581c06430b63248514b7cf5c9610c220 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/hellaswag/README.md @@ -0,0 +1,49 @@ +# HellaSwag + +### Paper + +Title: `HellaSwag: Can a Machine Really Finish Your Sentence?` + +Abstract: https://arxiv.org/abs/1905.07830 + +Recent work by Zellers et al. (2018) introduced a new task of commonsense natural language inference: given an event description such as "A woman sits at a piano," a machine must select the most likely followup: "She sets her fingers on the keys." With the introduction of BERT, near human-level performance was reached. Does this mean that machines can perform human level commonsense inference? +In this paper, we show that commonsense inference still proves difficult for even state-of-the-art models, by presenting HellaSwag, a new challenge dataset. Though its questions are trivial for humans (>95% accuracy), state-of-the-art models struggle (<48%). We achieve this via Adversarial Filtering (AF), a data collection paradigm wherein a series of discriminators iteratively select an adversarial set of machine-generated wrong answers. AF proves to be surprisingly robust. The key insight is to scale up the length and complexity of the dataset examples towards a critical 'Goldilocks' zone wherein generated text is ridiculous to humans, yet often misclassified by state-of-the-art models. +Our construction of HellaSwag, and its resulting difficulty, sheds light on the inner workings of deep pretrained models. More broadly, it suggests a new path forward for NLP research, in which benchmarks co-evolve with the evolving state-of-the-art in an adversarial way, so as to present ever-harder challenges.
+ +Homepage: `https://rowanzellers.com/hellaswag/` + + +### Citation + +``` +@inproceedings{zellers2019hellaswag, + title={HellaSwag: Can a Machine Really Finish Your Sentence?}, + author={Zellers, Rowan and Holtzman, Ari and Bisk, Yonatan and Farhadi, Ali and Choi, Yejin}, + booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics}, + year={2019} +} +``` + +### Groups and Tasks + +#### Groups + +- Not part of a group yet + +#### Tasks + +- `hellaswag` + + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/lm_eval/tasks/medqa/medqa.yaml b/lm-evaluation/lm_eval/tasks/medqa/medqa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7d5555966fa0d4bcf2e8dc4a74eea7442ca433a3 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/medqa/medqa.yaml @@ -0,0 +1,16 @@ +task: medqa_4options +dataset_path: GBaker/MedQA-USMLE-4-options-hf +output_type: multiple_choice +training_split: train +validation_split: validation +test_split: test +doc_to_text: !function preprocess_medqa.doc_to_text +doc_to_target: !function preprocess_medqa.doc_to_target +doc_to_choice: [ 'A', 'B', 'C', 'D' ] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true diff --git a/lm-evaluation/lm_eval/tasks/medqa/preprocess_medqa.py b/lm-evaluation/lm_eval/tasks/medqa/preprocess_medqa.py new file mode 100644 index 0000000000000000000000000000000000000000..6ec35851453d7452833ceb30ec93f50ba495f594 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/medqa/preprocess_medqa.py @@ -0,0 +1,16 @@ +def doc_to_text(doc) -> str: + # Label the four answer endings A-D. + option_choices = { + "A": doc["ending0"], + "B": doc["ending1"], + "C": doc["ending2"], + "D": doc["ending3"], + } + # Render one "letter. option" line per choice, then prompt for the answer. + answers = "".join(f"{k}. {v}\n" for k, v in option_choices.items()) + return f"Question: {doc['sent1']}\n{answers}Answer:" + + +def doc_to_target(doc) -> int: + # The gold label is already a 0-3 index into the A-D choices. + return doc["label"] diff --git a/lm-evaluation/lm_eval/tasks/race/README.md b/lm-evaluation/lm_eval/tasks/race/README.md new file mode 100644 index 0000000000000000000000000000000000000000..dfe6c5e8a50da470e22be690e9e10612d830f957 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/race/README.md @@ -0,0 +1,62 @@ +# RACE + +### Paper + +Title: `RACE: Large-scale ReAding Comprehension Dataset From Examinations` + +Abstract: https://arxiv.org/abs/1704.04683 + +RACE is a large-scale reading comprehension dataset with more than 28,000 passages +and nearly 100,000 questions. The dataset was collected from English examinations +in China, which are designed for middle school and high school students. The dataset +can serve as training and test sets for machine comprehension.
+ +Homepage: https://www.cs.cmu.edu/~glai1/data/race/ + + +### Citation + +``` +@inproceedings{lai-etal-2017-race, + title = "{RACE}: Large-scale {R}e{A}ding Comprehension Dataset From Examinations", + author = "Lai, Guokun and + Xie, Qizhe and + Liu, Hanxiao and + Yang, Yiming and + Hovy, Eduard", + editor = "Palmer, Martha and + Hwa, Rebecca and + Riedel, Sebastian", + booktitle = "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", + month = sep, + year = "2017", + address = "Copenhagen, Denmark", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/D17-1082", + doi = "10.18653/v1/D17-1082", + pages = "785--794" +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet. + +#### Tasks + +* `race` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/lm_eval/tasks/race/preprocess_race.py b/lm-evaluation/lm_eval/tasks/race/preprocess_race.py new file mode 100644 index 0000000000000000000000000000000000000000..03a214e5747876325d118bf4660b0e5c7e9d5142 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/race/preprocess_race.py @@ -0,0 +1,46 @@ +import ast + + +def process_ast(string): + # The "problems" field is stored as a stringified Python list. + return ast.literal_eval(string) + + +def last_problem(doc): + # The final problem in the passage is the one the model must answer. + return process_ast(doc["problems"])[-1] + + +def get_answer_option(problem): + # Map the gold answer letter to its option text. + letter_to_num = {"A": 0, "B": 1, "C": 2, "D": 3} + answer = letter_to_num[problem["answer"]] + return problem["options"][answer] + + +def doc_to_choice(doc): + problem = last_problem(doc) + choices = [problem["options"][i] for i in range(4)] + return choices + + +def doc_to_text(doc): + text = "Article: " + doc["article"] + "\n\n" + # Render each earlier problem in the passage as an in-context example. + for problem in process_ast(doc["problems"])[:-1]: + if problem["question"][-6:] == "  _  .": + # Cloze-style question: keep the trailing blank marker and splice in the gold option. + text += problem["question"][-5:] + get_answer_option(problem) + "\n" + else: + question = "Question: " + problem["question"] + "\n" + answer = "Answer: " + get_answer_option(problem) + "\n" + text += question + answer + # Leave the final question unanswered for the model to complete. + text += last_problem(doc)["question"] + return text + + +def doc_to_target(doc): + letter_to_num = {"A": 0, "B": 1, "C": 2, "D": 3} + answer = letter_to_num[last_problem(doc)["answer"]] + return answer diff --git a/lm-evaluation/lm_eval/tasks/race/race.yaml b/lm-evaluation/lm_eval/tasks/race/race.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b90b809f6120924f398372a454ce4ba74220bbe9 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/race/race.yaml @@ -0,0 +1,16 @@ +task: race +dataset_path: EleutherAI/race +dataset_name: high +output_type: multiple_choice +test_split: test +doc_to_text: !function preprocess_race.doc_to_text +doc_to_target: !function preprocess_race.doc_to_target +doc_to_choice: !function preprocess_race.doc_to_choice +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 2.0 +dataset_kwargs: + trust_remote_code:
true diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/_default_template_yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/_default_template_yaml new file mode 100644 index 0000000000000000000000000000000000000000..7ece2e2d84cb43f6e1d7403ae83a73be41e164f7 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/_default_template_yaml @@ -0,0 +1,19 @@ +dataset_path: ZoneTwelve/tmmluplus # a copy of `ikala/tmmluplus` +test_split: test +fewshot_split: train +fewshot_config: + sampler: first_n +output_type: multiple_choice +process_docs: !function utils.process_docs +doc_to_text: "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:" +doc_to_choice: ["A", "B", "C", "D"] +doc_to_target: answer +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 0.1 diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/_generate_configs.py b/lm-evaluation/lm_eval/tasks/tmmluplus/default/_generate_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..e313e9b1ea053b4a97f19d8dcbcdfe2cf86f856a --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/_generate_configs.py @@ -0,0 +1,210 @@ +""" +Take in a YAML, and output all "other" splits with this YAML +""" +import argparse +import os + +import pandas as pd +import yaml +from tqdm import tqdm + + +# Copied from https://github.com/iKala/ievals/blob/main/ievals/settings.py +# from the TMMLU+ official example +categories = { + "STEM": [ + "physics", + "chemistry", + "biology", + "computer science", + "math", + "engineering", + ], + "humanities": ["history", "philosophy", "law"], + "social_sciences": [ + "politics", + "culture", + "economics", + "geography", + "psychology", + "education", + ], + "other": ["other", "business", "health"], # (business, health, misc.)
+} + +task_list = [ + "engineering_math", + "dentistry", + "traditional_chinese_medicine_clinical_medicine", + "clinical_psychology", + "technical", + "culinary_skills", + "mechanical", + "logic_reasoning", + "real_estate", + "general_principles_of_law", + "finance_banking", + "anti_money_laundering", + "ttqav2", + "marketing_management", + "business_management", + "organic_chemistry", + "advance_chemistry", + "physics", + "secondary_physics", + "human_behavior", + "national_protection", + "jce_humanities", + "politic_science", + "agriculture", + "official_document_management", + "financial_analysis", + "pharmacy", + "educational_psychology", + "statistics_and_machine_learning", + "management_accounting", + "introduction_to_law", + "computer_science", + "veterinary_pathology", + "accounting", + "fire_science", + "optometry", + "insurance_studies", + "pharmacology", + "taxation", + "education_(profession_level)", + "economics", + "veterinary_pharmacology", + "nautical_science", + "occupational_therapy_for_psychological_disorders", + "trust_practice", + "geography_of_taiwan", + "physical_education", + "auditing", + "administrative_law", + "basic_medical_science", + "macroeconomics", + "trade", + "chinese_language_and_literature", + "tve_design", + "junior_science_exam", + "junior_math_exam", + "junior_chinese_exam", + "junior_social_studies", + "tve_mathematics", + "tve_chinese_language", + "tve_natural_sciences", + "junior_chemistry", + "music", + "education", + "three_principles_of_people", + "taiwanese_hokkien", +] +subject2name = {} +# subject2category = {} +SUBJECTS = {} + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--base_yaml_path", required=True) + parser.add_argument("--save_prefix_path", default="tmmluplus") + parser.add_argument("--cot_prompt_path", default=None) + parser.add_argument("--task_prefix", default="") + parser.add_argument("--group_prefix", default="") + parser.add_argument("--subject_file", default="subject.tsv") + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + from pathlib import Path + + # Initialization + SUBJECT_FILE = Path(__file__).parent / Path(args.subject_file) + + df = pd.read_csv(SUBJECT_FILE, delimiter="\t") + + for _, row in df.iterrows(): + for _c in categories: + if row["subject"] in SUBJECTS: + raise ValueError("Duplicate tasks.") + if row["category"] in categories[_c]: # append new item into SUBJECTS + SUBJECTS[row["subject"]] = _c + subject2name[row["subject"]] = row["name"] + break + # End of SUBJECTS initialization + + # get filename of base_yaml so we can `"include": ` it in our "other" YAMLs. 
+ base_yaml_name = os.path.split(args.base_yaml_path)[-1] + with open(args.base_yaml_path) as f: + base_yaml = yaml.full_load(f) + + if args.cot_prompt_path is not None: + import json + + with open(args.cot_prompt_path) as f: + cot_file = json.load(f) + + ALL_CATEGORIES = [] + for subject, category in tqdm(SUBJECTS.items()): + if category not in ALL_CATEGORIES: + ALL_CATEGORIES.append(category) + + if args.cot_prompt_path is not None: + description = cot_file[subject] + else: + name_of_subject = subject2name[subject].replace("_", " ") + description = f"以下為{name_of_subject}的單選題,請提供正確答案的選項。\n\n" + # description = f"The following are multiple choice questions (with answers) about {' '.join(subject.split('_'))}.\n\n" + + yaml_dict = { + "include": base_yaml_name, + "group": f"tmmluplus_{args.task_prefix}_{category}" + if args.task_prefix != "" + else f"tmmluplus_{category}", + "group_alias": category.replace("_", " "), + "task": f"tmmluplus_{args.task_prefix}_{subject}" + if args.task_prefix != "" + else f"tmmluplus_{subject}", + "task_alias": subject.replace("_", " "), + "dataset_name": subject, + "description": description, + } + + file_save_path = args.save_prefix_path + f"_{subject}.yaml" + # eval_logger.info(f"Saving yaml for subset {subject} to {file_save_path}") + with open(file_save_path, "w") as yaml_file: + yaml.dump( + yaml_dict, + yaml_file, + # width=float("inf"), + allow_unicode=True, + default_style='"', + ) + + if args.task_prefix != "": + mmlu_subcategories = [ + f"tmmluplus_{args.task_prefix}_{category}" for category in ALL_CATEGORIES + ] + else: + mmlu_subcategories = [f"tmmluplus_{category}" for category in ALL_CATEGORIES] + + if args.group_prefix != "": + file_save_path = args.group_prefix + ".yaml" + else: + file_save_path = args.save_prefix_path + ".yaml" + + # eval_logger.info(f"Saving benchmark config to {file_save_path}") + with open(file_save_path, "w") as yaml_file: + yaml.dump( + { + "group": f"tmmluplus_{args.task_prefix}" + if args.task_prefix != "" + else "tmmluplus", + "task": mmlu_subcategories, + }, + yaml_file, + indent=4, + default_flow_style=False, + ) diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus.yaml new file mode 100644 index 0000000000000000000000000000000000000000..105cf98aff37b28535e8166ae685e5fac105eaed --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus.yaml @@ -0,0 +1,6 @@ +group: tmmluplus +task: +- tmmluplus_other +- tmmluplus_social_sciences +- tmmluplus_humanities +- tmmluplus_STEM diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_administrative_law.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_administrative_law.yaml new file mode 100644 index 0000000000000000000000000000000000000000..454efec64feb6daa0f4bc91f542b23003b2e62d9 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_administrative_law.yaml @@ -0,0 +1,7 @@ +"dataset_name": "administrative_law" +"description": "以下為行政法的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "tmmluplus_administrative_law" +"task_alias": "administrative law" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_agriculture.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_agriculture.yaml new file mode 100644 index 0000000000000000000000000000000000000000..340369c89024e057f9e73945c32655555b666c29 --- /dev/null +++ 
b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_agriculture.yaml @@ -0,0 +1,7 @@ +"dataset_name": "agriculture" +"description": "以下為農業的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_agriculture" +"task_alias": "agriculture" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_anti_money_laundering.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_anti_money_laundering.yaml new file mode 100644 index 0000000000000000000000000000000000000000..95bb0e47861f3c37954e74ae9b1fe17095f3eaa7 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_anti_money_laundering.yaml @@ -0,0 +1,7 @@ +"dataset_name": "anti_money_laundering" +"description": "以下為洗錢防制的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "tmmluplus_anti_money_laundering" +"task_alias": "anti money laundering" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_auditing.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_auditing.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a8168029b29291cab1e6f596acd51e00699e3cf2 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_auditing.yaml @@ -0,0 +1,7 @@ +"dataset_name": "auditing" +"description": "以下為審計學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_auditing" +"task_alias": "auditing" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_business_management.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_business_management.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9cacf04896d941ec705d1c3774952cbb516236f5 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_business_management.yaml @@ -0,0 +1,7 @@ +"dataset_name": "business_management" +"description": "以下為企業管理的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_business_management" +"task_alias": "business management" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_clinical_psychology.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_clinical_psychology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f8194feb7dee9c2100f6ecf50b602235d1ac0a2a --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_clinical_psychology.yaml @@ -0,0 +1,7 @@ +"dataset_name": "clinical_psychology" +"description": "以下為臨床心理學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_clinical_psychology" +"task_alias": "clinical psychology" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_culinary_skills.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_culinary_skills.yaml new file mode 100644 index 0000000000000000000000000000000000000000..457eac1d18465a434abfd4916acffb8ac7d30529 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_culinary_skills.yaml @@ -0,0 +1,7 @@ +"dataset_name": "culinary_skills" +"description": "以下為餐旅的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_culinary_skills" +"task_alias": "culinary skills" diff 
--git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_dentistry.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_dentistry.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d6295240fc3a37046d0c8d0038eb58130667a807 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_dentistry.yaml @@ -0,0 +1,7 @@ +"dataset_name": "dentistry" +"description": "以下為牙醫學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_dentistry" +"task_alias": "dentistry" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_economics.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_economics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2ed100fb42d428d0afd0c26f560da9700eb30b04 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_economics.yaml @@ -0,0 +1,7 @@ +"dataset_name": "economics" +"description": "以下為經濟學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_economics" +"task_alias": "economics" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_education.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_education.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cc601810008841e7d2bca53680b237c2b52c16ff --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_education.yaml @@ -0,0 +1,7 @@ +"dataset_name": "education" +"description": "以下為教育常識的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_education" +"task_alias": "education" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_education_(profession_level).yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_education_(profession_level).yaml new file mode 100644 index 0000000000000000000000000000000000000000..f986517b66c9f46443655b940c251007ba782c50 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_education_(profession_level).yaml @@ -0,0 +1,7 @@ +"dataset_name": "education_(profession_level)" +"description": "以下為教育專業的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_education_(profession_level)" +"task_alias": "education (profession level)" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_finance_banking.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_finance_banking.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e60086e12e5e97ec9df7ff5c616df95f92762ed1 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_finance_banking.yaml @@ -0,0 +1,7 @@ +"dataset_name": "finance_banking" +"description": "以下為金融與法規的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_finance_banking" +"task_alias": "finance banking" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_financial_analysis.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_financial_analysis.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9990ab5d0447b969c6f5ae026d5db0d388a00b29 --- /dev/null +++ 
b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_financial_analysis.yaml @@ -0,0 +1,7 @@ +"dataset_name": "financial_analysis" +"description": "以下為財務分析的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_financial_analysis" +"task_alias": "financial analysis" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_general_principles_of_law.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_general_principles_of_law.yaml new file mode 100644 index 0000000000000000000000000000000000000000..30b21caeada339782994aeedb9d92d1c77b683c5 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_general_principles_of_law.yaml @@ -0,0 +1,7 @@ +"dataset_name": "general_principles_of_law" +"description": "以下為法學大意的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "tmmluplus_general_principles_of_law" +"task_alias": "general principles of law" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_geography_of_taiwan.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_geography_of_taiwan.yaml new file mode 100644 index 0000000000000000000000000000000000000000..80ab36b73d77f58ef11f6a6aa047b51d2ca2cad2 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_geography_of_taiwan.yaml @@ -0,0 +1,7 @@ +"dataset_name": "geography_of_taiwan" +"description": "以下為台灣地理的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_geography_of_taiwan" +"task_alias": "geography of taiwan" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_insurance_studies.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_insurance_studies.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fa23be46c1af606deb01c860a4703d30edda019d --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_insurance_studies.yaml @@ -0,0 +1,7 @@ +"dataset_name": "insurance_studies" +"description": "以下為保險學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_insurance_studies" +"task_alias": "insurance studies" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_jce_humanities.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_jce_humanities.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2ff3bed0731b042baaaed575011b1c0ea6a26aff --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_jce_humanities.yaml @@ -0,0 +1,7 @@ +"dataset_name": "jce_humanities" +"description": "以下為指考人文科目的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "tmmluplus_jce_humanities" +"task_alias": "jce humanities" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_chemistry.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_chemistry.yaml new file mode 100644 index 0000000000000000000000000000000000000000..de9c0691cf1e5d3c4773f831c77d8056f355277f --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_chemistry.yaml @@ -0,0 +1,7 @@ +"dataset_name": "junior_chemistry" +"description": "以下為國中理化的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": 
"_default_template_yaml" +"task": "tmmluplus_junior_chemistry" +"task_alias": "junior chemistry" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_math_exam.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_math_exam.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a869a55f1445123550389b44b718df00d4dd2ef5 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_math_exam.yaml @@ -0,0 +1,7 @@ +"dataset_name": "junior_math_exam" +"description": "以下為國中會考基測數學科的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_junior_math_exam" +"task_alias": "junior math exam" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_social_studies.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_social_studies.yaml new file mode 100644 index 0000000000000000000000000000000000000000..760ff0e794a401489dc7a7ddd3e258f2a707edde --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_social_studies.yaml @@ -0,0 +1,7 @@ +"dataset_name": "junior_social_studies" +"description": "以下為國中會考基測社會科的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_junior_social_studies" +"task_alias": "junior social studies" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_macroeconomics.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_macroeconomics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..91009abe691ffcc0c910729244620557ccad2d6c --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_macroeconomics.yaml @@ -0,0 +1,7 @@ +"dataset_name": "macroeconomics" +"description": "以下為總經的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_macroeconomics" +"task_alias": "macroeconomics" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_marketing_management.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_marketing_management.yaml new file mode 100644 index 0000000000000000000000000000000000000000..da39f0a879b33956012c8f2fefba88586a9c4b4d --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_marketing_management.yaml @@ -0,0 +1,7 @@ +"dataset_name": "marketing_management" +"description": "以下為行銷管理的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_marketing_management" +"task_alias": "marketing management" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_mechanical.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_mechanical.yaml new file mode 100644 index 0000000000000000000000000000000000000000..81ea0dce68b2a7d0be1733fd94fc37c997bf894f --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_mechanical.yaml @@ -0,0 +1,7 @@ +"dataset_name": "mechanical" +"description": "以下為機械與機電概論的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_mechanical" +"task_alias": "mechanical" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_music.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_music.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..72864c0035da8cb92b491773be7a8a5e8a3b1685 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_music.yaml @@ -0,0 +1,7 @@ +"dataset_name": "music" +"description": "以下為音樂科的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_music" +"task_alias": "music" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_occupational_therapy_for_psychological_disorders.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_occupational_therapy_for_psychological_disorders.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ba2bfa827ed9f5edd8b0799fa9ca9127e16f7f4e --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_occupational_therapy_for_psychological_disorders.yaml @@ -0,0 +1,7 @@ +"dataset_name": "occupational_therapy_for_psychological_disorders" +"description": "以下為心理障礙職能治療學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_occupational_therapy_for_psychological_disorders" +"task_alias": "occupational therapy for psychological disorders" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_optometry.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_optometry.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7e3b78b7edd3136d3ed8a10d5e959d3fb72bc7bd --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_optometry.yaml @@ -0,0 +1,7 @@ +"dataset_name": "optometry" +"description": "以下為視光學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_optometry" +"task_alias": "optometry" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_organic_chemistry.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_organic_chemistry.yaml new file mode 100644 index 0000000000000000000000000000000000000000..75a096f9b4a242f12b207c6daa453d0dc2217e1f --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_organic_chemistry.yaml @@ -0,0 +1,7 @@ +"dataset_name": "organic_chemistry" +"description": "以下為有機化學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_organic_chemistry" +"task_alias": "organic chemistry" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_physical_education.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_physical_education.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fb3558e9baa4cf6ee4c1f19a244341a3a484861c --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_physical_education.yaml @@ -0,0 +1,7 @@ +"dataset_name": "physical_education" +"description": "以下為體育的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_physical_education" +"task_alias": "physical education" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_physics.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_physics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3a8443fe19d0e33011093547f6ada042188a5cee --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_physics.yaml @@ -0,0 +1,7 @@ +"dataset_name": "physics" 
+"description": "以下為物理的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_physics" +"task_alias": "physics" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_real_estate.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_real_estate.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ba90b7aa565bf0102967508392d286e13c25a747 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_real_estate.yaml @@ -0,0 +1,7 @@ +"dataset_name": "real_estate" +"description": "以下為房地產的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_real_estate" +"task_alias": "real estate" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_secondary_physics.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_secondary_physics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6865167cb1f51310ba30d7b4745e62bc878c5d8f --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_secondary_physics.yaml @@ -0,0 +1,7 @@ +"dataset_name": "secondary_physics" +"description": "以下為高中物理的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_secondary_physics" +"task_alias": "secondary physics" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_statistics_and_machine_learning.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_statistics_and_machine_learning.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9a6e68ba514f60c8c2f6760a80a09a3cd65eb1a0 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_statistics_and_machine_learning.yaml @@ -0,0 +1,7 @@ +"dataset_name": "statistics_and_machine_learning" +"description": "以下為統計與機器學習的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_statistics_and_machine_learning" +"task_alias": "statistics and machine learning" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_taiwanese_hokkien.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_taiwanese_hokkien.yaml new file mode 100644 index 0000000000000000000000000000000000000000..89297df3158681f837462d90ead8660b563ee3e0 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_taiwanese_hokkien.yaml @@ -0,0 +1,7 @@ +"dataset_name": "taiwanese_hokkien" +"description": "以下為閩南語的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_taiwanese_hokkien" +"task_alias": "taiwanese hokkien" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_technical.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_technical.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6167a8fe0f63000a8d714ef2ed286ed950297d54 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_technical.yaml @@ -0,0 +1,7 @@ +"dataset_name": "technical" +"description": "以下為技術工相關的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_technical" +"task_alias": "technical" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_traditional_chinese_medicine_clinical_medicine.yaml 
b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_traditional_chinese_medicine_clinical_medicine.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b5a3fdf197c6f64ecda03af7c6119721ae18df11 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_traditional_chinese_medicine_clinical_medicine.yaml @@ -0,0 +1,7 @@ +"dataset_name": "traditional_chinese_medicine_clinical_medicine" +"description": "以下為中醫臨床醫學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_traditional_chinese_medicine_clinical_medicine" +"task_alias": "traditional chinese medicine clinical medicine" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_chinese_language.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_chinese_language.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0c98fedd3725c1bcf7899328bb9b51a3b3ab5e02 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_chinese_language.yaml @@ -0,0 +1,7 @@ +"dataset_name": "tve_chinese_language" +"description": "以下為統測國文的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_tve_chinese_language" +"task_alias": "tve chinese language" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_design.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_design.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e36b1548025bb6a9a9f95cf58833ba94465aab4e --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_design.yaml @@ -0,0 +1,7 @@ +"dataset_name": "tve_design" +"description": "以下為統測 設計的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_tve_design" +"task_alias": "tve design" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_mathematics.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_mathematics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b4158f17e9cf040bd5c38419493c2c3c50227ff4 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_mathematics.yaml @@ -0,0 +1,7 @@ +"dataset_name": "tve_mathematics" +"description": "以下為統測數學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_tve_mathematics" +"task_alias": "tve mathematics" diff --git a/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_veterinary_pathology.yaml b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_veterinary_pathology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c1fcb43897705ecf140fcbc34c69fb0b74f66331 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_veterinary_pathology.yaml @@ -0,0 +1,7 @@ +"dataset_name": "veterinary_pathology" +"description": "以下為獸醫病理學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_veterinary_pathology" +"task_alias": "veterinary pathology"
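Note that each per-subject YAML above carries only its overrides (`dataset_name`, `description`, `group`, aliases) and pulls everything else from `_default_template_yaml` via `include`. As a rough illustration of the merge semantics only — this is not the harness's actual loader, and the real template's `!function` tag would need a custom YAML constructor — an include-resolving load can be sketched as:

```python
# Illustrative sketch of `include:` resolution: the base file is loaded
# first, then the child's keys override it. NOT the harness's real loader;
# _default_template_yaml uses a `!function` tag that plain yaml.safe_load
# cannot parse without registering a custom constructor.
import os
import yaml

def load_with_include(path: str) -> dict:
    with open(path) as f:
        cfg = yaml.safe_load(f)
    base = cfg.pop("include", None)
    if base is not None:
        merged = load_with_include(os.path.join(os.path.dirname(path), base))
        merged.update(cfg)  # child values win over the included base
        return merged
    return cfg

# e.g. load_with_include("tmmluplus_physics.yaml") would yield the shared
# template fields plus this subject's dataset_name/description/group.
```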