diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/README.md b/lm-evaluation-harness/lm_eval/tasks/belebele/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e08e63e8a11b563dad771f8ee99a1da167e55016 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/README.md @@ -0,0 +1,49 @@ +# Belebele + +### Paper + +The Belebele Benchmark for Massively Multilingual NLU Evaluation +https://arxiv.org/abs/2308.16884 + +Belebele is a multiple-choice machine reading comprehension (MRC) dataset spanning 122 language variants. This dataset enables the evaluation of mono- and multi-lingual models in high-, medium-, and low-resource languages. Each question has four multiple-choice answers and is linked to a short passage from the FLORES-200 dataset. The human annotation procedure was carefully curated to create questions that discriminate between different levels of generalizable language comprehension and is reinforced by extensive quality checks. While all questions directly relate to the passage, the English dataset on its own proves difficult enough to challenge state-of-the-art language models. Being fully parallel, this dataset enables direct comparison of model performance across all languages. Belebele opens up new avenues for evaluating and analyzing the multilingual abilities of language models and NLP systems. + +Homepage: https://github.com/facebookresearch/belebele + +### Citation + +```bibtex +@misc{bandarkar2023belebele, + title={The Belebele Benchmark: a Parallel Reading Comprehension Dataset in 122 Language Variants}, + author={Lucas Bandarkar and Davis Liang and Benjamin Muller and Mikel Artetxe and Satya Narayan Shukla and Donald Husa and Naman Goyal and Abhinandan Krishnan and Luke Zettlemoyer and Madian Khabsa}, + year={2023}, + eprint={2308.16884}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Groups and Tasks + +#### Groups + +- `belebele`: All 122 languages of the Belebele dataset, evaluated following the methodology in MMLU's original implementation. + +#### Tasks + + +The following tasks evaluate languages in the Belebele dataset using loglikelihood-based multiple-choice scoring: +- `belebele_{language}` + +The variant evaluated here is the 0-shot or few-shot evaluation with English Instructions. + +### Checklist + +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? + * [ ] Yes, original implementation contributed by author of the benchmark + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
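As a quick usage sketch for the tasks above (illustrative, not part of this diff): the snippet assumes the harness's `lm_eval.simple_evaluate` entry point and the `hf` model type; the checkpoint, language, and few-shot count are placeholders.

```python
# Minimal sketch, assuming lm_eval.simple_evaluate accepts a model-type string
# plus model_args. Checkpoint, task, and num_fewshot are illustrative only.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-1.4b",
    tasks=["belebele_hin_Deva"],  # any belebele_{language} task defined in this folder
    num_fewshot=5,                # few-shot examples come from the task's fewshot_split
)
print(results["results"]["belebele_hin_Deva"])
```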
diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_als_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_als_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f77ed55f10897c8efed0977abe9db4272227825e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_als_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "als_Latn" +"include": "_default_template_yaml" +"task": "belebele_als_Latn" +"test_split": "als_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ceb_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ceb_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c9d2f89c90dd21c4293c13ab45a35d40858d2707 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ceb_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "ceb_Latn" +"include": "_default_template_yaml" +"task": "belebele_ceb_Latn" +"test_split": "ceb_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_est_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_est_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6a56ca90c0309d9475adad9b95db272577658f36 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_est_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "est_Latn" +"include": "_default_template_yaml" +"task": "belebele_est_Latn" +"test_split": "est_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_grn_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_grn_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cdba6f64218c05a514409b7cdb5122158a02aae8 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_grn_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "grn_Latn" +"include": "_default_template_yaml" +"task": "belebele_grn_Latn" +"test_split": "grn_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_guj_Gujr.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_guj_Gujr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..353ce6b598bdeba7d1dba8ca7baf187c89c2c3ca --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_guj_Gujr.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "guj_Gujr" +"include": "_default_template_yaml" +"task": "belebele_guj_Gujr" +"test_split": "guj_Gujr" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_hin_Deva.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_hin_Deva.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9fa304c9b09100d17edd37a5f7caa1b11f5d22df --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_hin_Deva.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "hin_Deva" +"include": "_default_template_yaml" +"task": "belebele_hin_Deva" +"test_split": "hin_Deva" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_hin_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_hin_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f44f4f55f458197f0453be18fb5e389939da27b1 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_hin_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "hin_Latn" +"include": "_default_template_yaml" +"task": "belebele_hin_Latn" +"test_split": "hin_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_hrv_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_hrv_Latn.yaml 
new file mode 100644 index 0000000000000000000000000000000000000000..69b100c44bcf9218e541a7f3c41020dedafbec88 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_hrv_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "hrv_Latn" +"include": "_default_template_yaml" +"task": "belebele_hrv_Latn" +"test_split": "hrv_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_hye_Armn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_hye_Armn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5a57fa86451f834e5c4d8bea7d2961c2ff220b9d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_hye_Armn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "hye_Armn" +"include": "_default_template_yaml" +"task": "belebele_hye_Armn" +"test_split": "hye_Armn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ind_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ind_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c15fff3ec36a0b3b140581f317553b2fa0e2e62c --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ind_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "ind_Latn" +"include": "_default_template_yaml" +"task": "belebele_ind_Latn" +"test_split": "ind_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_isl_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_isl_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..69f9bb4e372ce1a39057ce4b70a7e48d23d199e2 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_isl_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "isl_Latn" +"include": "_default_template_yaml" +"task": "belebele_isl_Latn" +"test_split": "isl_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_kaz_Cyrl.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_kaz_Cyrl.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8af667d1ad67568d17f6c833979d8b04f9b8cffe --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_kaz_Cyrl.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "kaz_Cyrl" +"include": "_default_template_yaml" +"task": "belebele_kaz_Cyrl" +"test_split": "kaz_Cyrl" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_khm_Khmr.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_khm_Khmr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..39641c836b4ccf13ff16a730ef0ddd0ed6cc0962 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_khm_Khmr.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "khm_Khmr" +"include": "_default_template_yaml" +"task": "belebele_khm_Khmr" +"test_split": "khm_Khmr" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_lao_Laoo.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_lao_Laoo.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bdee22168b7536d2063e1d1602cb9032d97cb357 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_lao_Laoo.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "lao_Laoo" +"include": "_default_template_yaml" +"task": "belebele_lao_Laoo" +"test_split": "lao_Laoo" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_lug_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_lug_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3de4c1cd96efadff9245b077b1d5cfca78a8e292 --- /dev/null +++ 
b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_lug_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "lug_Latn" +"include": "_default_template_yaml" +"task": "belebele_lug_Latn" +"test_split": "lug_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_mal_Mlym.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_mal_Mlym.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4387bd6df7a36d0a28ae2a7c27b7bc95d267a70e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_mal_Mlym.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "mal_Mlym" +"include": "_default_template_yaml" +"task": "belebele_mal_Mlym" +"test_split": "mal_Mlym" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_mkd_Cyrl.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_mkd_Cyrl.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c9887108a949eae7be2e17e28a7ed9f81559f303 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_mkd_Cyrl.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "mkd_Cyrl" +"include": "_default_template_yaml" +"task": "belebele_mkd_Cyrl" +"test_split": "mkd_Cyrl" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_npi_Deva.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_npi_Deva.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c8126174671cdcc412958b90d1bc3051a8d4386a --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_npi_Deva.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "npi_Deva" +"include": "_default_template_yaml" +"task": "belebele_npi_Deva" +"test_split": "npi_Deva" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ory_Orya.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ory_Orya.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5590560aaac88be6ec8dc90353d308d23c759323 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ory_Orya.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "ory_Orya" +"include": "_default_template_yaml" +"task": "belebele_ory_Orya" +"test_split": "ory_Orya" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_pan_Guru.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_pan_Guru.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6017b44d3d2090de73dd8ea759eac1675608f5e8 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_pan_Guru.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "pan_Guru" +"include": "_default_template_yaml" +"task": "belebele_pan_Guru" +"test_split": "pan_Guru" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_pol_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_pol_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ebfcf3534e53c10bfe370643bfc50fc94df5602c --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_pol_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "pol_Latn" +"include": "_default_template_yaml" +"task": "belebele_pol_Latn" +"test_split": "pol_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_sin_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_sin_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9c035361f22ab53504eb6a94c4a23c787ab92c05 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_sin_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "sin_Latn" +"include": "_default_template_yaml" 
+"task": "belebele_sin_Latn" +"test_split": "sin_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_slv_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_slv_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9e3e5338231b478c0c9614dabb6e26c7dd11d994 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_slv_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "slv_Latn" +"include": "_default_template_yaml" +"task": "belebele_slv_Latn" +"test_split": "slv_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_snd_Arab.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_snd_Arab.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ca8d1643f8bd1c98e7742c1ede66242aecae4f7f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_snd_Arab.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "snd_Arab" +"include": "_default_template_yaml" +"task": "belebele_snd_Arab" +"test_split": "snd_Arab" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_sot_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_sot_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f98de9a3dab12ca1af6f570904abc976d11525df --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_sot_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "sot_Latn" +"include": "_default_template_yaml" +"task": "belebele_sot_Latn" +"test_split": "sot_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_swe_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_swe_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6ac7a4afffb3db57119df0db971415e48aa94301 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_swe_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "swe_Latn" +"include": "_default_template_yaml" +"task": "belebele_swe_Latn" +"test_split": "swe_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_swh_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_swh_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..55845837a155e9fd1525830a263019910c793864 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_swh_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "swh_Latn" +"include": "_default_template_yaml" +"task": "belebele_swh_Latn" +"test_split": "swh_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tam_Taml.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tam_Taml.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9c81da2f8a7ab0c60628cd834eecb1f70031173b --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tam_Taml.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "tam_Taml" +"include": "_default_template_yaml" +"task": "belebele_tam_Taml" +"test_split": "tam_Taml" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tgl_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tgl_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3b71adbc7fe41428fd350b60968fe744a94303db --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tgl_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "tgl_Latn" +"include": "_default_template_yaml" +"task": "belebele_tgl_Latn" +"test_split": "tgl_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tir_Ethi.yaml 
b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tir_Ethi.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ca902d2a391ea872a2c3a75eded5eadfd3b8a1a6 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tir_Ethi.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "tir_Ethi" +"include": "_default_template_yaml" +"task": "belebele_tir_Ethi" +"test_split": "tir_Ethi" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tso_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tso_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1dae599eb73aade0b753216421eac391afe89985 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tso_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "tso_Latn" +"include": "_default_template_yaml" +"task": "belebele_tso_Latn" +"test_split": "tso_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_war_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_war_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d34508c3137b556b84038a06fcc6a9a2ea91f68c --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_war_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "war_Latn" +"include": "_default_template_yaml" +"task": "belebele_war_Latn" +"test_split": "war_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_wol_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_wol_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7683e3d2206e9bfb04ec2a2cf2d068c2be9570c3 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_wol_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "wol_Latn" +"include": "_default_template_yaml" +"task": "belebele_wol_Latn" +"test_split": "wol_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_xho_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_xho_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2962f3553a24a64fdcc4ca4e7cdd771c657a5306 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_xho_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "xho_Latn" +"include": "_default_template_yaml" +"task": "belebele_xho_Latn" +"test_split": "xho_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_zsm_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_zsm_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d31cadf5e9aafb4b1337741dd7247607a36c456e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_zsm_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "zsm_Latn" +"include": "_default_template_yaml" +"task": "belebele_zsm_Latn" +"test_split": "zsm_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/coqa/README.md b/lm-evaluation-harness/lm_eval/tasks/coqa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..77347e4fd8430ddc1fd7411be84a770d64f9096f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/coqa/README.md @@ -0,0 +1,43 @@ +# CoQA + +### Paper + +Title: `CoQA: A Conversational Question Answering Challenge` + +Abstract: https://arxiv.org/pdf/1808.07042.pdf + +CoQA is a large-scale dataset for building Conversational Question Answering +systems. The goal of the CoQA challenge is to measure the ability of machines to +understand a text passage and answer a series of interconnected questions that +appear in a conversation. 
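To make the conversational setup concrete, here is a toy illustration of how a record becomes a prompt; it mirrors the `doc_to_text` helper in `utils.py` further down in this diff, and the record itself is invented but follows the `EleutherAI/coqa` field names that helper expects.

```python
# Invented toy record using the story/questions/answers fields the task relies on.
from itertools import zip_longest

doc = {
    "story": "Anna adopted a cat from the shelter. She named it Momo.",
    "questions": {"input_text": ["What did Anna adopt?", "What did she name it?"]},
    "answers": {"input_text": ["a cat", "Momo"]},
}

# Passage first, then the Q/A history, ending with a bare "A:" for the model to complete.
text = doc["story"] + "\n\n"
for q, a in zip_longest(doc["questions"]["input_text"], doc["answers"]["input_text"][:-1]):
    text += f"Q: {q}\n\n" + (f"A: {a}\n\n" if a is not None else "A:")
print(text)
```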
+ +Homepage: https://stanfordnlp.github.io/coqa/ + +### Citation + +``` +@misc{reddy2018coqa, + title={CoQA: A Conversational Question Answering Challenge}, + author={Siva Reddy and Danqi Chen and Christopher D. Manning}, + year={2018}, + eprint={1808.07042}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet + +#### Tasks + +* `coqa` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation-harness/lm_eval/tasks/coqa/default.yaml b/lm-evaluation-harness/lm_eval/tasks/coqa/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..de398c242d04dfd823c32c5fbbb3c3796355d3f6 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/coqa/default.yaml @@ -0,0 +1,24 @@ +task: coqa +dataset_path: EleutherAI/coqa +output_type: generate_until +training_split: train +validation_split: validation +doc_to_text: !function utils.doc_to_text +doc_to_target: !function utils.doc_to_target +process_results: !function utils.process_results +should_decontaminate: true +doc_to_decontamination_query: "{{story}} {{question.input_text|join('\n')}}" +generation_kwargs: + until: + - "\nQ:" +metric_list: + - metric: em + aggregation: mean + higher_is_better: true + - metric: f1 + aggregation: mean + higher_is_better: true +metadata: + version: 3.0 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation-harness/lm_eval/tasks/coqa/utils.py b/lm-evaluation-harness/lm_eval/tasks/coqa/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..29911cfec5cd345b41c631064a7e281b9d15000e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/coqa/utils.py @@ -0,0 +1,77 @@ +from itertools import zip_longest + +import transformers.data.metrics.squad_metrics as squad_metrics + + +def doc_to_text(doc): + # Given a passage p, the conversation history {q1, a1, . . . qi−1, ai−1} + # and a question qi, the task is to predict the answer ai + doc_text = doc["story"] + "\n\n" + for q, a in zip_longest( + doc["questions"]["input_text"], doc["answers"]["input_text"][:-1] + ): # omit target answer ai + question = f"Q: {q}\n\n" + answer = f"A: {a}\n\n" if a is not None else "A:" + doc_text += question + answer + return doc_text + + +def doc_to_target(doc): + turn_id = len(doc["questions"]["input_text"]) + # Returns unique answers and valid alternatives (Some questions in CoQA have multiple valid answers).
+ answers = [] + answer_forturn = doc["answers"]["input_text"][turn_id - 1] + answers.append(answer_forturn) + + additional_answers = doc.get("additional_answers") + if additional_answers: + for key in additional_answers: + additional_answer_for_turn = additional_answers[key]["input_text"][ + turn_id - 1 + ] + if additional_answer_for_turn.lower() not in map(str.lower, answers): + answers.append(additional_answer_for_turn) + return answers + + +def em(gold_list, pred): + # tests for exact match and on the normalised answer (compute_exact) + em_sum = 0.0 + if len(gold_list) > 1: + for i in range(len(gold_list)): + gold_answers = gold_list[0:i] + gold_list[i + 1 :] + # predictions compared against (n) golds and take maximum + em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_answers) + else: + em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_list) + + return em_sum / max(1, len(gold_list)) + + +def compute_scores(gold_list, pred): + # tests for exact match and on the normalised answer (compute_exact) + # test for overlap (compute_f1) + f1_sum = 0.0 + em_sum = 0.0 + if len(gold_list) > 1: + for i in range(len(gold_list)): + gold_answers = gold_list[0:i] + gold_list[i + 1 :] + # predictions compared against (n) golds and take maximum + em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_answers) + f1_sum += max(squad_metrics.compute_f1(a, pred) for a in gold_answers) + else: + em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_list) + f1_sum += max(squad_metrics.compute_f1(a, pred) for a in gold_list) + + return { + "em": em_sum / max(1, len(gold_list)), + "f1": f1_sum / max(1, len(gold_list)), + } + + +def process_results(doc, results): + gold_list = doc_to_target(doc) + pred = results[0].strip().split("\n")[0] + + scores = compute_scores(gold_list, pred) + return scores diff --git a/lm-evaluation-harness/lm_eval/tasks/gsm8k/README.md b/lm-evaluation-harness/lm_eval/tasks/gsm8k/README.md new file mode 100644 index 0000000000000000000000000000000000000000..13339dfa46366298389e3ad0d3910b00db2c417e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/gsm8k/README.md @@ -0,0 +1,59 @@ +# GSM8k + +## Paper +Training Verifiers to Solve Math Word Problems +https://arxiv.org/abs/2110.14168 + +State-of-the-art language models can match human performance on many tasks, but +they still struggle to robustly perform multi-step mathematical reasoning. To +diagnose the failures of current models and support research, we introduce GSM8K, +a dataset of 8.5K high quality linguistically diverse grade school math word problems. +We find that even the largest transformer models fail to achieve high test performance, +despite the conceptual simplicity of this problem distribution. + +NOTE: See the official implementation of the task: + https://github.com/openai/grade-school-math/blob/master/grade_school_math/calculator.py +for how to make use of the dataset's calculator annotations in your language +model's sample/generation function. 
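As a rough sketch of what using those calculator annotations involves (an illustration under assumptions, not the official `calculator.py`): GSM8K solutions mark arithmetic as `<<expression=result>>`, so a sampling loop can pause whenever the model has just emitted `<<expression=`, evaluate the expression itself, and splice the result back into the generation. The helper below shows only the detect-and-evaluate step.

```python
# Hedged sketch: find an unfinished calculator call at the end of a partial
# generation and compute its value. The official implementation is stricter
# (separate process, timeouts); this is for illustration only.
import re

def pending_calculator_result(partial_text: str):
    """Return the value of a trailing '<<expr=' annotation, or None if absent."""
    match = re.search(r"<<([^<>=]+)=$", partial_text)
    if match is None:
        return None
    try:
        return eval(match.group(1), {"__builtins__": {}}, {})  # toy arithmetic only
    except Exception:
        return None

print(pending_calculator_result("She sells 16 - 3 - 4 = <<16-3-4="))  # -> 9
```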
+ +Homepage: https://github.com/openai/grade-school-math + + +## Citation +``` +@misc{cobbe2021training, + title={Training Verifiers to Solve Math Word Problems}, + author={Karl Cobbe and Vineet Kosaraju and Mohammad Bavarian and Jacob Hilton and Reiichiro Nakano and Christopher Hesse and John Schulman}, + year={2021}, + eprint={2110.14168}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + +### Groups and Tasks + +#### Groups + +- `math_word_problems` +- `chain_of_thought` +- `self_consistency` + +#### Tasks + +- `gsm8k_yaml` +- `gsm8k_cot`: GSM8K with Chain-of-Thought +- `gsm8k_cot_self_consistency`: GSM8K with Chain-of-Thought and Self-Consistency + +### Checklist + +- [x] Is in Eval-harness v1.0 ? +- [ ] Has been checked for regression from v1.0? +- [ ] Has been checked for equivalence with original paper methodology? +- [ ] "Main" checked variant clearly denoted? + +### Variant Wishlist + +- [ ] Variant with Calculator (see https://github.com/openai/grade-school-math/blob/master/grade_school_math/calculator.py for example implementation) +- [ ] Using Verifiers +- [ ] Majority voting "without CoT" diff --git a/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k-cot-self-consistency.yaml b/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k-cot-self-consistency.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d92ee342d18bb2e9f2da7573fd0c72ddd65db9c8 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k-cot-self-consistency.yaml @@ -0,0 +1,34 @@ +include: gsm8k-cot.yaml +group: + - chain_of_thought + - self_consistency +task: gsm8k_cot_self_consistency +generation_kwargs: + until: + - "Q:" + - "\n\n" + do_sample: true + temperature: 0.2 +repeats: 64 +filter_list: + - name: "score-first" # pick only the first response, and report metrics on that + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)" + - function: "take_first" + - name: "maj@64" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)" + - function: "majority_vote" + - function: "take_first" + - name: "maj@8" # get Maj@8 , via selecting the first 8 responses. Using a better estimator would be optimal. + filter: + - function: "take_first_k" + k: 8 + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)" + - function: "majority_vote" + - function: "take_first" +metadata: + version: 2.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k-cot-zeroshot.yaml b/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k-cot-zeroshot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..75d4468ac02d551d135ef78a752aba0d157e72ab --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k-cot-zeroshot.yaml @@ -0,0 +1,44 @@ +group: + - math_word_problems +task: gsm8k_cot_zeroshot +dataset_path: gsm8k +dataset_name: main +output_type: generate_until +training_split: train +fewshot_split: train +test_split: test +doc_to_text: "Q: {{question}}\nA: Let's think step by step." +doc_to_target: "{{answer}}" #" {{answer.split('### ')[-1].rstrip()}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: false + regexes_to_ignore: + - "," + - "\\$" + - "(?s).*#### " + - "\\.$" +generation_kwargs: + until: + - "Q:" + - "" + - "<|im_end|>" + do_sample: false +repeats: 1 +num_fewshot: 0 +filter_list: + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)." 
+ - function: "take_first" + - name: "flexible-extract" + filter: + - function: "regex" + group_select: -1 + regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)" + - function: "take_first" +metadata: + version: 3.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k-cot.yaml b/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k-cot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e55020258930e400ace1fc8cb85949e1af347a13 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k-cot.yaml @@ -0,0 +1,51 @@ +group: + - chain_of_thought +task: gsm8k_cot +dataset_path: gsm8k +dataset_name: main +output_type: generate_until +test_split: test +doc_to_text: "Q: There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?\nA: There are 15 trees originally. Then there were 21 trees after some more were planted. So there must have been 21 - 15 = 6. The answer is 6.\n\n\ +Q: If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?\nA: There are originally 3 cars. 2 more cars arrive. 3 + 2 = 5. The answer is 5.\n\n\ +Q: Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?\nA: Originally, Leah had 32 chocolates. Her sister had 42. So in total they had 32 + 42 = 74. After eating 35, they had 74 - 35 = 39. The answer is 39.\n\n\ +Q: Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?\nA: Jason started with 20 lollipops. Then he had 12 after giving some to Denny. So he gave Denny 20 - 12 = 8. The answer is 8.\n\n\ +Q: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?\nA: Shawn started with 5 toys. If he got 2 toys each from his mom and dad, then that is 4 more toys. 5 + 4 = 9. The answer is 9.\n\n\ +Q: There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?\nA: There were originally 9 computers. For each of 4 days, 5 more computers were added. So 5 * 4 = 20 computers were added. 9 + 20 is 29. The answer is 29.\n\n\ +Q: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?\nA: Michael started with 58 golf balls. After losing 23 on tuesday, he had 58 - 23 = 35. After losing 2 more, he had 35 - 2 = 33 golf balls. The answer is 33.\n\n\ +Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?\nA: Olivia had 23 dollars. 5 bagels for 3 dollars each will be 5 x 3 = 15 dollars. So she has 23 - 15 dollars left. 23 - 15 is 8. The answer is 8.\n\n\ +Q: {{question}}\nA:" +doc_to_target: "{{answer.split('####')[-1].strip()}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: false + regexes_to_ignore: + - "," + - "\\$" + - "(?s).*#### " + - "\\.$" +generation_kwargs: + until: + - "Q:" + - "" + - "<|im_end|>" + do_sample: false +repeats: 1 +num_fewshot: 0 +filter_list: + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)." 
+ - function: "take_first" + - name: "flexible-extract" + filter: + - function: "regex" + group_select: -1 + regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)" + - function: "take_first" +metadata: + version: 3.0 + num_fewshot: 8 diff --git a/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k.yaml b/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2c4ef836b1b21177d40c10e410cf69051c98e9e3 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/gsm8k/gsm8k.yaml @@ -0,0 +1,45 @@ +group: + - math_word_problems +task: gsm8k +dataset_path: gsm8k +dataset_name: main +output_type: generate_until +training_split: train +fewshot_split: train +test_split: test +doc_to_text: "Question: {{question}}\nAnswer:" +doc_to_target: "{{answer}}" #" {{answer.split('### ')[-1].rstrip()}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: false + regexes_to_ignore: + - "," + - "\\$" + - "(?s).*#### " + - "\\.$" +generation_kwargs: + until: + - "Question:" + - "" + - "<|im_end|>" + do_sample: false + temperature: 0.0 +repeats: 1 +num_fewshot: 5 +filter_list: + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "#### (\\-?[0-9\\.\\,]+)" + - function: "take_first" + - name: "flexible-extract" + filter: + - function: "regex" + group_select: -1 + regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)" + - function: "take_first" +metadata: + version: 3.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/mgsm/README.md b/lm-evaluation-harness/lm_eval/tasks/mgsm/README.md new file mode 100644 index 0000000000000000000000000000000000000000..90f8e44bb05394cb95c121946febbaaad6c48d27 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mgsm/README.md @@ -0,0 +1,94 @@ +# MGSM + +### Paper + +Title: `Language Models are Multilingual Chain-of-Thought Reasoners` + +Abstract: https://arxiv.org/abs/2210.03057 + +Multilingual Grade School Math Benchmark (MGSM) is a benchmark of grade-school math problems, proposed in the paper [Language models are multilingual chain-of-thought reasoners](http://arxiv.org/abs/2210.03057). + +The same 250 problems from [GSM8K](https://arxiv.org/abs/2110.14168) are each translated via human annotators in 10 languages. The 10 languages are: +- Spanish +- French +- German +- Russian +- Chinese +- Japanese +- Thai +- Swahili +- Bengali +- Telugu + +GSM8K (Grade School Math 8K) is a dataset of 8.5K high quality linguistically diverse grade school math word problems. The dataset was created to support the task of question answering on basic mathematical problems that require multi-step reasoning. + +You can find the input and targets for each of the ten languages (and English) as `.tsv` files. +We also include few-shot exemplars that are also manually translated from each language in `exemplars.py`. 
+ +Homepage: https://github.com/google-research/url-nlp/tree/main/mgsm + + +### Citation + +``` +@misc{cobbe2021training, + title={Training Verifiers to Solve Math Word Problems}, + author={Karl Cobbe and Vineet Kosaraju and Mohammad Bavarian and Jacob Hilton and Reiichiro Nakano and Christopher Hesse and John Schulman}, + year={2021}, + eprint={2110.14168}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +@misc{shi2022language, + title={Language Models are Multilingual Chain-of-Thought Reasoners}, + author={Freda Shi and Mirac Suzgun and Markus Freitag and Xuezhi Wang and Suraj Srivats and Soroush Vosoughi and Hyung Won Chung and Yi Tay and Sebastian Ruder and Denny Zhou and Dipanjan Das and Jason Wei}, + year={2022}, + eprint={2210.03057}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Groups and Tasks + +#### Groups + +* `mgsm_direct`: Direct question + * `mgsm_direct_bn`: Bengali + * `mgsm_direct_de`: German + * `mgsm_direct_en`: English + * `mgsm_direct_es`: Spanish + * `mgsm_direct_fr`: French + * `mgsm_direct_ja`: Japanese + * `mgsm_direct_ru`: Russian + * `mgsm_direct_sw`: Swahili + * `mgsm_direct_te`: Telugu + * `mgsm_direct_th`: Thai + * `mgsm_direct_zh`: Chinese +* `mgsm_cot_native`: Question with Answer followed by CoT prompt in the same language as the dataset. + * `mgsm_cot_native_bn`: Bengali + * `mgsm_cot_native_de`: German + * `mgsm_cot_native_en`: English + * `mgsm_cot_native_es`: Spanish + * `mgsm_cot_native_fr`: French + * `mgsm_cot_native_ja`: Japanese + * `mgsm_cot_native_ru`: Russian + * `mgsm_cot_native_sw`: Swahili + * `mgsm_cot_native_te`: Telugu + * `mgsm_cot_native_th`: Thai + * `mgsm_cot_native_zh`: Chinese + +Examplar Samples: https://github.com/google-research/url-nlp/blob/main/mgsm/exemplars.py + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation-harness/lm_eval/tasks/mgsm/en_cot/cot_yaml b/lm-evaluation-harness/lm_eval/tasks/mgsm/en_cot/cot_yaml new file mode 100644 index 0000000000000000000000000000000000000000..f4d502ee52f4389d4331be7dcde287d1c47c3f59 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mgsm/en_cot/cot_yaml @@ -0,0 +1,36 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +group: mgsm_cot_native +dataset_path: juletxara/mgsm +dataset_name: null # Overridden by language-specific config. 
+output_type: generate_until +training_split: train +test_split: test +generation_kwargs: + until: + - "\n\n" + - "\n" + do_sample: false + temperature: 0.0 +target_delimiter: " " +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +filter_list: + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)" + - function: "take_first" + - filter: + - function: regex + group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +metadata: + version: 2.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_en.yaml b/lm-evaluation-harness/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f27a616487aadcda9ac0f6f4e549d9bcd8e26dc1 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_en.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: en +doc_to_target: '{% if answer is not none %}{{answer[21:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Question: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Question:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_en_cot_en diff --git a/lm-evaluation-harness/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_es.yaml b/lm-evaluation-harness/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_es.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cc748306a473dd11beace7d35ac7453f187c7abb --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_es.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: es +doc_to_target: '{% if answer is not none %}{{answer[23:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Pregunta: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Pregunta:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_en_cot_es diff --git a/lm-evaluation-harness/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_fr.yaml b/lm-evaluation-harness/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_fr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d36dd813a3b86b6300620ec5c74ad0154017edf9 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_fr.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: fr +doc_to_target: '{% if answer is not none %}{{answer[26:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Question : "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Question :' + - + - <|im_end|> +include: cot_yaml +task: mgsm_en_cot_fr diff --git a/lm-evaluation-harness/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_ru.yaml b/lm-evaluation-harness/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_ru.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2bfeb1dafe3cbd989ba3999394b1ea9a294504f5 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_ru.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: ru +doc_to_target: '{% if answer is not none %}{{answer[18:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is 
not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Задача: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Задача:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_en_cot_ru diff --git a/lm-evaluation-harness/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_sw.yaml b/lm-evaluation-harness/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_sw.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6f37cd3b87eb3660a701eec29ca1d51cc3c630e4 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_sw.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: sw +doc_to_target: '{% if answer is not none %}{{answer[25:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Swali: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Swali:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_en_cot_sw diff --git a/lm-evaluation-harness/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_zh.yaml b/lm-evaluation-harness/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_zh.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f45004aacfd93bc4786b9ebd42cc6283d9a31785 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_zh.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: zh +doc_to_target: '{% if answer is not none %}{{answer[6:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"问题: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - '问题:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_en_cot_zh diff --git a/lm-evaluation-harness/lm_eval/tasks/mgsm/gen_yaml.sh b/lm-evaluation-harness/lm_eval/tasks/mgsm/gen_yaml.sh new file mode 100644 index 0000000000000000000000000000000000000000..27cbbcfdc7ae6bddb463de0c7ceb8ec467ec9c3b --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mgsm/gen_yaml.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +python utils.py --overwrite --output-dir direct --mode direct +python utils.py --overwrite --output-dir en_cot --mode en-cot +python utils.py --overwrite --output-dir native_cot --mode native-cot diff --git a/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/cot_yaml b/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/cot_yaml new file mode 100644 index 0000000000000000000000000000000000000000..dbba882225b1d7c9fbe10352c64a381c97a547c7 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/cot_yaml @@ -0,0 +1,31 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +group: mgsm_cot_native +dataset_path: juletxara/mgsm +dataset_name: null # Overridden by language-specific config. 
+output_type: generate_until +training_split: train +test_split: test +# target_delimiter: "" +generation_kwargs: + until: + - "\n\n" + - "\n" + do_sample: false + temperature: 0.0 +target_delimiter: " " +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +filter_list: + - name: "get-answer" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)" + - function: "take_first" +metadata: + version: 3.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_de.yaml b/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_de.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4f4701796945b74fe884a73d931debdf2c7b5ce9 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_de.yaml @@ -0,0 +1,24 @@ +# Generated by utils.py +dataset_name: de +doc_to_target: '{% if answer is not none %}{{answer[29:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nSchritt-für-Schritt-Antwort:"}}{% else %}{{"Frage: "+question+"\nSchritt-für-Schritt-Antwort:"}}{% endif %}' +filter_list: +- filter: + - function: regex + regex_pattern: Die Antwort lautet (\-?[0-9\.\,]+) + - function: take_first + name: strict-match +- filter: + - function: regex + group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +generation_kwargs: + do_sample: false + until: + - 'Frage:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_native_cot_de diff --git a/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_en.yaml b/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c2033b335fb51ec1310f98b4e905f18231c1b68a --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_en.yaml @@ -0,0 +1,24 @@ +# Generated by utils.py +dataset_name: en +doc_to_target: '{% if answer is not none %}{{answer[21:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Question: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +filter_list: +- filter: + - function: regex + regex_pattern: The answer is (\-?[0-9\.\,]+) + - function: take_first + name: strict-match +- filter: + - function: regex + group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +generation_kwargs: + do_sample: false + until: + - 'Question:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_native_cot_en diff --git a/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_ja.yaml b/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_ja.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8e56bd0b15150e1e435b4d304255c0a751246e86 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_ja.yaml @@ -0,0 +1,24 @@ +# Generated by utils.py +dataset_name: ja +doc_to_target: '{% if answer is not none %}{{answer[11:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nステップごとの答え:"}}{% else %}{{"問題: "+question+"\nステップごとの答え:"}}{% endif %}' +filter_list: +- filter: + - function: regex + regex_pattern: 答えは(\-?[0-9\.\,]+)です。 + - function: take_first + name: strict-match +- filter: + - function: regex + 
group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +generation_kwargs: + do_sample: false + until: + - '問題:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_native_cot_ja diff --git a/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_sw.yaml b/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_sw.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4da793dbc78485cb8167a6fc069b87f7590c960f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_sw.yaml @@ -0,0 +1,24 @@ +# Generated by utils.py +dataset_name: sw +doc_to_target: '{% if answer is not none %}{{answer[25:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nJibu la Hatua kwa Hatua:"}}{% else %}{{"Swali: "+question+"\nJibu la Hatua kwa Hatua:"}}{% endif %}' +filter_list: +- filter: + - function: regex + regex_pattern: Jibu ni (\-?[0-9\.\,]+) + - function: take_first + name: strict-match +- filter: + - function: regex + group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +generation_kwargs: + do_sample: false + until: + - 'Swali:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_native_cot_sw diff --git a/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_te.yaml b/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_te.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1cdbaca8893b6ee626084135c7a64ccd02737b81 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_te.yaml @@ -0,0 +1,24 @@ +# Generated by utils.py +dataset_name: te +doc_to_target: '{% if answer is not none %}{{answer[19:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nదశలవారీగా సమాధానం:"}}{% else %}{{"ప్రశ్న: "+question+"\nదశలవారీగా సమాధానం:"}}{% endif %}' +filter_list: +- filter: + - function: regex + regex_pattern: సమాధానం (\-?[0-9\.\,]+) + - function: take_first + name: strict-match +- filter: + - function: regex + group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +generation_kwargs: + do_sample: false + until: + - 'ప్రశ్న:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_native_cot_te diff --git a/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_th.yaml b/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_th.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6931d3a2ff44ab0de25a31a7624f2cd104c655c2 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_th.yaml @@ -0,0 +1,24 @@ +# Generated by utils.py +dataset_name: th +doc_to_target: '{% if answer is not none %}{{answer[18:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nคำตอบทีละขั้นตอน:"}}{% else %}{{"โจทย์: "+question+"\nคำตอบทีละขั้นตอน:"}}{% endif %}' +filter_list: +- filter: + - function: regex + regex_pattern: คำตอบคือ (\-?[0-9\.\,]+) + - function: take_first + name: strict-match +- filter: + - function: regex + group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +generation_kwargs: + do_sample: false + until: + - 'โจทย์:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_native_cot_th diff --git 
a/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_zh.yaml b/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_zh.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3f0d7e2dcecaecee05671a636b0a3e27eeeee95e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_zh.yaml @@ -0,0 +1,24 @@ +# Generated by utils.py +dataset_name: zh +doc_to_target: '{% if answer is not none %}{{answer[6:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\n逐步解答:"}}{% else %}{{"问题: "+question+"\n逐步解答:"}}{% endif %}' +filter_list: +- filter: + - function: regex + regex_pattern: 答案是 (\-?[0-9\.\,]+)。 + - function: take_first + name: strict-match +- filter: + - function: regex + group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +generation_kwargs: + do_sample: false + until: + - '问题:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_native_cot_zh diff --git a/lm-evaluation-harness/lm_eval/tasks/mgsm/utils.py b/lm-evaluation-harness/lm_eval/tasks/mgsm/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..116214f9f4c45ffb9a04757ca41c58114180b259 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mgsm/utils.py @@ -0,0 +1,228 @@ +import argparse + +import yaml + + +LANGUAGES = { + "bn": { # Bengali + # "QUESTION": "প্রশ্ন:", + "QUESTION": "\u09aa\u09cd\u09b0\u09b6\u09cd\u09a8:", + # "ANSWER": "ধাপে ধাপে উত্তর:", + "ANSWER": "\u09a7\u09be\u09aa\u09c7 \u09a7\u09be\u09aa\u09c7 \u0989\u09a4\u09cd\u09a4\u09b0:", + "DIRECT": "Answer:", + "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", + }, + "de": { # German + "QUESTION": "Frage:", + # "ANSWER": "Schritt-für-Schritt-Antwort:", + "ANSWER": "Schritt-f\u00fcr-Schritt-Antwort:", + "DIRECT": "Antwort:", + "REGEX": "Die Antwort lautet (\\-?[0-9\\.\\,]+)", + }, + "en": { # English + "QUESTION": "Question:", + "ANSWER": "Step-by-Step Answer:", + "DIRECT": "Answer:", + "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", + }, + "es": { # Spanish + "QUESTION": "Pregunta:", + "ANSWER": "Respuesta paso a paso:", + "DIRECT": "Respuesta:", + "REGEX": "La respuesta es (\\-?[0-9\\.\\,]+)", + }, + "fr": { # French + "QUESTION": "Question :", + # "ANSWER": "Réponse étape par étape :" + "ANSWER": "R\u00e9ponse \u00e9tape par \u00e9tape :", + # "DIRECT": "Réponse :", + "DIRECT": "R\u00e9ponse :", + # "REGEX": "La réponse est (\\-?[0-9\\.\\,]+)", + "REGEX": "La r\u00e9ponse est (\\-?[0-9\\.\\,]+)", + }, + "ru": { # Russian + # "QUESTION": "Задача:", + "QUESTION": "\u0417\u0430\u0434\u0430\u0447\u0430:", + # "ANSWER": "Пошаговоерешение:", + "ANSWER": "\u041f\u043e\u0448\u0430\u0433\u043e\u0432\u043e\u0435\u0440\u0435\u0448\u0435\u043d\u0438\u0435:", + "DIRECT": "Answer:", + # "REGEX": "Ответ — (\\-?[0-9\\.\\,]+)", + "REGEX": "\u041e\u0442\u0432\u0435\u0442 \u2014 (\\-?[0-9\\.\\,]+)", + }, + "sw": { # Swahili + "QUESTION": "Swali:", + "ANSWER": "Jibu la Hatua kwa Hatua:", + "DIRECT": "Answer:", + "REGEX": "Jibu ni (\\-?[0-9\\.\\,]+)", + }, + "te": { # Telugu + # "QUESTION": "ప్రశ్న:", + "QUESTION": "\u0c2a\u0c4d\u0c30\u0c36\u0c4d\u0c28:", + # "ANSWER": "దశలవారీగా సమాధానం:", + "ANSWER": "\u0c26\u0c36\u0c32\u0c35\u0c3e\u0c30\u0c40\u0c17\u0c3e \u0c38\u0c2e\u0c3e\u0c27\u0c3e\u0c28\u0c02:", + "DIRECT": "Answer:", + # "REGEX": "సమాధానం (\\-?[0-9\\.\\,]+)", + "REGEX": "\u0c38\u0c2e\u0c3e\u0c27\u0c3e\u0c28\u0c02 (\\-?[0-9\\.\\,]+)", + }, + "th": { # Thai + # "QUESTION": "โจทย์:", + 
"QUESTION": "\u0e42\u0e08\u0e17\u0e22\u0e4c:", + # "ANSWER": "คำตอบทีละขั้นตอน:", + "ANSWER": "\u0e04\u0e33\u0e15\u0e2d\u0e1a\u0e17\u0e35\u0e25\u0e30\u0e02\u0e31\u0e49\u0e19\u0e15\u0e2d\u0e19:", + "DIRECT": "Answer:", + # "REGEX": "คำตอบคือ (\\-?[0-9\\.\\,]+)", + "REGEX": "\u0e04\u0e33\u0e15\u0e2d\u0e1a\u0e04\u0e37\u0e2d (\\-?[0-9\\.\\,]+)", + }, + "ja": { # Japanese + # "QUESTION": "問題:", + "QUESTION": "\u554f\u984c:", + # "ANSWER": "ステップごとの答え:", + "ANSWER": "\u30b9\u30c6\u30c3\u30d7\u3054\u3068\u306e\u7b54\u3048:", + "DIRECT": "Answer:", + # "REGEX": "答えは(\\-?[0-9\\.\\,]+)です。", + "REGEX": "\u7b54\u3048\u306f(\\-?[0-9\\.\\,]+)\u3067\u3059\u3002", + }, + "zh": { # Chinese + # "QUESTION": "问题:", + "QUESTION": "\u95ee\u9898:", + # "ANSWER": "逐步解答:", + "ANSWER": "\u9010\u6b65\u89e3\u7b54:", + "DIRECT": "Answer:", + # "REGEX": "答案是 (\\-?[0-9\\.\\,]+)。", + "REGEX": "\u7b54\u6848\u662f (\\-?[0-9\\.\\,]+)\u3002", + }, +} + + +def add_regex_pattern(regex_pattern): + if regex_pattern is None: + return {} + return { + "filter_list": [ + { + "name": "strict-match", + "filter": [ + { + "function": "regex", + "regex_pattern": f"""{regex_pattern}""", + }, + { + "function": "take_first", + }, + ], + }, + { + "name": "flexible-extract", + "filter": [ + { + "function": "regex", + "regex_pattern": """(-?[$0-9.,]{2,})|(-?[0-9]+)""", + "group_select": -1, + }, + { + "function": "take_first", + }, + ], + }, + ], + } + + +def gen_lang_yamls(output_dir: str, overwrite: bool, mode: str) -> None: + """ + Generate a yaml file for each language. + + :param output_dir: The directory to output the files to. + :param overwrite: Whether to overwrite files if they already exist. + """ + err = [] + for lang in LANGUAGES.keys(): + try: + QUESTION = LANGUAGES[lang]["QUESTION"] + + yaml_template = "cot_yaml" + filter_list = {} + DELIMITER = None + if mode == "direct": + ANSWER = LANGUAGES[lang]["DIRECT"] + REGEX = None + task_name = f"mgsm_direct_{lang}" + yaml_template = "direct_yaml" + elif mode == "native-cot": + ANSWER = LANGUAGES[lang]["ANSWER"] + REGEX = LANGUAGES[lang]["REGEX"] + task_name = f"mgsm_native_cot_{lang}" + filter_list = add_regex_pattern(REGEX) + DELIMITER = "" if lang in ["zh", "ja"] else None + elif mode == "en-cot": + ANSWER = LANGUAGES["en"]["ANSWER"] + REGEX = LANGUAGES["en"]["REGEX"] + task_name = f"mgsm_en_cot_{lang}" + + file_name = f"{task_name}.yaml" + ANSWER_TO_SKIP = len(LANGUAGES[lang]["ANSWER"]) + 1 + with open( + f"{output_dir}/{file_name}", "w" if overwrite else "x", encoding="utf8" + ) as f: + f.write("# Generated by utils.py\n") + yaml.dump( + { + "include": yaml_template, + "dataset_name": lang, + "task": f"{task_name}", + "doc_to_text": f"""{{% if answer is not none %}}""" + f"""{{{{question+"\\n{ANSWER}"}}}}""" + f"""{{% else %}}""" + f"""{{{{"{QUESTION} "+question+"\\n{ANSWER}"}}}}""" + f"""{{% endif %}}""", + "doc_to_target": f"""{{% if answer is not none %}}""" + f"""{{{{answer[{ANSWER_TO_SKIP}:]}}}}""" + f"""{{% else %}}""" + f"""{{{{answer_number|string}}}}""" + f"""{{% endif %}}""", + **filter_list, + "generation_kwargs": { + "until": [QUESTION, "", "<|im_end|>"], + "do_sample": False, + }, + **({"target_delimiter": DELIMITER} if DELIMITER else {}), + }, + f, + allow_unicode=True, + width=float("inf"), + ) + except FileExistsError: + err.append(file_name) + + if len(err) > 0: + raise FileExistsError( + "Files were not created because they already exist (use --overwrite flag):" + f" {', '.join(err)}" + ) + + +def main() -> None: + """Parse CLI args and generate language-specific 
yaml files.""" + parser = argparse.ArgumentParser() + parser.add_argument( + "--overwrite", + default=False, + action="store_true", + help="Overwrite files if they already exist", + ) + parser.add_argument( + "--output-dir", default=".", help="Directory to write yaml files to" + ) + parser.add_argument( + "--mode", + default="native-cot", + choices=["direct", "native-cot", "en-cot"], + help="Mode of chain-of-thought", + ) + args = parser.parse_args() + + gen_lang_yamls(output_dir=args.output_dir, overwrite=args.overwrite, mode=args.mode) + + +if __name__ == "__main__": + main() diff --git a/lm-evaluation-harness/lm_eval/tasks/pubmedqa/README.md b/lm-evaluation-harness/lm_eval/tasks/pubmedqa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c738dd2af65eecaee764cbeaf6a74aea308a0547 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/pubmedqa/README.md @@ -0,0 +1,56 @@ +# PubMedQA + +### Paper + +Title: `PubMedQA: A Dataset for Biomedical Research Question Answering` + +Abstract: https://arxiv.org/abs/1909.06146 + +PubMedQA is a novel biomedical question answering (QA) dataset collected from +PubMed abstracts. The task of PubMedQA is to answer research questions with +yes/no/maybe (e.g.: Do preoperative statins reduce atrial fibrillation after +coronary artery bypass grafting?) using the corresponding abstracts. PubMedQA +has 1k expert-annotated, 61.2k unlabeled and 211.3k artificially generated QA +instances. Each PubMedQA instance is composed of (1) a question which is either +an existing research article title or derived from one, (2) a context which is +the corresponding abstract without its conclusion, (3) a long answer, which is +the conclusion of the abstract and, presumably, answers the research question, +and (4) a yes/no/maybe answer which summarizes the conclusion. + +Homepage: https://pubmedqa.github.io/ + + +### Citation + +``` +@inproceedings{jin2019pubmedqa, + title={PubMedQA: A Dataset for Biomedical Research Question Answering}, + author={Jin, Qiao and Dhingra, Bhuwan and Liu, Zhengping and Cohen, William and Lu, Xinghua}, + booktitle={Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)}, + pages={2567--2577}, + year={2019} +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet + +#### Tasks + +* `pubmed_qa` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
diff --git a/lm-evaluation-harness/lm_eval/tasks/pubmedqa/preprocess_pubmedqa.py b/lm-evaluation-harness/lm_eval/tasks/pubmedqa/preprocess_pubmedqa.py new file mode 100644 index 0000000000000000000000000000000000000000..0dccf9408a12ad5b1a0874ae9b8b0155e1db7ebf --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/pubmedqa/preprocess_pubmedqa.py @@ -0,0 +1,6 @@ +def doc_to_text(doc) -> str: + ctxs = "\n".join(doc["CONTEXTS"]) + return "Abstract: {}\nQuestion: {}\nAnswer:".format( + ctxs, + doc["QUESTION"], + ) diff --git a/lm-evaluation-harness/lm_eval/tasks/pubmedqa/pubmedqa.yaml b/lm-evaluation-harness/lm_eval/tasks/pubmedqa/pubmedqa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..47de2fa0980a0a45facbab4416c80373e91e08d5 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/pubmedqa/pubmedqa.yaml @@ -0,0 +1,16 @@ +task: pubmedqa +dataset_path: bigbio/pubmed_qa +dataset_name: pubmed_qa_labeled_fold0_source +output_type: multiple_choice +training_split: train +validation_split: validation +test_split: test +doc_to_text: !function preprocess_pubmedqa.doc_to_text +doc_to_target: final_decision +doc_to_choice: ["yes", "no", "maybe"] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/cb/aggregate.py b/lm-evaluation-harness/lm_eval/tasks/super_glue/cb/aggregate.py new file mode 100644 index 0000000000000000000000000000000000000000..4b99849f9bfa8307006879666ecf971b17b511b2 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/cb/aggregate.py @@ -0,0 +1,13 @@ +import numpy as np +import sklearn + + +def cb_multi_fi(items): + preds, golds = zip(*items) + preds = np.array(preds) + golds = np.array(golds) + f11 = sklearn.metrics.f1_score(y_true=golds == 0, y_pred=preds == 0) + f12 = sklearn.metrics.f1_score(y_true=golds == 1, y_pred=preds == 1) + f13 = sklearn.metrics.f1_score(y_true=golds == 2, y_pred=preds == 2) + avg_f1 = np.mean([f11, f12, f13]) + return avg_f1 diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/multirc/default.yaml b/lm-evaluation-harness/lm_eval/tasks/super_glue/multirc/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5a388299f6496673a3edc9c5047fddd1a14302e4 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/multirc/default.yaml @@ -0,0 +1,15 @@ +group: + - super-glue-lm-eval-v1 +task: multirc +dataset_path: super_glue +dataset_name: multirc +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "{{paragraph}}\nQuestion: {{question}}\nAnswer:" +doc_to_target: label +doc_to_choice: "['''{{answer}}\\nIs the answer correct? yes''', '''{{answer}}\\nIs the answer correct? 
no''']" +metric_list: + - metric: acc +metadata: + version: 2.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/multirc/t5-prompt.yaml b/lm-evaluation-harness/lm_eval/tasks/super_glue/multirc/t5-prompt.yaml new file mode 100644 index 0000000000000000000000000000000000000000..927a357158abf96502f955470fcd8afbe0eee49c --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/multirc/t5-prompt.yaml @@ -0,0 +1,23 @@ +group: + - super-glue-t5-prompt +task: super_glue-multirc-t5-prompt +dataset_path: super_glue +dataset_name: multirc +training_split: train +validation_split: validation +output_type: generate_until +doc_to_text: "multirc question: {{question}} answer: {{answer}} paragraph: {{paragraph}}" +doc_to_target: label +doc_to_choice: "{% set group_id = idx.question|string %}{{[group_id+'_False', group_id+'_True']}}" +generation_kwargs: + until: + - "" +metric_list: + - metric: !function t5_utils.f1 + aggregation: !function t5_utils.agg_f1 + higher_is_better: true + - metric: !function t5_utils.em + aggregation: !function t5_utils.agg_em + higher_is_better: true +metadata: + version: 0.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/multirc/t5_utils.py b/lm-evaluation-harness/lm_eval/tasks/super_glue/multirc/t5_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d17d498fa25db9a6d7f56e03c43c9e661d66f9f1 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/multirc/t5_utils.py @@ -0,0 +1,53 @@ +import collections + +import numpy as np +import sklearn.metrics + + +def f1(predictions, references): # This is a passthrough function + _prediction = predictions[0] + _reference = references[0].split("_")[-1] + string_label = ["False", "True"] + reference = string_label.index(_reference) + prediction = ( + string_label.index(_prediction) + if _prediction in string_label + else not bool(reference) + ) + + return (prediction, reference) + + +def agg_f1(items): + predictions, references = zip(*items) + references, predictions = np.asarray(references), np.asarray(predictions) + + return sklearn.metrics.f1_score(references, predictions) + + +def em(predictions, references): # This is a passthrough function + _prediction = predictions[0] + _group, _reference = references[0].split("_") + string_label = ["False", "True"] + reference = string_label.index(_reference) + prediction = ( + string_label.index(_prediction) + if _prediction in string_label + else not bool(reference) + ) + + return (_group, prediction, reference) + + +def agg_em(items): + grouped_values = collections.defaultdict(lambda: ([], [])) + for group, prediction, reference in items: + grouped_values[group][0].append(reference) + grouped_values[group][1].append(prediction) + + group_scores = [] + for group, (targets, predictions) in grouped_values.items(): + score = float(np.array_equal(targets, predictions)) + group_scores.append(score) + + return np.mean(group_scores) diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/record/default.yaml b/lm-evaluation-harness/lm_eval/tasks/super_glue/record/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ca978fd2ab4db0661ac12185169bc9b8517d1fe8 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/record/default.yaml @@ -0,0 +1,21 @@ +group: + - super-glue-lm-eval-v1 +task: record +dataset_path: super_glue +dataset_name: record +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: !function util.doc_to_text +doc_to_target: !function 
util.doc_to_target +doc_to_choice: !function util.doc_to_choice +process_docs: !function util.process_docs +process_results: !function util.process_results +metric_list: + - metric: f1 + aggregation: mean + - metric: em + higher_is_better: True + aggregation: mean +metadata: + version: 2.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/record/t5-prompt.yaml b/lm-evaluation-harness/lm_eval/tasks/super_glue/record/t5-prompt.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c999bc90301ecc92ec36292a9544733a370b5e69 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/record/t5-prompt.yaml @@ -0,0 +1,22 @@ +group: + - super-glue-t5-prompt +task: super_glue-record-t5-prompt +dataset_path: super_glue +dataset_name: record +validation_split: validation +output_type: generate_until +process_docs: !function t5_utils.process_docs +doc_to_text: !function t5_utils.doc_to_text +doc_to_target: "{{idx.passage|string}}+{{idx.query}}_{{answers}}" +generation_kwargs: + until: + - "" +metric_list: + - metric: !function t5_utils.em + aggregation: !function t5_utils.squad_em_agg + higher_is_better: true + - metric: !function t5_utils.f1 + aggregation: !function t5_utils.squad_f1_agg + higher_is_better: true +metadata: + version: 0.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/record/t5_utils.py b/lm-evaluation-harness/lm_eval/tasks/super_glue/record/t5_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e1a29a9498cad497c7f19d4a24b0e55d287992be --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/record/t5_utils.py @@ -0,0 +1,132 @@ +import collections +import re +import string + +import numpy as np +from datasets import Dataset + +from lm_eval.api.metrics import metric_max_over_ground_truths + + +def doc_to_text(doc): + passage = doc["passage"] + passage = re.sub(r"(\.|\?|\!|\"|\')\n@highlight\n", r"\1 ", passage) + passage = re.sub(r"\n@highlight\n", ". 
", passage) + + return " ".join( + [ + "record query:", + doc["query"], + "entities:", + ", ".join(doc["entities"]), + "passage:", + passage, + ] + ) + + +def process_docs(dataset): + def split_answers(doc): + split_doc = { + **{k: [] for k in doc.keys()}, + } + answers = doc.pop("answers") + for idx, answer in enumerate(answers): + for key in split_doc.keys(): + if key in doc: + split_doc[key].append(doc[key]) + + split_doc["answers"].append(answer) + return split_doc + + dataset = dataset.map(split_answers) + new_dataset = {} + for key in dataset.features.keys(): + new_dataset[key] = [x for row in dataset[key] for x in row] + + return Dataset.from_dict(new_dataset) + + +def normalize_squad(answer): + """Normalization used in official SQuAD evaluation script.""" + + def _normalize_answer(text, punc_chars, punc_repl): + """Lower text and remove punctuation, articles and extra whitespace.""" + + def remove_articles(s): + return re.sub(r"\b(a|an|the)\b", " ", s) + + def replace_punctuation(s): + to_replace = set(punc_chars) + return "".join(punc_repl if ch in to_replace else ch for ch in s) + + def white_space_fix(s): + return " ".join(s.split()) + + text = text.lower() + text = replace_punctuation(text) + text = remove_articles(text) + text = white_space_fix(text) + + return text + + return _normalize_answer(answer, punc_chars=string.punctuation, punc_repl="") + + +def em(predictions, references): # This is a passthrough function + return (predictions[0], references[0]) + + +def f1(predictions, references): # This is a passthrough function + return (predictions[0], references[0]) + + +def squad_em_agg(items): + def _exact_match_score(prediction, target): + return target == prediction + + grouped_values = collections.defaultdict(lambda: ([], [])) + for prediction, reference in items: + group, reference = reference.split("_") + # if group not in grouped_values: + grouped_values[group][0].append(normalize_squad(prediction)) + grouped_values[group][1].append(normalize_squad(reference)) + + em = [] + for group in grouped_values.keys(): + predictions, targets = grouped_values[group] + for p in predictions: + em.append(metric_max_over_ground_truths(_exact_match_score, p, targets)) + + return np.mean(em) + + +def squad_f1_agg(items): + def _f1_score(prediction, target): + """Computes token f1 score for a single target and prediction.""" + prediction_tokens = prediction.split() + target_tokens = target.split() + common = collections.Counter(prediction_tokens) & collections.Counter( + target_tokens + ) + num_same = sum(common.values()) + if num_same == 0: + return 0 + precision = 1.0 * num_same / len(prediction_tokens) + recall = 1.0 * num_same / len(target_tokens) + f1 = (2 * precision * recall) / (precision + recall) + return f1 + + grouped_values = collections.defaultdict(lambda: ([], [])) + for prediction, reference in items: + group, reference = reference.split("_") + if group not in grouped_values: + grouped_values[group][0].append(normalize_squad(prediction)) + grouped_values[group][1].append(normalize_squad(reference)) + + f1 = [] + for group in grouped_values.keys(): + p, t = grouped_values[group] + f1.append(metric_max_over_ground_truths(_f1_score, p[0], t)) + + return np.mean(f1) diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/record/util.py b/lm-evaluation-harness/lm_eval/tasks/super_glue/record/util.py new file mode 100644 index 0000000000000000000000000000000000000000..252dba44eb1b8a806209b4d5519ea2ba79d12e17 --- /dev/null +++ 
b/lm-evaluation-harness/lm_eval/tasks/super_glue/record/util.py @@ -0,0 +1,60 @@ +import datasets +import numpy as np +import transformers.data.metrics.squad_metrics as squad_metrics + +from lm_eval.api.metrics import metric_max_over_ground_truths + + +def doc_to_text(doc): + initial_text, *highlights = doc["passage"].strip().split("\n@highlight\n") + text = initial_text + "\n\n" + for highlight in highlights: + text += f" - {highlight}.\n" + return text + + +def format_answer(query, entity): + return f" - {query}".replace("@placeholder", entity) + + +def doc_to_target(doc): + # We only output the first correct entity in a doc + return format_answer(query=doc["query"], entity=doc["answers"][0]) + + +def doc_to_choice(doc): + return [format_answer(query=doc["query"], entity=ans) for ans in doc["entities"]] + + +def process_docs(dataset: datasets.Dataset): + def _process_doc(doc): + return { + "passage": doc["passage"], + "query": doc["query"], + "entities": sorted(list(set(doc["entities"]))), + "answers": sorted(list(set(doc["answers"]))), + } + + return dataset.map(_process_doc) + + +def process_results(doc, results): + # ReCoRD's evaluation is actually deceptively simple: + # - Pick the maximum likelihood prediction entity + # - Evaluate the accuracy and token F1 PER EXAMPLE + # - Average over all examples + max_idx = np.argmax(np.array([result[0] for result in results])) + + prediction = doc["entities"][max_idx] + gold_label_set = doc["answers"] + f1 = metric_max_over_ground_truths( + squad_metrics.compute_f1, prediction, gold_label_set + ) + em = metric_max_over_ground_truths( + squad_metrics.compute_exact, prediction, gold_label_set + ) + + return { + "f1": f1, + "em": em, + } diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/rte/default.yaml b/lm-evaluation-harness/lm_eval/tasks/super_glue/rte/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6754af1a1e5688110ab9853e1d53e833ef02dd29 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/rte/default.yaml @@ -0,0 +1,15 @@ +group: + - super-glue-lm-eval-v1 +task: sglue_rte +dataset_path: super_glue +dataset_name: rte +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "{{premise}}\nQuestion: {{hypothesis}} True or False?\nAnswer:" +doc_to_target: label +doc_to_choice: ['True', 'False'] +metric_list: + - metric: acc +metadata: + version: 0.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/rte/t5-prompt.yaml b/lm-evaluation-harness/lm_eval/tasks/super_glue/rte/t5-prompt.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9e80686e2a36cbe2a3851ba18fe12130894b7ad7 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/rte/t5-prompt.yaml @@ -0,0 +1,22 @@ +group: + - super-glue-t5-prompt +task: super_glue-rte-t5-prompt +dataset_path: super_glue +dataset_name: rte +training_split: train +validation_split: validation +output_type: generate_until +doc_to_text: "rte hypothesis: {{hypothesis}} premise: {{premise}}" +doc_to_target: label +doc_to_choice: ['entailment', 'not_entailment'] +generation_kwargs: + until: + - "" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +metadata: + version: 0.0
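For reference, a minimal sketch of how the two RTE prompt styles above render for a single example; the premise/hypothesis pair is invented, and `jinja2` (which the harness uses to render its `doc_to_text` templates) is assumed to be installed.

```python
import jinja2

env = jinja2.Environment(undefined=jinja2.StrictUndefined)

# Invented super_glue/rte-style example (label 0 corresponds to entailment).
doc = {
    "premise": "A cat is sleeping on the sofa.",
    "hypothesis": "An animal is resting indoors.",
    "label": 0,
}

# rte/default.yaml: loglikelihood multiple choice over ['True', 'False'].
default_prompt = env.from_string(
    "{{premise}}\nQuestion: {{hypothesis}} True or False?\nAnswer:"
).render(**doc)

# rte/t5-prompt.yaml: generate_until, scored with exact_match against
# 'entailment' / 'not_entailment'.
t5_prompt = env.from_string(
    "rte hypothesis: {{hypothesis}} premise: {{premise}}"
).render(**doc)

print(default_prompt)
# A cat is sleeping on the sofa.
# Question: An animal is resting indoors. True or False?
# Answer:
print(t5_prompt)
# rte hypothesis: An animal is resting indoors. premise: A cat is sleeping on the sofa.
```

The difference between the two variants is only in prompt wording and scoring: the default task compares per-choice loglikelihoods, while the T5-style task generates free text and checks it against the verbalized labels.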