diff --git a/lm-evaluation/lm_eval/tasks/belebele/_default_template_yaml b/lm-evaluation/lm_eval/tasks/belebele/_default_template_yaml new file mode 100644 index 0000000000000000000000000000000000000000..2583ced5688e1a0f97f3c46b1bc64d54c329a172 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/_default_template_yaml @@ -0,0 +1,19 @@ +group: belebele +dataset_path: facebook/belebele +fewshot_config: + sampler: first_n +output_type: multiple_choice +should_decontaminate: true +doc_to_decontamination_query: "{{question}}" +doc_to_text: "P: {{flores_passage}}\nQ: {{question.strip()}}\nA: {{mc_answer1}}\nB: {{mc_answer2}}\nC: {{mc_answer3}}\nD: {{mc_answer4}}\nAnswer:" +doc_to_choice: ["A", "B", "C", "D"] +doc_to_target: "{{['1', '2', '3', '4'].index(correct_answer_num)}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 0.0 diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_amh_Ethi.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_amh_Ethi.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e3d61066cf9ccdce81c9c5b91682c1616cc28e6a --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_amh_Ethi.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "amh_Ethi" +"include": "_default_template_yaml" +"task": "belebele_amh_Ethi" +"test_split": "amh_Ethi" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_arb_Arab.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_arb_Arab.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b6242fd38b2666f496751b8ed03639f712f350e3 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_arb_Arab.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "arb_Arab" +"include": "_default_template_yaml" +"task": "belebele_arb_Arab" +"test_split": "arb_Arab" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_asm_Beng.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_asm_Beng.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c24a1deceac5c596705e95c03e43b327363be9ae --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_asm_Beng.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "asm_Beng" +"include": "_default_template_yaml" +"task": "belebele_asm_Beng" +"test_split": "asm_Beng" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_ben_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_ben_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c8736c5242e5b8ff3f717c317292a42fd718db5b --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_ben_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "ben_Latn" +"include": "_default_template_yaml" +"task": "belebele_ben_Latn" +"test_split": "ben_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_est_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_est_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6a56ca90c0309d9475adad9b95db272577658f36 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_est_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "est_Latn" +"include": "_default_template_yaml" +"task": "belebele_est_Latn" +"test_split": "est_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_fuv_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_fuv_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2636cae850c6222906fac7f3c1533ea16684ee73 --- /dev/null +++ 
b/lm-evaluation/lm_eval/tasks/belebele/belebele_fuv_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "fuv_Latn" +"include": "_default_template_yaml" +"task": "belebele_fuv_Latn" +"test_split": "fuv_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_hin_Deva.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_hin_Deva.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9fa304c9b09100d17edd37a5f7caa1b11f5d22df --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_hin_Deva.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "hin_Deva" +"include": "_default_template_yaml" +"task": "belebele_hin_Deva" +"test_split": "hin_Deva" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_ilo_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_ilo_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fc3065da1c1a8c968845910a6f330c262a6a8a8e --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_ilo_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "ilo_Latn" +"include": "_default_template_yaml" +"task": "belebele_ilo_Latn" +"test_split": "ilo_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_ind_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_ind_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c15fff3ec36a0b3b140581f317553b2fa0e2e62c --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_ind_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "ind_Latn" +"include": "_default_template_yaml" +"task": "belebele_ind_Latn" +"test_split": "ind_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_isl_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_isl_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..69f9bb4e372ce1a39057ce4b70a7e48d23d199e2 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_isl_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "isl_Latn" +"include": "_default_template_yaml" +"task": "belebele_isl_Latn" +"test_split": "isl_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_jpn_Jpan.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_jpn_Jpan.yaml new file mode 100644 index 0000000000000000000000000000000000000000..73b60502bc90b9e2128f9e2eb72046cf961b1054 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_jpn_Jpan.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "jpn_Jpan" +"include": "_default_template_yaml" +"task": "belebele_jpn_Jpan" +"test_split": "jpn_Jpan" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_kac_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_kac_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..002bc9a15882eb9f51f745baa79cd22313ea92aa --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_kac_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "kac_Latn" +"include": "_default_template_yaml" +"task": "belebele_kac_Latn" +"test_split": "kac_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_kat_Geor.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_kat_Geor.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6392d29bbe98cf3c6e228d20c90dbb7cf1281789 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_kat_Geor.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "kat_Geor" +"include": "_default_template_yaml" +"task": "belebele_kat_Geor" +"test_split": "kat_Geor" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_khm_Khmr.yaml 
b/lm-evaluation/lm_eval/tasks/belebele/belebele_khm_Khmr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..39641c836b4ccf13ff16a730ef0ddd0ed6cc0962 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_khm_Khmr.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "khm_Khmr" +"include": "_default_template_yaml" +"task": "belebele_khm_Khmr" +"test_split": "khm_Khmr" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_lug_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_lug_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3de4c1cd96efadff9245b077b1d5cfca78a8e292 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_lug_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "lug_Latn" +"include": "_default_template_yaml" +"task": "belebele_lug_Latn" +"test_split": "lug_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_mkd_Cyrl.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_mkd_Cyrl.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c9887108a949eae7be2e17e28a7ed9f81559f303 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_mkd_Cyrl.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "mkd_Cyrl" +"include": "_default_template_yaml" +"task": "belebele_mkd_Cyrl" +"test_split": "mkd_Cyrl" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_mri_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_mri_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d704cdc478175da6c8894a08da2d2e177f895ed2 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_mri_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "mri_Latn" +"include": "_default_template_yaml" +"task": "belebele_mri_Latn" +"test_split": "mri_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_nld_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_nld_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..aea069996198c6bad23ed44969d3bc840ad04442 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_nld_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "nld_Latn" +"include": "_default_template_yaml" +"task": "belebele_nld_Latn" +"test_split": "nld_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_nob_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_nob_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cf824f3b9de3ce40db37060d2348c4a7b60a4c00 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_nob_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "nob_Latn" +"include": "_default_template_yaml" +"task": "belebele_nob_Latn" +"test_split": "nob_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_npi_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_npi_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e2ff68ad9a17c247ec2c8a9df996df7dbb9e115a --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_npi_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "npi_Latn" +"include": "_default_template_yaml" +"task": "belebele_npi_Latn" +"test_split": "npi_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_pes_Arab.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_pes_Arab.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ceda24e2a9be857e76584dff421da71427f6c50e --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_pes_Arab.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "pes_Arab" +"include": 
"_default_template_yaml" +"task": "belebele_pes_Arab" +"test_split": "pes_Arab" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_plt_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_plt_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..17889ae3556278624d79fe9bc632310a3008c66c --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_plt_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "plt_Latn" +"include": "_default_template_yaml" +"task": "belebele_plt_Latn" +"test_split": "plt_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_pol_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_pol_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ebfcf3534e53c10bfe370643bfc50fc94df5602c --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_pol_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "pol_Latn" +"include": "_default_template_yaml" +"task": "belebele_pol_Latn" +"test_split": "pol_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_por_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_por_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f63ace02d5a3fb8cc1b0b85a279c49370567b16c --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_por_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "por_Latn" +"include": "_default_template_yaml" +"task": "belebele_por_Latn" +"test_split": "por_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_ron_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_ron_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ab1ad0889b4797ce6068fd1c8741fc45e29cb588 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_ron_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "ron_Latn" +"include": "_default_template_yaml" +"task": "belebele_ron_Latn" +"test_split": "ron_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_rus_Cyrl.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_rus_Cyrl.yaml new file mode 100644 index 0000000000000000000000000000000000000000..280768c7c61d44a0a9e7bb8aeae64ebbecfdd84a --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_rus_Cyrl.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "rus_Cyrl" +"include": "_default_template_yaml" +"task": "belebele_rus_Cyrl" +"test_split": "rus_Cyrl" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_shn_Mymr.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_shn_Mymr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..75bbd05b16c54b593729f9a8f7b4499c25d2f5ed --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_shn_Mymr.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "shn_Mymr" +"include": "_default_template_yaml" +"task": "belebele_shn_Mymr" +"test_split": "shn_Mymr" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_slk_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_slk_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cddd1eb1e67713b5c635e598ece58d115dc0b4c0 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_slk_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "slk_Latn" +"include": "_default_template_yaml" +"task": "belebele_slk_Latn" +"test_split": "slk_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_slv_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_slv_Latn.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..9e3e5338231b478c0c9614dabb6e26c7dd11d994 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_slv_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "slv_Latn" +"include": "_default_template_yaml" +"task": "belebele_slv_Latn" +"test_split": "slv_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_som_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_som_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fa1d4329d878e141ffbbb3f7faed774abbfccacb --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_som_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "som_Latn" +"include": "_default_template_yaml" +"task": "belebele_som_Latn" +"test_split": "som_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_swe_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_swe_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6ac7a4afffb3db57119df0db971415e48aa94301 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_swe_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "swe_Latn" +"include": "_default_template_yaml" +"task": "belebele_swe_Latn" +"test_split": "swe_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_tel_Telu.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_tel_Telu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..de44fcc4848927331994d5ca42dce064c7758483 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_tel_Telu.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "tel_Telu" +"include": "_default_template_yaml" +"task": "belebele_tel_Telu" +"test_split": "tel_Telu" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_tsn_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_tsn_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e9c7aa83cf7f4195ad3f113f1420046d8986ed8a --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_tsn_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "tsn_Latn" +"include": "_default_template_yaml" +"task": "belebele_tsn_Latn" +"test_split": "tsn_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_uzn_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_uzn_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..109aebdd5fc3c6a07083821dae0f61c0037b6ae2 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_uzn_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "uzn_Latn" +"include": "_default_template_yaml" +"task": "belebele_uzn_Latn" +"test_split": "uzn_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_yor_Latn.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_yor_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9eb295cc0471885c83a4114b0551d134611d8c56 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_yor_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "yor_Latn" +"include": "_default_template_yaml" +"task": "belebele_yor_Latn" +"test_split": "yor_Latn" diff --git a/lm-evaluation/lm_eval/tasks/belebele/belebele_zho_Hans.yaml b/lm-evaluation/lm_eval/tasks/belebele/belebele_zho_Hans.yaml new file mode 100644 index 0000000000000000000000000000000000000000..32b0860bdaa2806161c32d9d8359d02ec61140d2 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/belebele/belebele_zho_Hans.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "zho_Hans" +"include": "_default_template_yaml" +"task": "belebele_zho_Hans" +"test_split": "zho_Hans" diff --git 
a/lm-evaluation/lm_eval/tasks/eus_trivia/README.md b/lm-evaluation/lm_eval/tasks/eus_trivia/README.md new file mode 100644 index 0000000000000000000000000000000000000000..88e760e43592d93ba27ee3b19c4edd0fc6f3e9f6 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_trivia/README.md @@ -0,0 +1,54 @@ +# EusTrivia + +### Paper + +Title: Latxa: An Open Language Model and Evaluation Suite for Basque + +Abstract: https://arxiv.org/abs/2403.20266 + +EusTrivia consists of 1,715 trivia questions from multiple online sources. 56.3\% of the questions are elementary level (grades 3-6), while the rest are considered challenging. A significant portion of the questions focus specifically on the Basque Country, its language and culture. Each multiple-choice question contains two, three or four choices (3.84 on average) and a single correct answer. Five areas of knowledge are covered: + +- **Humanities and Natural Sciences** (27.8%): This category encompasses questions about history, geography, biology, ecology and other social and natural sciences. +- **Leisure and Art** (24.5%): This category includes questions on sports and athletes, performative and plastic arts and artists, architecture, cultural events, and related topics. +- **Music** (16.0%): Here are grouped all the questions about music and musicians, both classical and contemporary. +- **Language and Literature** (17.1%): This category is concerned with all kinds of literature productions and writers, as well as metalinguistic questions (e.g., definitions, synonyms, and word usage). +- **Mathematics and ICT** (14.5%): This category covers mathematical problems and questions about ICT, as well as questions about people known for their contributions to these fields of knowledge. + +Homepage: https://github.com/hitz-zentroa/latxa + + +### Citation + +``` +@misc{etxaniz2024latxa, + title={Latxa: An Open Language Model and Evaluation Suite for Basque}, + author={Julen Etxaniz and Oscar Sainz and Naiara Perez and Itziar Aldabe and German Rigau and Eneko Agirre and Aitor Ormazabal and Mikel Artetxe and Aitor Soroa}, + year={2024}, + eprint={2403.20266}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Groups and Tasks + +#### Groups + +There are no groups. + +#### Tasks + +* `eus_trivia`: EusTrivia consists of 1,715 trivia questions from multiple online sources. + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
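Every per-language belebele YAML above only sets `task`, `test_split`, and `fewshot_split` and pulls the rest in via `include: _default_template_yaml`, so the prompt format is defined once. A minimal sketch, outside the harness, of how that template's `doc_to_text` and `doc_to_target` would render for an invented document (field names come from the template; the passage, question, and option strings are placeholders, and using `jinja2` directly is an assumption for illustration):

```python
from jinja2 import Template

# Invented belebele-style record; keys match the fields referenced in the template.
doc = {
    "flores_passage": "A short FLORES-200 passage goes here.",
    "question": "What is the passage mainly about? ",
    "mc_answer1": "Option one",
    "mc_answer2": "Option two",
    "mc_answer3": "Option three",
    "mc_answer4": "Option four",
    "correct_answer_num": "2",
}

doc_to_text = Template(
    "P: {{flores_passage}}\nQ: {{question.strip()}}\n"
    "A: {{mc_answer1}}\nB: {{mc_answer2}}\nC: {{mc_answer3}}\nD: {{mc_answer4}}\nAnswer:"
)
doc_to_target = Template("{{['1', '2', '3', '4'].index(correct_answer_num)}}")

print(doc_to_text.render(**doc))    # the prompt shown to the model
print(doc_to_target.render(**doc))  # "1", i.e. the gold choice is "B"
```

The target template maps the dataset's 1-based `correct_answer_num` string onto a 0-based index into `doc_to_choice: ["A", "B", "C", "D"]`, which is what the multiple_choice output type scores against.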
diff --git a/lm-evaluation/lm_eval/tasks/eus_trivia/eus_trivia.yaml b/lm-evaluation/lm_eval/tasks/eus_trivia/eus_trivia.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fe93ab61725867ae39d9be17ae33f9b769046683 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_trivia/eus_trivia.yaml @@ -0,0 +1,16 @@ +dataset_path: HiTZ/EusTrivia +dataset_name: default +task: eus_trivia +doc_to_text: !function utils.doc_to_text +doc_to_choice: !function utils.doc_to_choice +validation_split: null +test_split: test +fewshot_split: test +output_type: multiple_choice +doc_to_target: answer +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 0.0 diff --git a/lm-evaluation/lm_eval/tasks/eus_trivia/utils.py b/lm-evaluation/lm_eval/tasks/eus_trivia/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e5802c795bf558eacb60a05db6c344e925f6e4fa --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/eus_trivia/utils.py @@ -0,0 +1,41 @@ +from typing import List + + +letters = ["A", "B", "C", "D"] + + +def doc_to_text(doc) -> str: + """ + Converts a document to a formatted string. + + Args: + doc (dict): A dictionary containing the document information. + + Returns: + str: A formatted string containing the question and answer choices. + """ + candidates = doc["candidates"] + num_choices = len(candidates) + if num_choices < 2: + raise ValueError("Invalid number of candidates") + choices = letters[:num_choices] + formatted_choices = "\n".join( + [f"{choice}: {candidates[i]}" for i, choice in enumerate(choices)] + ) + return f"Galdera: {doc['question']}\n{formatted_choices}\nErantzuna:" + + +def doc_to_choice(doc) -> List[str]: + """ + Returns the answer choices for a document. + + Args: + doc (dict): A dictionary containing the document information. + + Returns: + list: A list of strings containing the answer choices. + """ + num_choices = len(doc["candidates"]) + if num_choices < 2: + raise ValueError("Invalid number of candidates") + return letters[:num_choices] diff --git a/lm-evaluation/lm_eval/tasks/gpqa/cot_n_shot/_gpqa_cot_n_shot_yaml b/lm-evaluation/lm_eval/tasks/gpqa/cot_n_shot/_gpqa_cot_n_shot_yaml new file mode 100644 index 0000000000000000000000000000000000000000..193539b92001223df8052167624d94e0c997d2cd --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gpqa/cot_n_shot/_gpqa_cot_n_shot_yaml @@ -0,0 +1,38 @@ +dataset_path: Idavidrein/gpqa +group: gpqa +output_type: generate_until +process_docs: !function utils.process_docs +training_split: train +# Because huggingface dataset only has train split +validation_split: train +test_split: null +description: "Here are some example questions from experts. 
Answer the final question yourself, following the format of the previous questions exactly.\n" +doc_to_text: "Question: {{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nLet's think step by step: " +doc_to_target: answer +filter_list: + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "(?<=The answer is )(.*)(?=.)" + - function: "take_first" + - name: "flexible-extract" + filter: + - function: "multi_choice_regex" + group_select: -1 + ignore_case: true + ignore_punctuation: true + regex_pattern: "(\\([A-Z]\\))" + - function: "take_first" +generation_kwargs: + until: + - "" + do_sample: false + temperature: 0.0 +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +metadata: + version: 1.0 diff --git a/lm-evaluation/lm_eval/tasks/gpqa/cot_n_shot/gpqa_main_cot_n_shot.yaml b/lm-evaluation/lm_eval/tasks/gpqa/cot_n_shot/gpqa_main_cot_n_shot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..916b6ea06a2e22042344b668191adbb3c91c4e75 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gpqa/cot_n_shot/gpqa_main_cot_n_shot.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: gpqa_main +include: _gpqa_cot_n_shot_yaml +task: gpqa_main_cot_n_shot diff --git a/lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/_generate_configs.py b/lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/_generate_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..bda00784cc2fa26b5f0d488cf7b6aea37243353d --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/_generate_configs.py @@ -0,0 +1,26 @@ +import yaml +from tqdm import tqdm + + +def main() -> None: + subset = ["extended", "diamond", "main"] + setting = "cot_zeroshot" + for task in tqdm(subset): + file_name = f"gpqa_{task}_{setting}.yaml" + try: + with open(f"{file_name}", "w") as f: + f.write("# Generated by _generate_configs.py\n") + yaml.dump( + { + "include": f"_gpqa_{setting}_yaml", + "task": f"gpqa_{task}_{setting}", + "dataset_name": f"gpqa_{task}", + }, + f, + ) + except FileExistsError: + pass + + +if __name__ == "__main__": + main() diff --git a/lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/_gpqa_cot_zeroshot_yaml b/lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/_gpqa_cot_zeroshot_yaml new file mode 100644 index 0000000000000000000000000000000000000000..df99f272c99a343d4250c44e3618f85e9e2a0682 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/_gpqa_cot_zeroshot_yaml @@ -0,0 +1,38 @@ +dataset_path: Idavidrein/gpqa +group: gpqa +output_type: generate_until +process_docs: !function utils.process_docs +training_split: train +# Because huggingface dataset only has train split +validation_split: train +test_split: null +doc_to_text: "What is the correct answer to this question:{{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nLet's think step by step: " +doc_to_target: answer +filter_list: + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "(?<=The answer is )(.*)(?=.)" + - function: "take_first" + - name: "flexible-extract" + filter: + - function: "multi_choice_regex" + group_select: -1 + ignore_case: true + ignore_punctuation: true + regex_pattern: "(\\([A-Z]\\))" + - function: "take_first" +generation_kwargs: + until: + - "" + do_sample: false + temperature: 0.0 +num_fewshot: 0 +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: 
true + ignore_punctuation: true +metadata: + version: 1.0 diff --git a/lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_diamond_cot_zeroshot.yaml b/lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_diamond_cot_zeroshot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e6a840fa1815096f5fa180ed06223e3523a06214 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_diamond_cot_zeroshot.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: gpqa_diamond +include: _gpqa_cot_zeroshot_yaml +task: gpqa_diamond_cot_zeroshot diff --git a/lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_extended_cot_zeroshot.yaml b/lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_extended_cot_zeroshot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9f542a6148f231e2d7e7e2a5a3437047459e3856 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_extended_cot_zeroshot.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: gpqa_extended +include: _gpqa_cot_zeroshot_yaml +task: gpqa_extended_cot_zeroshot diff --git a/lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_main_cot_zeroshot.yaml b/lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_main_cot_zeroshot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8c14604854294c4551e2602e573488c6a7fef254 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_main_cot_zeroshot.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: gpqa_main +include: _gpqa_cot_zeroshot_yaml +task: gpqa_main_cot_zeroshot diff --git a/lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/utils.py b/lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..96bcd52b140fd0a5896f55c0a52ea2fd5453fd53 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/utils.py @@ -0,0 +1,39 @@ +import random +import re + +import datasets + + +def preprocess(text): + if text is None: + return " " + text = text.strip() + text = text.replace(" [title]", ". 
") + text = re.sub("\\[.*?\\]", "", text) + text = text.replace(" ", " ") + return text + + +def process_docs(dataset: datasets.Dataset) -> datasets.Dataset: + def _process_doc(doc): + choices = [ + preprocess(doc["Incorrect Answer 1"]), + preprocess(doc["Incorrect Answer 2"]), + preprocess(doc["Incorrect Answer 3"]), + preprocess(doc["Correct Answer"]), + ] + + random.shuffle(choices) + correct_answer_index = choices.index(preprocess(doc["Correct Answer"])) + + out_doc = { + "choice1": choices[0], + "choice2": choices[1], + "choice3": choices[2], + "choice4": choices[3], + "choices": [choices[0], choices[1], choices[2], choices[3]], + "answer": f"({chr(65 + correct_answer_index)})", + } + return out_doc + + return dataset.map(_process_doc) diff --git a/lm-evaluation/lm_eval/tasks/gpqa/n_shot/_generate_configs.py b/lm-evaluation/lm_eval/tasks/gpqa/n_shot/_generate_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..c01f208e767cb813e6d2116caf74c3d0b2fccfb3 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gpqa/n_shot/_generate_configs.py @@ -0,0 +1,26 @@ +import yaml +from tqdm import tqdm + + +def main() -> None: + subset = ["extended", "diamond", "main"] + + for task in tqdm(subset): + file_name = f"gpqa_{task}_n_shot.yaml" + try: + with open(f"{file_name}", "w") as f: + f.write("# Generated by _generate_configs.py\n") + yaml.dump( + { + "include": "_gpqa_n_shot_yaml", + "task": f"gpqa_{task}_n_shot", + "dataset_name": f"gpqa_{task}", + }, + f, + ) + except FileExistsError: + pass + + +if __name__ == "__main__": + main() diff --git a/lm-evaluation/lm_eval/tasks/gpqa/n_shot/_gpqa_n_shot_yaml b/lm-evaluation/lm_eval/tasks/gpqa/n_shot/_gpqa_n_shot_yaml new file mode 100644 index 0000000000000000000000000000000000000000..96d4fc25ff1360e3ff989961230383a2de59b7eb --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gpqa/n_shot/_gpqa_n_shot_yaml @@ -0,0 +1,21 @@ +dataset_path: Idavidrein/gpqa +group: gpqa +output_type: multiple_choice +process_docs: !function utils.process_docs +training_split: train +# Because huggingface dataset only has train split +validation_split: train +test_split: null +description: "Here are some example questions from experts. 
Answer the final question yourself, following the format of the previous questions exactly.\n" +doc_to_text: "Question: {{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nAnswer:" +doc_to_target: answer +doc_to_choice: ["(A)", "(B)", "(C)", "(D)"] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation/lm_eval/tasks/gpqa/n_shot/gpqa_diamond_n_shot.yaml b/lm-evaluation/lm_eval/tasks/gpqa/n_shot/gpqa_diamond_n_shot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3043a7e53647ff72d535abc113dfccebaa1bd43c --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gpqa/n_shot/gpqa_diamond_n_shot.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: gpqa_diamond +include: _gpqa_n_shot_yaml +task: gpqa_diamond_n_shot diff --git a/lm-evaluation/lm_eval/tasks/gpqa/n_shot/gpqa_extended_n_shot.yaml b/lm-evaluation/lm_eval/tasks/gpqa/n_shot/gpqa_extended_n_shot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5d16b505b355bccb3d6fd70eb16b307c12d06a09 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gpqa/n_shot/gpqa_extended_n_shot.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: gpqa_extended +include: _gpqa_n_shot_yaml +task: gpqa_extended_n_shot diff --git a/lm-evaluation/lm_eval/tasks/gpqa/n_shot/gpqa_main_n_shot.yaml b/lm-evaluation/lm_eval/tasks/gpqa/n_shot/gpqa_main_n_shot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7e5f3e9532ab41c0158409e6afb47393806c4177 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gpqa/n_shot/gpqa_main_n_shot.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: gpqa_main +include: _gpqa_n_shot_yaml +task: gpqa_main_n_shot diff --git a/lm-evaluation/lm_eval/tasks/gpqa/n_shot/utils.py b/lm-evaluation/lm_eval/tasks/gpqa/n_shot/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e0b886d2879216094214ce534438e4db0c5e60f8 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gpqa/n_shot/utils.py @@ -0,0 +1,41 @@ +import random +import re + +import datasets + + +def preprocess(text): + if text is None: + return " " + text = text.strip() + text = text.replace(" [title]", ". 
") + text = re.sub("\\[.*?\\]", "", text) + text = text.replace(" ", " ") + return text + + +rng = random.Random(42) + + +def process_docs(dataset: datasets.Dataset) -> datasets.Dataset: + def _process_doc(doc): + choices = [ + preprocess(doc["Incorrect Answer 1"]), + preprocess(doc["Incorrect Answer 2"]), + preprocess(doc["Incorrect Answer 3"]), + preprocess(doc["Correct Answer"]), + ] + + rng.shuffle(choices) + correct_answer_index = choices.index(preprocess(doc["Correct Answer"])) + + out_doc = { + "choice1": choices[0], + "choice2": choices[1], + "choice3": choices[2], + "choice4": choices[3], + "answer": f"({chr(65 + correct_answer_index)})", + } + return out_doc + + return dataset.map(_process_doc) diff --git a/lm-evaluation/lm_eval/tasks/gpqa/zeroshot/_generate_configs.py b/lm-evaluation/lm_eval/tasks/gpqa/zeroshot/_generate_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..79afbd6f1d8d4b2eb54455d734f6245357580bd3 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gpqa/zeroshot/_generate_configs.py @@ -0,0 +1,26 @@ +import yaml +from tqdm import tqdm + + +def main() -> None: + subset = ["extended", "diamond", "main"] + setting = "zeroshot" + for task in tqdm(subset): + file_name = f"gpqa_{task}_{setting}.yaml" + try: + with open(f"{file_name}", "w") as f: + f.write("# Generated by _generate_configs.py\n") + yaml.dump( + { + "include": f"_gpqa_{setting}_yaml", + "task": f"gpqa_{task}_{setting}", + "dataset_name": f"gpqa_{task}", + }, + f, + ) + except FileExistsError: + pass + + +if __name__ == "__main__": + main() diff --git a/lm-evaluation/lm_eval/tasks/gpqa/zeroshot/_gpqa_zeroshot_yaml b/lm-evaluation/lm_eval/tasks/gpqa/zeroshot/_gpqa_zeroshot_yaml new file mode 100644 index 0000000000000000000000000000000000000000..707641b5f0c6243d48f77c6a4a56d5ec824baa4e --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gpqa/zeroshot/_gpqa_zeroshot_yaml @@ -0,0 +1,21 @@ +dataset_path: Idavidrein/gpqa +group: gpqa +output_type: multiple_choice +process_docs: !function utils.process_docs +training_split: train +# Because huggingface dataset only has train split +validation_split: train +test_split: null +doc_to_text: "What is the correct answer to this question:{{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nAnswer:" +doc_to_target: answer +doc_to_choice: ["(A)", "(B)", "(C)", "(D)"] +num_fewshot: 0 +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation/lm_eval/tasks/gpqa/zeroshot/gpqa_diamond_zeroshot.yaml b/lm-evaluation/lm_eval/tasks/gpqa/zeroshot/gpqa_diamond_zeroshot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c3a7921c30b3ff09e82aacb4c0e915010f698966 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gpqa/zeroshot/gpqa_diamond_zeroshot.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: gpqa_diamond +include: _gpqa_zeroshot_yaml +task: gpqa_diamond_zeroshot diff --git a/lm-evaluation/lm_eval/tasks/gpqa/zeroshot/gpqa_extended_zeroshot.yaml b/lm-evaluation/lm_eval/tasks/gpqa/zeroshot/gpqa_extended_zeroshot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5e7347f11154351ad4560200a3f3bf54106a1a8f --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gpqa/zeroshot/gpqa_extended_zeroshot.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: gpqa_extended +include: _gpqa_zeroshot_yaml +task: 
gpqa_extended_zeroshot diff --git a/lm-evaluation/lm_eval/tasks/gpqa/zeroshot/gpqa_main_zeroshot.yaml b/lm-evaluation/lm_eval/tasks/gpqa/zeroshot/gpqa_main_zeroshot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1a8d7fb59025d148130f2a468cb1bbdfad959102 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gpqa/zeroshot/gpqa_main_zeroshot.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: gpqa_main +include: _gpqa_zeroshot_yaml +task: gpqa_main_zeroshot diff --git a/lm-evaluation/lm_eval/tasks/gpqa/zeroshot/utils.py b/lm-evaluation/lm_eval/tasks/gpqa/zeroshot/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c2317e02efd132aea27ec8c8fad284df55ccd382 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/gpqa/zeroshot/utils.py @@ -0,0 +1,38 @@ +import random +import re + +import datasets + + +def preprocess(text): + if text is None: + return " " + text = text.strip() + text = text.replace(" [title]", ". ") + text = re.sub("\\[.*?\\]", "", text) + text = text.replace(" ", " ") + return text + + +def process_docs(dataset: datasets.Dataset) -> datasets.Dataset: + def _process_doc(doc): + choices = [ + preprocess(doc["Incorrect Answer 1"]), + preprocess(doc["Incorrect Answer 2"]), + preprocess(doc["Incorrect Answer 3"]), + preprocess(doc["Correct Answer"]), + ] + + random.shuffle(choices) + correct_answer_index = choices.index(preprocess(doc["Correct Answer"])) + + out_doc = { + "choice1": choices[0], + "choice2": choices[1], + "choice3": choices[2], + "choice4": choices[3], + "answer": f"({chr(65 + correct_answer_index)})", + } + return out_doc + + return dataset.map(_process_doc) diff --git a/lm-evaluation/lm_eval/tasks/indiccopa/__pycache__/utils.cpython-310.pyc b/lm-evaluation/lm_eval/tasks/indiccopa/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f50474f8f794d42b4b83ac620126f0d73bc5f606 Binary files /dev/null and b/lm-evaluation/lm_eval/tasks/indiccopa/__pycache__/utils.cpython-310.pyc differ diff --git a/lm-evaluation/lm_eval/tasks/indiccopa/indiccopa_as.yaml b/lm-evaluation/lm_eval/tasks/indiccopa/indiccopa_as.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6ac7094316356a1d29fbda640465cd59f39acff7 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indiccopa/indiccopa_as.yaml @@ -0,0 +1,5 @@ +dataset_name: translation-as +include: indiccopa_common_yaml +doc_to_text: !function utils.doc_to_text_as + +task: indiccopa_as diff --git a/lm-evaluation/lm_eval/tasks/indiccopa/indiccopa_bn.yaml b/lm-evaluation/lm_eval/tasks/indiccopa/indiccopa_bn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..02bca115f39b13bc9e59ed7aa289b3af172555a0 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indiccopa/indiccopa_bn.yaml @@ -0,0 +1,5 @@ +dataset_name: translation-bn +include: indiccopa_common_yaml +doc_to_text: !function utils.doc_to_text_bn + +task: indiccopa_bn diff --git a/lm-evaluation/lm_eval/tasks/indiccopa/indiccopa_kn.yaml b/lm-evaluation/lm_eval/tasks/indiccopa/indiccopa_kn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4eff187f13f72e9510d11b3addf411f773962cbb --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indiccopa/indiccopa_kn.yaml @@ -0,0 +1,5 @@ +dataset_name: translation-kn +include: indiccopa_common_yaml +doc_to_text: !function utils.doc_to_text_kn + +task: indiccopa_kn diff --git a/lm-evaluation/lm_eval/tasks/indiccopa/indiccopa_mr.yaml b/lm-evaluation/lm_eval/tasks/indiccopa/indiccopa_mr.yaml new file 
mode 100644 index 0000000000000000000000000000000000000000..bc74086dd322a6f906377a317d1767c2f882ed80 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indiccopa/indiccopa_mr.yaml @@ -0,0 +1,5 @@ +dataset_name: translation-mr +include: indiccopa_common_yaml +doc_to_text: !function utils.doc_to_text_mr + +task: indiccopa_mr diff --git a/lm-evaluation/lm_eval/tasks/indiccopa/indiccopa_sa.yaml b/lm-evaluation/lm_eval/tasks/indiccopa/indiccopa_sa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..044a26740ccc43a1d0c2c4c8db8baf2d72f11f25 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indiccopa/indiccopa_sa.yaml @@ -0,0 +1,5 @@ +dataset_name: translation-sa +include: indiccopa_common_yaml +doc_to_text: !function utils.doc_to_text_sa + +task: indiccopa_sa diff --git a/lm-evaluation/lm_eval/tasks/indiccopa/indiccopa_sd.yaml b/lm-evaluation/lm_eval/tasks/indiccopa/indiccopa_sd.yaml new file mode 100644 index 0000000000000000000000000000000000000000..31bf0624ff025d680a2cfcdf579ad16841c5b7fd --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indiccopa/indiccopa_sd.yaml @@ -0,0 +1,5 @@ +dataset_name: translation-sd +include: indiccopa_common_yaml +doc_to_text: !function utils.doc_to_text_sd + +task: indiccopa_sd diff --git a/lm-evaluation/lm_eval/tasks/indicxnli/indicxnli_common_yaml b/lm-evaluation/lm_eval/tasks/indicxnli/indicxnli_common_yaml new file mode 100644 index 0000000000000000000000000000000000000000..a450bde02357d9b64efc47ffbd9b9634394b7287 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indicxnli/indicxnli_common_yaml @@ -0,0 +1,22 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +group: Divyanshu/indicxnli + +dataset_path: Divyanshu/indicxnli +# dataset_name: null +output_type: multiple_choice + +training_split: train +validation_split: validation + +doc_to_text: null +doc_to_target: label +doc_to_choice: null + +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation/lm_eval/tasks/indicxnli/indicxnli_gu.yaml b/lm-evaluation/lm_eval/tasks/indicxnli/indicxnli_gu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7e8c8cb337d95056c67941b8b743a61ab49e072b --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indicxnli/indicxnli_gu.yaml @@ -0,0 +1,7 @@ +# Generated by utils.py +dataset_name: gu +doc_to_choice: '{{[premise+", સાચું? હા, "+hypothesis,premise+", સાચું? તેનાથી, "+hypothesis,premise+", + સાચું? ના, "+hypothesis]}}' +doc_to_text: '' +include: indicxnli_common_yaml +task: indicxnli_gu diff --git a/lm-evaluation/lm_eval/tasks/indicxnli/indicxnli_hi.yaml b/lm-evaluation/lm_eval/tasks/indicxnli/indicxnli_hi.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8897d99a6ea0b89b642ab2ddd75c11afeae42fae --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/indicxnli/indicxnli_hi.yaml @@ -0,0 +1,7 @@ +# Generated by utils.py +dataset_name: hi +doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+", + सही? 
नहीं, "+hypothesis]}}' +doc_to_text: '' +include: indicxnli_common_yaml +task: indicxnli_hi diff --git a/lm-evaluation/lm_eval/tasks/mc_taco/README.md b/lm-evaluation/lm_eval/tasks/mc_taco/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2bab6369468ecead4f3cfae9964e3a04d5e06423 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mc_taco/README.md @@ -0,0 +1,53 @@ +# MC Taco + +### Paper + +Title: `"Going on a vacation" takes longer than "Going for a walk": A Study of Temporal Commonsense Understanding` +Abstract: https://arxiv.org/abs/1909.03065 + +MC-TACO is a dataset of 13k question-answer pairs that require temporal commonsense +comprehension. The dataset contains five temporal properties, (1) duration (how long +an event takes), (2) temporal ordering (typical order of events), (3) typical time +(when an event occurs), (4) frequency (how often an event occurs), and (5) stationarity +(whether a state is maintained for a very long time or indefinitely). + +WARNING: Running this task with a `--limit` arg will give misleading results! The +corresponding dataset is structured such that each multiple-choice-question gathered +by the authors is split into question-option pairs, where each such pair gets +siloed into an individual document for plausibility testing. Because the harness +shuffles these documents, setting `--limit` will likely "cut off" certain candidate +answers. This is a problem because the task's metrics require an exhaustive evaluation +of a question's options. See section 4 of the paper for details. + +Homepage: https://leaderboard.allenai.org/mctaco/submissions/public + + +### Citation + +``` +BibTeX-formatted citation goes here +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet. + +#### Tasks + +* `mc_taco` + + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
diff --git a/lm-evaluation/lm_eval/tasks/mc_taco/default.yaml b/lm-evaluation/lm_eval/tasks/mc_taco/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..16aee3f7e76098acdd53ec88adf5cc078e3a5907 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/mc_taco/default.yaml @@ -0,0 +1,15 @@ +task: mc_taco +dataset_path: mc_taco +output_type: multiple_choice +validation_split: validation +test_split: test +doc_to_text: "{{sentence}}\nQuestion: {{question}}\nAnswer: {{answer}}\nPlausible:" +doc_to_target: label +doc_to_choice: ["no", "yes"] +should_decontaminate: true +doc_to_decontamination_query: "{{question}} {{sentence}}" +metric_list: + - metric: acc + - metric: f1 +metadata: + version: 1.0 diff --git a/lm-evaluation/lm_eval/tasks/squadv2/README.md b/lm-evaluation/lm_eval/tasks/squadv2/README.md new file mode 100644 index 0000000000000000000000000000000000000000..bad0c4e2d80ec17c3f4a4c2f15db2ce6a6632db4 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/squadv2/README.md @@ -0,0 +1,54 @@ +# Task-name + +### Paper + +Title: `Know What You Don’t Know: Unanswerable Questions for SQuAD` +Abstract: https://arxiv.org/abs/1806.03822 + +Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, +consisting of questions posed by crowdworkers on a set of Wikipedia articles, +where the answer to every question is a segment of text, or span, from the +corresponding reading passage, or the question might be unanswerable. +SQuAD2.0 combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable +questions written adversarially by crowdworkers to look similar to answerable ones. +To do well on SQuAD2.0, systems must not only answer questions when possible, but +also determine when no answer is supported by the paragraph and abstain from answering. + +Homepage: https://rajpurkar.github.io/SQuAD-explorer/ + + +### Citation + +``` +@misc{rajpurkar2018know, + title={Know What You Don't Know: Unanswerable Questions for SQuAD}, + author={Pranav Rajpurkar and Robin Jia and Percy Liang}, + year={2018}, + eprint={1806.03822}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet + +#### Tasks + +* `squadv2`: `Default squadv2 task` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
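The `mc_taco/default.yaml` above frames each question-option pair as a yes/no plausibility judgement, which is also why the README warns that `--limit` can strand some options of a question. A sketch of the prompt the template produces for an invented pair, with the two continuations the harness would score (field names follow the config; the sentence, question, and answer strings, and the reading of label 1 as "plausible", are assumptions for illustration):

```python
from jinja2 import Template

# Invented question-option pair with the mc_taco field names.
doc = {
    "sentence": "She went on a vacation to Hawaii.",
    "question": "How long did the vacation last?",
    "answer": "ten days",
    "label": 1,  # assumed: 1 maps to "yes" (plausible), 0 to "no"
}

prompt = Template(
    "{{sentence}}\nQuestion: {{question}}\nAnswer: {{answer}}\nPlausible:"
).render(**doc)

choices = ["no", "yes"]          # doc_to_choice
target = choices[doc["label"]]   # doc_to_target: label indexes into doc_to_choice

print(prompt)
print("gold continuation:", target)
```

Accuracy and F1 over these binary judgements are then aggregated across all option pairs of a question, which is the exhaustive evaluation the README refers to.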
diff --git a/lm-evaluation/lm_eval/tasks/squadv2/squadv2.yaml b/lm-evaluation/lm_eval/tasks/squadv2/squadv2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..13e451645cc23284f3b45f15527c365410118617 --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/squadv2/squadv2.yaml @@ -0,0 +1,2 @@ +task: squadv2 +class: !function task.SQuAD2 diff --git a/lm-evaluation/lm_eval/tasks/squadv2/task.py b/lm-evaluation/lm_eval/tasks/squadv2/task.py new file mode 100644 index 0000000000000000000000000000000000000000..ef6be3e1fe208893c19163d6dc6f9d3fba38cb8a --- /dev/null +++ b/lm-evaluation/lm_eval/tasks/squadv2/task.py @@ -0,0 +1,240 @@ +""" +Know What You Don’t Know: Unanswerable Questions for SQuAD +https://arxiv.org/pdf/1806.03822.pdf + +Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, +consisting of questions posed by crowdworkers on a set of Wikipedia articles, +where the answer to every question is a segment of text, or span, from the +corresponding reading passage, or the question might be unanswerable. +SQuAD2.0 combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable +questions written adversarially by crowdworkers to look similar to answerable ones. +To do well on SQuAD2.0, systems must not only answer questions when possible, but +also determine when no answer is supported by the paragraph and abstain from answering. + +Homepage: https://rajpurkar.github.io/SQuAD-explorer/ +""" +from functools import partial +from math import exp + +import datasets +from packaging import version + +from lm_eval.api.instance import Instance +from lm_eval.api.task import ConfigurableTask + + +_CITATION = """ +@misc{rajpurkar2018know, + title={Know What You Don't Know: Unanswerable Questions for SQuAD}, + author={Pranav Rajpurkar and Robin Jia and Percy Liang}, + year={2018}, + eprint={1806.03822}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +""" + + +def _squad_metric(predictions, references): + squad_metric = datasets.load_metric("squad_v2") + return squad_metric.compute(predictions=predictions, references=references) + + +def _squad_agg(key, items): + predictions, references = zip(*items) + + return _squad_metric(predictions=predictions, references=references).get(key, 0) + + +class SQuAD2(ConfigurableTask): + VERSION = 3 + DATASET_PATH = "squad_v2" + DATASET_NAME = None + + def __init__(self): + super().__init__(config={"metadata": {"version": self.VERSION}}) + + # HF changed squad on us so we have to make sure we aren't running the old one + assert version.parse(datasets.__version__) >= version.parse( + "1.11.0" + ), "datasets v1.11.0 or later required for SQuAD" + + def has_training_docs(self): + return True + + def has_validation_docs(self): + return True + + def has_test_docs(self): + return False + + def training_docs(self): + return self.dataset["train"] + + def validation_docs(self): + return self.dataset["validation"] + + def doc_to_text(self, doc): + return ( + "Title: " + + doc["title"] + + "\n\n" + + "Background: " + + doc["context"] + + "\n\n" + + "Question: " + + doc["question"] + + "\n\n" + + "Answer:" + ) + + def should_decontaminate(self): + return True + + def doc_to_decontamination_query(self, doc): + return doc["context"] + + def doc_to_target(self, doc): + answer_list = doc["answers"]["text"] + if len(answer_list) > 0: + answer = answer_list[0] + else: + answer = "unanswerable" + return " " + answer + + def construct_requests(self, doc, ctx, **kwargs): + """Uses RequestFactory to construct Requests and returns an 
iterable of + Requests which will be sent to the LM. + + :param doc: + The document as returned from training_docs, validation_docs, or test_docs. + :param ctx: str + The context string, generated by fewshot_context. This includes the natural + language description, as well as the few shot examples, and the question + part of the document for `doc`. + """ + + return [ + Instance( + request_type="generate_until", + doc=doc, + arguments=(ctx, {"until": ["\n"]}), + idx=0, + **kwargs, + ), + Instance( + request_type="loglikelihood", + doc=doc, + arguments=(ctx, " " + "unanswerable"), + idx=0, + **kwargs, + ), + ] + + def process_results(self, doc, results): + """Take a single document and the LM results and evaluates, returning a + dict where keys are the names of submetrics and values are the values of + the metric for that one document + + :param doc: + The document as returned from training_docs, validation_docs, or test_docs. + :param results: + The results of the requests created in construct_requests. + """ + + continuation, (logprob_unanswerable, _) = results + + no_answer_probability = exp(logprob_unanswerable) + + predictions = { + "id": doc["id"], + "prediction_text": continuation, + "no_answer_probability": no_answer_probability, + } + + references = { + "id": doc["id"], + "answers": doc["answers"], + } + + return { + "exact": ( + predictions, + references, + ), # Exact match (the normalized answer exactly match the gold answer) + "f1": ( + predictions, + references, + ), # The F-score of predicted tokens versus the gold answer + "HasAns_exact": ( + predictions, + references, + ), # Exact match (the normalized answer exactly match the gold answer) + "HasAns_f1": ( + predictions, + references, + ), # The F-score of predicted tokens versus the gold answer + "NoAns_exact": ( + predictions, + references, + ), # Exact match (the normalized answer exactly match the gold answer) + "NoAns_f1": ( + predictions, + references, + ), # The F-score of predicted tokens versus the gold answer + "best_exact": ( + predictions, + references, + ), # Best exact match (with varying threshold) + "best_f1": (predictions, references), # Best F1 (with varying threshold) + } + + def aggregation(self): + """ + :returns: {str: [float] -> float} + A dictionary where keys are the names of submetrics and values are + functions that aggregate a list of metrics + """ + return { + "exact": partial( + _squad_agg, "exact" + ), # Exact match (the normalized answer exactly match the gold answer) + "f1": partial( + _squad_agg, "f1" + ), # The F-score of predicted tokens versus the gold answer + "HasAns_exact": partial( + _squad_agg, "HasAns_exact" + ), # Exact match (the normalized answer exactly match the gold answer) + "HasAns_f1": partial( + _squad_agg, "HasAns_f1" + ), # The F-score of predicted tokens versus the gold answer + "NoAns_exact": partial( + _squad_agg, "NoAns_exact" + ), # Exact match (the normalized answer exactly match the gold answer) + "NoAns_f1": partial( + _squad_agg, "NoAns_f1" + ), # The F-score of predicted tokens versus the gold answer + "best_exact": partial( + _squad_agg, "best_exact" + ), # Best exact match (with varying threshold) + "best_f1": partial( + _squad_agg, "best_f1" + ), # Best F1 (with varying threshold) + } + + def higher_is_better(self): + """ + :returns: {str: bool} + A dictionary where keys are the names of submetrics and values are + whether a higher value of the submetric is better + """ + return { + "exact": True, # Exact match (the normalized answer exactly match the gold 
answer) + "f1": True, # The F-score of predicted tokens versus the gold answer + "HasAns_exact": True, # Exact match (the normalized answer exactly match the gold answer) + "HasAns_f1": True, # The F-score of predicted tokens versus the gold answer + "NoAns_exact": True, # Exact match (the normalized answer exactly match the gold answer) + "NoAns_f1": True, # The F-score of predicted tokens versus the gold answer + "best_exact": True, # Best exact match (with varying threshold) + "best_f1": True, # Best F1 (with varying threshold) + }
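The per-document `(predictions, references)` tuples returned by `process_results` are only scored at aggregation time, via the HF `squad_v2` metric that `_squad_metric` loads. A standalone sketch of that final step with a single made-up prediction/reference pair (the id, texts, and log-probability are invented; the dict keys are the ones the squad_v2 metric expects):

```python
from math import exp

import datasets

# One invented prediction/reference pair in the squad_v2 metric's input format.
predictions = [{
    "id": "example-0",
    "prediction_text": "in the 10th and 11th centuries",
    "no_answer_probability": exp(-9.2),  # exp of the "unanswerable" loglikelihood
}]
references = [{
    "id": "example-0",
    "answers": {"text": ["in the 10th and 11th centuries"], "answer_start": [94]},
}]

squad_metric = datasets.load_metric("squad_v2")
scores = squad_metric.compute(predictions=predictions, references=references)
print(scores["exact"], scores["f1"], scores["best_exact"], scores["best_f1"])
```

The `.get(key, 0)` in `_squad_agg` covers splits that contain no unanswerable (or no answerable) questions, since the metric omits the corresponding `NoAns_*` or `HasAns_*` keys in that case.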