applied-ai-018 committed
Commit 053f734 · verified · 1 Parent(s): c6d4165

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. lm-evaluation/lm_eval/tasks/belebele/_default_template_yaml +19 -0
  2. lm-evaluation/lm_eval/tasks/belebele/belebele_amh_Ethi.yaml +4 -0
  3. lm-evaluation/lm_eval/tasks/belebele/belebele_arb_Arab.yaml +4 -0
  4. lm-evaluation/lm_eval/tasks/belebele/belebele_asm_Beng.yaml +4 -0
  5. lm-evaluation/lm_eval/tasks/belebele/belebele_ben_Latn.yaml +4 -0
  6. lm-evaluation/lm_eval/tasks/belebele/belebele_est_Latn.yaml +4 -0
  7. lm-evaluation/lm_eval/tasks/belebele/belebele_fuv_Latn.yaml +4 -0
  8. lm-evaluation/lm_eval/tasks/belebele/belebele_hin_Deva.yaml +4 -0
  9. lm-evaluation/lm_eval/tasks/belebele/belebele_ilo_Latn.yaml +4 -0
  10. lm-evaluation/lm_eval/tasks/belebele/belebele_ind_Latn.yaml +4 -0
  11. lm-evaluation/lm_eval/tasks/belebele/belebele_isl_Latn.yaml +4 -0
  12. lm-evaluation/lm_eval/tasks/belebele/belebele_jpn_Jpan.yaml +4 -0
  13. lm-evaluation/lm_eval/tasks/belebele/belebele_kac_Latn.yaml +4 -0
  14. lm-evaluation/lm_eval/tasks/belebele/belebele_kat_Geor.yaml +4 -0
  15. lm-evaluation/lm_eval/tasks/belebele/belebele_khm_Khmr.yaml +4 -0
  16. lm-evaluation/lm_eval/tasks/belebele/belebele_lug_Latn.yaml +4 -0
  17. lm-evaluation/lm_eval/tasks/belebele/belebele_mkd_Cyrl.yaml +4 -0
  18. lm-evaluation/lm_eval/tasks/belebele/belebele_mri_Latn.yaml +4 -0
  19. lm-evaluation/lm_eval/tasks/belebele/belebele_nld_Latn.yaml +4 -0
  20. lm-evaluation/lm_eval/tasks/belebele/belebele_nob_Latn.yaml +4 -0
  21. lm-evaluation/lm_eval/tasks/belebele/belebele_npi_Latn.yaml +4 -0
  22. lm-evaluation/lm_eval/tasks/belebele/belebele_pes_Arab.yaml +4 -0
  23. lm-evaluation/lm_eval/tasks/belebele/belebele_plt_Latn.yaml +4 -0
  24. lm-evaluation/lm_eval/tasks/belebele/belebele_pol_Latn.yaml +4 -0
  25. lm-evaluation/lm_eval/tasks/belebele/belebele_por_Latn.yaml +4 -0
  26. lm-evaluation/lm_eval/tasks/belebele/belebele_ron_Latn.yaml +4 -0
  27. lm-evaluation/lm_eval/tasks/belebele/belebele_rus_Cyrl.yaml +4 -0
  28. lm-evaluation/lm_eval/tasks/belebele/belebele_shn_Mymr.yaml +4 -0
  29. lm-evaluation/lm_eval/tasks/belebele/belebele_slk_Latn.yaml +4 -0
  30. lm-evaluation/lm_eval/tasks/belebele/belebele_slv_Latn.yaml +4 -0
  31. lm-evaluation/lm_eval/tasks/belebele/belebele_som_Latn.yaml +4 -0
  32. lm-evaluation/lm_eval/tasks/belebele/belebele_swe_Latn.yaml +4 -0
  33. lm-evaluation/lm_eval/tasks/belebele/belebele_tel_Telu.yaml +4 -0
  34. lm-evaluation/lm_eval/tasks/belebele/belebele_tsn_Latn.yaml +4 -0
  35. lm-evaluation/lm_eval/tasks/belebele/belebele_uzn_Latn.yaml +4 -0
  36. lm-evaluation/lm_eval/tasks/belebele/belebele_yor_Latn.yaml +4 -0
  37. lm-evaluation/lm_eval/tasks/belebele/belebele_zho_Hans.yaml +4 -0
  38. lm-evaluation/lm_eval/tasks/eus_trivia/README.md +54 -0
  39. lm-evaluation/lm_eval/tasks/eus_trivia/eus_trivia.yaml +16 -0
  40. lm-evaluation/lm_eval/tasks/eus_trivia/utils.py +41 -0
  41. lm-evaluation/lm_eval/tasks/gpqa/cot_n_shot/_gpqa_cot_n_shot_yaml +38 -0
  42. lm-evaluation/lm_eval/tasks/gpqa/cot_n_shot/gpqa_main_cot_n_shot.yaml +4 -0
  43. lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/_generate_configs.py +26 -0
  44. lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/_gpqa_cot_zeroshot_yaml +38 -0
  45. lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_diamond_cot_zeroshot.yaml +4 -0
  46. lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_extended_cot_zeroshot.yaml +4 -0
  47. lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_main_cot_zeroshot.yaml +4 -0
  48. lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/utils.py +39 -0
  49. lm-evaluation/lm_eval/tasks/gpqa/n_shot/_generate_configs.py +26 -0
  50. lm-evaluation/lm_eval/tasks/gpqa/n_shot/_gpqa_n_shot_yaml +21 -0
lm-evaluation/lm_eval/tasks/belebele/_default_template_yaml ADDED
@@ -0,0 +1,19 @@
+ group: belebele
+ dataset_path: facebook/belebele
+ fewshot_config:
+   sampler: first_n
+ output_type: multiple_choice
+ should_decontaminate: true
+ doc_to_decontamination_query: "{{question}}"
+ doc_to_text: "P: {{flores_passage}}\nQ: {{question.strip()}}\nA: {{mc_answer1}}\nB: {{mc_answer2}}\nC: {{mc_answer3}}\nD: {{mc_answer4}}\nAnswer:"
+ doc_to_choice: ["A", "B", "C", "D"]
+ doc_to_target: "{{['1', '2', '3', '4'].index(correct_answer_num)}}"
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 0.0
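Each per-language file that follows sets only the task name and splits and pulls everything else from this template via `include`. As orientation (not part of this commit), running one of these tasks through the harness's Python API could look roughly like the sketch below; the model and fewshot count are placeholders, and `lm_eval.simple_evaluate` is assumed from recent lm-evaluation-harness releases.

```python
# Sketch only: evaluating one Belebele config with lm-evaluation-harness.
# "pretrained=gpt2" and num_fewshot=5 are placeholder choices, not from the commit.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=gpt2",
    tasks=["belebele_rus_Cyrl"],
    num_fewshot=5,
)
print(results["results"]["belebele_rus_Cyrl"])  # acc and acc_norm, per the template
```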
lm-evaluation/lm_eval/tasks/belebele/belebele_amh_Ethi.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "amh_Ethi"
+ "include": "_default_template_yaml"
+ "task": "belebele_amh_Ethi"
+ "test_split": "amh_Ethi"
lm-evaluation/lm_eval/tasks/belebele/belebele_arb_Arab.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "arb_Arab"
+ "include": "_default_template_yaml"
+ "task": "belebele_arb_Arab"
+ "test_split": "arb_Arab"
lm-evaluation/lm_eval/tasks/belebele/belebele_asm_Beng.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "asm_Beng"
+ "include": "_default_template_yaml"
+ "task": "belebele_asm_Beng"
+ "test_split": "asm_Beng"
lm-evaluation/lm_eval/tasks/belebele/belebele_ben_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "ben_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_ben_Latn"
+ "test_split": "ben_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_est_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "est_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_est_Latn"
+ "test_split": "est_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_fuv_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "fuv_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_fuv_Latn"
+ "test_split": "fuv_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_hin_Deva.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "hin_Deva"
+ "include": "_default_template_yaml"
+ "task": "belebele_hin_Deva"
+ "test_split": "hin_Deva"
lm-evaluation/lm_eval/tasks/belebele/belebele_ilo_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "ilo_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_ilo_Latn"
+ "test_split": "ilo_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_ind_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "ind_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_ind_Latn"
+ "test_split": "ind_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_isl_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "isl_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_isl_Latn"
+ "test_split": "isl_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_jpn_Jpan.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "jpn_Jpan"
+ "include": "_default_template_yaml"
+ "task": "belebele_jpn_Jpan"
+ "test_split": "jpn_Jpan"
lm-evaluation/lm_eval/tasks/belebele/belebele_kac_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "kac_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_kac_Latn"
+ "test_split": "kac_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_kat_Geor.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "kat_Geor"
+ "include": "_default_template_yaml"
+ "task": "belebele_kat_Geor"
+ "test_split": "kat_Geor"
lm-evaluation/lm_eval/tasks/belebele/belebele_khm_Khmr.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "khm_Khmr"
+ "include": "_default_template_yaml"
+ "task": "belebele_khm_Khmr"
+ "test_split": "khm_Khmr"
lm-evaluation/lm_eval/tasks/belebele/belebele_lug_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "lug_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_lug_Latn"
+ "test_split": "lug_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_mkd_Cyrl.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "mkd_Cyrl"
+ "include": "_default_template_yaml"
+ "task": "belebele_mkd_Cyrl"
+ "test_split": "mkd_Cyrl"
lm-evaluation/lm_eval/tasks/belebele/belebele_mri_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "mri_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_mri_Latn"
+ "test_split": "mri_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_nld_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "nld_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_nld_Latn"
+ "test_split": "nld_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_nob_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "nob_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_nob_Latn"
+ "test_split": "nob_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_npi_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "npi_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_npi_Latn"
+ "test_split": "npi_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_pes_Arab.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "pes_Arab"
+ "include": "_default_template_yaml"
+ "task": "belebele_pes_Arab"
+ "test_split": "pes_Arab"
lm-evaluation/lm_eval/tasks/belebele/belebele_plt_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "plt_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_plt_Latn"
+ "test_split": "plt_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_pol_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "pol_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_pol_Latn"
+ "test_split": "pol_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_por_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "por_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_por_Latn"
+ "test_split": "por_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_ron_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "ron_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_ron_Latn"
+ "test_split": "ron_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_rus_Cyrl.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "rus_Cyrl"
+ "include": "_default_template_yaml"
+ "task": "belebele_rus_Cyrl"
+ "test_split": "rus_Cyrl"
lm-evaluation/lm_eval/tasks/belebele/belebele_shn_Mymr.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "shn_Mymr"
+ "include": "_default_template_yaml"
+ "task": "belebele_shn_Mymr"
+ "test_split": "shn_Mymr"
lm-evaluation/lm_eval/tasks/belebele/belebele_slk_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "slk_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_slk_Latn"
+ "test_split": "slk_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_slv_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "slv_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_slv_Latn"
+ "test_split": "slv_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_som_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "som_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_som_Latn"
+ "test_split": "som_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_swe_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "swe_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_swe_Latn"
+ "test_split": "swe_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_tel_Telu.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "tel_Telu"
+ "include": "_default_template_yaml"
+ "task": "belebele_tel_Telu"
+ "test_split": "tel_Telu"
lm-evaluation/lm_eval/tasks/belebele/belebele_tsn_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "tsn_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_tsn_Latn"
+ "test_split": "tsn_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_uzn_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "uzn_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_uzn_Latn"
+ "test_split": "uzn_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_yor_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "yor_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_yor_Latn"
+ "test_split": "yor_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_zho_Hans.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "zho_Hans"
+ "include": "_default_template_yaml"
+ "task": "belebele_zho_Hans"
+ "test_split": "zho_Hans"
lm-evaluation/lm_eval/tasks/eus_trivia/README.md ADDED
@@ -0,0 +1,54 @@
+ # EusTrivia
+
+ ### Paper
+
+ Title: Latxa: An Open Language Model and Evaluation Suite for Basque
+
+ Abstract: https://arxiv.org/abs/2403.20266
+
+ EusTrivia consists of 1,715 trivia questions from multiple online sources. 56.3% of the questions are elementary level (grades 3-6), while the rest are considered challenging. A significant portion of the questions focus specifically on the Basque Country, its language and culture. Each multiple-choice question contains two, three or four choices (3.84 on average) and a single correct answer. Five areas of knowledge are covered:
+
+ - **Humanities and Natural Sciences** (27.8%): This category encompasses questions about history, geography, biology, ecology and other social and natural sciences.
+ - **Leisure and Art** (24.5%): This category includes questions on sports and athletes, performative and plastic arts and artists, architecture, cultural events, and related topics.
+ - **Music** (16.0%): This category groups all questions about music and musicians, both classical and contemporary.
+ - **Language and Literature** (17.1%): This category is concerned with all kinds of literary productions and writers, as well as metalinguistic questions (e.g., definitions, synonyms, and word usage).
+ - **Mathematics and ICT** (14.5%): This category covers mathematical problems and questions about ICT, as well as questions about people known for their contributions to these fields of knowledge.
+
+ Homepage: https://github.com/hitz-zentroa/latxa
+
+
+ ### Citation
+
+ ```
+ @misc{etxaniz2024latxa,
+     title={Latxa: An Open Language Model and Evaluation Suite for Basque},
+     author={Julen Etxaniz and Oscar Sainz and Naiara Perez and Itziar Aldabe and German Rigau and Eneko Agirre and Aitor Ormazabal and Mikel Artetxe and Aitor Soroa},
+     year={2024},
+     eprint={2403.20266},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ There are no groups.
+
+ #### Tasks
+
+ * `eus_trivia`: EusTrivia consists of 1,715 trivia questions from multiple online sources.
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [ ] Is the task an existing benchmark in the literature?
+   * [ ] Have you referenced the original paper that introduced the task?
+   * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/lm_eval/tasks/eus_trivia/eus_trivia.yaml ADDED
@@ -0,0 +1,16 @@
+ dataset_path: HiTZ/EusTrivia
+ dataset_name: default
+ task: eus_trivia
+ doc_to_text: !function utils.doc_to_text
+ doc_to_choice: !function utils.doc_to_choice
+ validation_split: null
+ test_split: test
+ fewshot_split: test
+ output_type: multiple_choice
+ doc_to_target: answer
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 0.0
lm-evaluation/lm_eval/tasks/eus_trivia/utils.py ADDED
@@ -0,0 +1,41 @@
+ from typing import List
+
+
+ letters = ["A", "B", "C", "D"]
+
+
+ def doc_to_text(doc) -> str:
+     """
+     Converts a document to a formatted string.
+
+     Args:
+         doc (dict): A dictionary containing the document information.
+
+     Returns:
+         str: A formatted string containing the question and answer choices.
+     """
+     candidates = doc["candidates"]
+     num_choices = len(candidates)
+     if num_choices < 2:
+         raise ValueError("Invalid number of candidates")
+     choices = letters[:num_choices]
+     formatted_choices = "\n".join(
+         [f"{choice}: {candidates[i]}" for i, choice in enumerate(choices)]
+     )
+     return f"Galdera: {doc['question']}\n{formatted_choices}\nErantzuna:"
+
+
+ def doc_to_choice(doc) -> List[str]:
+     """
+     Returns the answer choices for a document.
+
+     Args:
+         doc (dict): A dictionary containing the document information.
+
+     Returns:
+         list: A list of strings containing the answer choices.
+     """
+     num_choices = len(doc["candidates"])
+     if num_choices < 2:
+         raise ValueError("Invalid number of candidates")
+     return letters[:num_choices]
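For reference (not part of the commit), this is how the two helpers above behave on a made-up document that follows the HiTZ/EusTrivia schema they assume:

```python
# Hypothetical EusTrivia-style document; the question text is illustrative.
doc = {
    "question": "Zein da Gipuzkoako hiriburua?",  # "What is the capital of Gipuzkoa?"
    "candidates": ["Bilbo", "Donostia", "Gasteiz"],
}
print(doc_to_text(doc))
# Galdera: Zein da Gipuzkoako hiriburua?
# A: Bilbo
# B: Donostia
# C: Gasteiz
# Erantzuna:
print(doc_to_choice(doc))  # ['A', 'B', 'C']
```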
lm-evaluation/lm_eval/tasks/gpqa/cot_n_shot/_gpqa_cot_n_shot_yaml ADDED
@@ -0,0 +1,38 @@
+ dataset_path: Idavidrein/gpqa
+ group: gpqa
+ output_type: generate_until
+ process_docs: !function utils.process_docs
+ training_split: train
+ # Because the huggingface dataset only has a train split
+ validation_split: train
+ test_split: null
+ description: "Here are some example questions from experts. Answer the final question yourself, following the format of the previous questions exactly.\n"
+ doc_to_text: "Question: {{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nLet's think step by step: "
+ doc_to_target: answer
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "(?<=The answer is )(.*)(?=.)"
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: "multi_choice_regex"
+         group_select: -1
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
+ generation_kwargs:
+   until:
+     - "</s>"
+   do_sample: false
+   temperature: 0.0
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+     ignore_case: true
+     ignore_punctuation: true
+ metadata:
+   version: 1.0
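The "strict-match" filter above relies on the model emitting a literal "The answer is ..." sentence; the lookbehind pattern then captures everything after that phrase except the trailing character. A quick standalone check (not from this commit) of that behavior:

```python
import re

# Hypothetical completion; only the final sentence matters for strict-match.
completion = "Let's think step by step: options A and B are too small. The answer is (C)."
match = re.search(r"(?<=The answer is )(.*)(?=.)", completion)
print(match.group(1))  # -> (C)  (greedy .* backs off one char to satisfy the lookahead)
```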
lm-evaluation/lm_eval/tasks/gpqa/cot_n_shot/gpqa_main_cot_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_main
+ include: _gpqa_cot_n_shot_yaml
+ task: gpqa_main_cot_n_shot
lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/_generate_configs.py ADDED
@@ -0,0 +1,26 @@
+ import yaml
+ from tqdm import tqdm
+
+
+ def main() -> None:
+     subset = ["extended", "diamond", "main"]
+     setting = "cot_zeroshot"
+     for task in tqdm(subset):
+         file_name = f"gpqa_{task}_{setting}.yaml"
+         try:
+             with open(f"{file_name}", "w") as f:
+                 f.write("# Generated by _generate_configs.py\n")
+                 yaml.dump(
+                     {
+                         "include": f"_gpqa_{setting}_yaml",
+                         "task": f"gpqa_{task}_{setting}",
+                         "dataset_name": f"gpqa_{task}",
+                     },
+                     f,
+                 )
+         except FileExistsError:
+             pass
+
+
+ if __name__ == "__main__":
+     main()
lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/_gpqa_cot_zeroshot_yaml ADDED
@@ -0,0 +1,38 @@
+ dataset_path: Idavidrein/gpqa
+ group: gpqa
+ output_type: generate_until
+ process_docs: !function utils.process_docs
+ training_split: train
+ # Because the huggingface dataset only has a train split
+ validation_split: train
+ test_split: null
+ doc_to_text: "What is the correct answer to this question:{{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nLet's think step by step: "
+ doc_to_target: answer
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "(?<=The answer is )(.*)(?=.)"
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: "multi_choice_regex"
+         group_select: -1
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
+ generation_kwargs:
+   until:
+     - "</s>"
+   do_sample: false
+   temperature: 0.0
+ num_fewshot: 0
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+     ignore_case: true
+     ignore_punctuation: true
+ metadata:
+   version: 1.0
lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_diamond_cot_zeroshot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_diamond
+ include: _gpqa_cot_zeroshot_yaml
+ task: gpqa_diamond_cot_zeroshot
lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_extended_cot_zeroshot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_extended
+ include: _gpqa_cot_zeroshot_yaml
+ task: gpqa_extended_cot_zeroshot
lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_main_cot_zeroshot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_main
+ include: _gpqa_cot_zeroshot_yaml
+ task: gpqa_main_cot_zeroshot
lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot/utils.py ADDED
@@ -0,0 +1,39 @@
+ import random
+ import re
+
+ import datasets
+
+
+ def preprocess(text):
+     if text is None:
+         return " "
+     text = text.strip()
+     text = text.replace(" [title]", ". ")
+     text = re.sub("\\[.*?\\]", "", text)
+     text = text.replace("  ", " ")
+     return text
+
+
+ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
+     def _process_doc(doc):
+         choices = [
+             preprocess(doc["Incorrect Answer 1"]),
+             preprocess(doc["Incorrect Answer 2"]),
+             preprocess(doc["Incorrect Answer 3"]),
+             preprocess(doc["Correct Answer"]),
+         ]
+
+         random.shuffle(choices)
+         correct_answer_index = choices.index(preprocess(doc["Correct Answer"]))
+
+         out_doc = {
+             "choice1": choices[0],
+             "choice2": choices[1],
+             "choice3": choices[2],
+             "choice4": choices[3],
+             "choices": [choices[0], choices[1], choices[2], choices[3]],
+             "answer": f"({chr(65 + correct_answer_index)})",
+         }
+         return out_doc
+
+     return dataset.map(_process_doc)
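As a sanity check (not part of the commit), the sketch below runs `process_docs` over one made-up row; note that `random.shuffle` is unseeded, so the letter assigned to the correct answer can vary between runs.

```python
import datasets

# Made-up GPQA-style row with the column names the mapper expects.
row = {
    "Correct Answer": "4",
    "Incorrect Answer 1": "2",
    "Incorrect Answer 2": "8",
    "Incorrect Answer 3": "16",
}
processed = process_docs(datasets.Dataset.from_list([row]))[0]
print(processed["choices"], processed["answer"])  # e.g. ['8', '4', '2', '16'] (B)
```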
lm-evaluation/lm_eval/tasks/gpqa/n_shot/_generate_configs.py ADDED
@@ -0,0 +1,26 @@
+ import yaml
+ from tqdm import tqdm
+
+
+ def main() -> None:
+     subset = ["extended", "diamond", "main"]
+
+     for task in tqdm(subset):
+         file_name = f"gpqa_{task}_n_shot.yaml"
+         try:
+             with open(f"{file_name}", "w") as f:
+                 f.write("# Generated by _generate_configs.py\n")
+                 yaml.dump(
+                     {
+                         "include": "_gpqa_n_shot_yaml",
+                         "task": f"gpqa_{task}_n_shot",
+                         "dataset_name": f"gpqa_{task}",
+                     },
+                     f,
+                 )
+         except FileExistsError:
+             pass
+
+
+ if __name__ == "__main__":
+     main()
lm-evaluation/lm_eval/tasks/gpqa/n_shot/_gpqa_n_shot_yaml ADDED
@@ -0,0 +1,21 @@
+ dataset_path: Idavidrein/gpqa
+ group: gpqa
+ output_type: multiple_choice
+ process_docs: !function utils.process_docs
+ training_split: train
+ # Because the huggingface dataset only has a train split
+ validation_split: train
+ test_split: null
+ description: "Here are some example questions from experts. Answer the final question yourself, following the format of the previous questions exactly.\n"
+ doc_to_text: "Question: {{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nAnswer:"
+ doc_to_target: answer
+ doc_to_choice: ["(A)", "(B)", "(C)", "(D)"]
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 1.0
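Unlike the generate_until variants above, this n_shot template is a `multiple_choice` task: the harness scores each entry of `doc_to_choice` as a continuation of the prompt rather than parsing generated text. A rough sketch of that selection rule (not harness code, with made-up scores):

```python
# Toy illustration of multiple_choice selection; the logprobs are hypothetical.
choices = ["(A)", "(B)", "(C)", "(D)"]
logprobs = {"(A)": -4.1, "(B)": -2.3, "(C)": -3.7, "(D)": -5.0}
prediction = max(choices, key=lambda c: logprobs[c])
print(prediction)  # (B) -> compared against the doc's "answer" field for acc
```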