applied-ai-018 committed
Commit 800c031 · verified · 1 Parent(s): 7aa368c

Add files using upload-large-folder tool

Files changed (50)
  1. lm-evaluation/lm_eval/tasks/basqueglue/README.md +72 -0
  2. lm-evaluation/lm_eval/tasks/basqueglue/bec.yaml +16 -0
  3. lm-evaluation/lm_eval/tasks/basqueglue/qnli.yaml +16 -0
  4. lm-evaluation/lm_eval/tasks/basqueglue/vaxx.yaml +16 -0
  5. lm-evaluation/lm_eval/tasks/bbh/cot_fewshot/geometric_shapes.yaml +5 -0
  6. lm-evaluation/lm_eval/tasks/bbh/cot_fewshot/logical_deduction_three_objects.yaml +5 -0
  7. lm-evaluation/lm_eval/tasks/bbh/cot_fewshot/snarks.yaml +5 -0
  8. lm-evaluation/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_five_objects.yaml +19 -0
  9. lm-evaluation/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_seven_objects.yaml +19 -0
  10. lm-evaluation/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_three_objects.yaml +19 -0
  11. lm-evaluation/lm_eval/tasks/bbh/cot_zeroshot/navigate.yaml +17 -0
  12. lm-evaluation/lm_eval/tasks/bbh/cot_zeroshot/penguins_in_a_table.yaml +19 -0
  13. lm-evaluation/lm_eval/tasks/bbh/cot_zeroshot/snarks.yaml +19 -0
  14. lm-evaluation/lm_eval/tasks/bbh/cot_zeroshot/word_sorting.yaml +15 -0
  15. lm-evaluation/lm_eval/tasks/benchmarks/flan/_held_in_template_yaml +14 -0
  16. lm-evaluation/lm_eval/tasks/benchmarks/flan/flan_held_in.yaml +331 -0
  17. lm-evaluation/lm_eval/tasks/benchmarks/multimedqa/README.md +43 -0
  18. lm-evaluation/lm_eval/tasks/benchmarks/multimedqa/multimedqa.yaml +17 -0
  19. lm-evaluation/lm_eval/tasks/code_x_glue/code-text/bleu.py +241 -0
  20. lm-evaluation/lm_eval/tasks/code_x_glue/code-text/python.yaml +21 -0
  21. lm-evaluation/lm_eval/tasks/coqa/README.md +43 -0
  22. lm-evaluation/lm_eval/tasks/coqa/default.yaml +24 -0
  23. lm-evaluation/lm_eval/tasks/coqa/utils.py +77 -0
  24. lm-evaluation/lm_eval/tasks/eq_bench/README.md +55 -0
  25. lm-evaluation/lm_eval/tasks/eq_bench/default.yaml +20 -0
  26. lm-evaluation/lm_eval/tasks/eq_bench/utils.py +54 -0
  27. lm-evaluation/lm_eval/tasks/lambada_cloze/README.md +56 -0
  28. lm-evaluation/lm_eval/tasks/lambada_cloze/lambada_standard_cloze.yaml +21 -0
  29. lm-evaluation/lm_eval/tasks/medmcqa/medmcqa.yaml +18 -0
  30. lm-evaluation/lm_eval/tasks/medmcqa/utils_medmcqa.py +24 -0
  31. lm-evaluation/lm_eval/tasks/mgsm/README.md +94 -0
  32. lm-evaluation/lm_eval/tasks/mgsm/direct/direct_yaml +35 -0
  33. lm-evaluation/lm_eval/tasks/mgsm/direct/mgsm_direct_bn.yaml +12 -0
  34. lm-evaluation/lm_eval/tasks/mgsm/direct/mgsm_direct_de.yaml +12 -0
  35. lm-evaluation/lm_eval/tasks/mgsm/direct/mgsm_direct_en.yaml +12 -0
  36. lm-evaluation/lm_eval/tasks/mgsm/direct/mgsm_direct_es.yaml +12 -0
  37. lm-evaluation/lm_eval/tasks/mgsm/direct/mgsm_direct_fr.yaml +12 -0
  38. lm-evaluation/lm_eval/tasks/mgsm/direct/mgsm_direct_ja.yaml +12 -0
  39. lm-evaluation/lm_eval/tasks/mgsm/direct/mgsm_direct_ru.yaml +12 -0
  40. lm-evaluation/lm_eval/tasks/mgsm/direct/mgsm_direct_sw.yaml +12 -0
  41. lm-evaluation/lm_eval/tasks/mgsm/direct/mgsm_direct_th.yaml +12 -0
  42. lm-evaluation/lm_eval/tasks/mgsm/direct/mgsm_direct_zh.yaml +12 -0
  43. lm-evaluation/lm_eval/tasks/mgsm/en_cot/cot_yaml +36 -0
  44. lm-evaluation/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_bn.yaml +12 -0
  45. lm-evaluation/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_de.yaml +12 -0
  46. lm-evaluation/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_en.yaml +12 -0
  47. lm-evaluation/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_es.yaml +12 -0
  48. lm-evaluation/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_fr.yaml +12 -0
  49. lm-evaluation/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_ja.yaml +12 -0
  50. lm-evaluation/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_ru.yaml +12 -0
lm-evaluation/lm_eval/tasks/basqueglue/README.md ADDED
@@ -0,0 +1,72 @@
 + # BasqueGLUE
 +
 + ### Paper
 +
 + Title: `BasqueGLUE: A Natural Language Understanding Benchmark for Basque`
 +
 + Abstract: `https://aclanthology.org/2022.lrec-1.172/`
 +
 + Natural Language Understanding (NLU) technology has improved significantly over the last few years and multitask benchmarks such as GLUE are key to evaluate this improvement in a robust and general way. These benchmarks take into account a wide and diverse set of NLU tasks that require some form of language understanding, beyond the detection of superficial, textual clues. However, they are costly to develop and language-dependent, and therefore they are only available for a small number of languages. In this paper, we present BasqueGLUE, the first NLU benchmark for Basque, a less-resourced language, which has been elaborated from previously existing datasets and following similar criteria to those used for the construction of GLUE and SuperGLUE. We also report the evaluation of two state-of-the-art language models for Basque on BasqueGLUE, thus providing a strong baseline to compare upon. BasqueGLUE is freely available under an open license.
 +
 + Homepage: `https://github.com/orai-nlp/BasqueGLUE`
 +
 + Title: `Latxa: An Open Language Model and Evaluation Suite for Basque`
 +
 + Abstract: `https://arxiv.org/abs/2403.20266`
 +
 + This paper presents the use of BasqueGLUE for evaluating decoder models in Basque.
 +
 + Homepage: `https://github.com/hitz-zentroa/latxa`
 +
 + ### Citation
 +
 + ```
 + @InProceedings{urbizu2022basqueglue,
 +     author = {Urbizu, Gorka and San Vicente, Iñaki and Saralegi, Xabier and Agerri, Rodrigo and Soroa, Aitor},
 +     title = {BasqueGLUE: A Natural Language Understanding Benchmark for Basque},
 +     booktitle = {Proceedings of the Language Resources and Evaluation Conference},
 +     month = {June},
 +     year = {2022},
 +     address = {Marseille, France},
 +     publisher = {European Language Resources Association},
 +     pages = {1603--1612},
 +     url = {https://aclanthology.org/2022.lrec-1.172}
 + }
 +
 + @misc{etxaniz2024latxa,
 +     title={Latxa: An Open Language Model and Evaluation Suite for Basque},
 +     author={Julen Etxaniz and Oscar Sainz and Naiara Perez and Itziar Aldabe and German Rigau and Eneko Agirre and Aitor Ormazabal and Mikel Artetxe and Aitor Soroa},
 +     year={2024},
 +     eprint={2403.20266},
 +     archivePrefix={arXiv},
 +     primaryClass={cs.CL}
 + }
 + ```
 +
 + ### Groups and Tasks
 +
 + #### Groups
 +
 + * `basque-glue`: First version of the implementation
 +
 + #### Tasks
 +
 + * `bhtc_v2`: Topic classification of news extracts with 12 categories.
 + * `bec`: Sentiment analysis on tweets about the campaign for the 2016 Basque elections.
 + * `vaxx_stance`: Stance detection on tweets around the anti-vaccine movement.
 + * `qnlieu`: Q&A NLI as in [glue/qnli](../glue/qnli).
 + * `wiceu`: Word-in-Context as in [super_glue/wic](../super_glue/wic).
 + * `epec_korref_bin`: Coreference detection as in [super_glue/wsc](../super_glue/wsc).
 +
 + ### Checklist
 +
 + For adding novel benchmarks/datasets to the library:
 + * [ ] Is the task an existing benchmark in the literature?
 + * [ ] Have you referenced the original paper that introduced the task?
 + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
 +
 +
 + If other tasks on this dataset are already supported:
 + * [ ] Is the "Main" variant of this task clearly denoted?
 + * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
 + * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/lm_eval/tasks/basqueglue/bec.yaml ADDED
@@ -0,0 +1,16 @@
 + group: basque-glue
 + task: bec2016eu
 + dataset_path: orai-nlp/basqueGLUE
 + dataset_name: bec
 + output_type: multiple_choice
 + validation_split: validation
 + test_split: test
 + doc_to_text: "Testua: {{text}}\nGaldera: Nolako jarrera agertzen du aurreko testuak?\nErantzuna:"
 + doc_to_target: label
 + doc_to_choice: ['negatiboa', 'neutrala', 'positiboa']
 + metric_list:
 +   - metric: f1
 +     aggregation: !function utils.micro_f1_score
 +     higher_is_better: true
 + metadata:
 +   - version: 1.0
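
The `!function utils.micro_f1_score` aggregation here (and `utils.vaxx_f1_score` in `vaxx.yaml` below) points at a helper module, `utils.py`, that is not included in this diff. A minimal sketch of such a micro-F1 aggregation, assuming scikit-learn and assuming the harness hands the aggregation a list of `(gold, prediction)` pairs:

```python
# Hypothetical sketch only -- the real basqueglue utils.py is not shown in this diff.
from sklearn.metrics import f1_score


def micro_f1_score(items):
    # Assumed item format: one (gold_label, predicted_label) tuple per document.
    golds, preds = zip(*items)
    return f1_score(golds, preds, average="micro")
```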
lm-evaluation/lm_eval/tasks/basqueglue/qnli.yaml ADDED
@@ -0,0 +1,16 @@
 + group: basque-glue
 + task: qnlieu
 + dataset_path: orai-nlp/basqueGLUE
 + dataset_name: qnli
 + output_type: multiple_choice
 + validation_split: validation
 + test_split: test
 + doc_to_text: "{{question}}\n{{sentence}}\nGaldera: aurreko galderari erantzuten al dio emandako testuak?\nErantzuna:"
 + doc_to_target: label
 + doc_to_choice: ['bai', 'ez']
 + metric_list:
 +   - metric: acc
 +     aggregation: mean
 +     higher_is_better: true
 + metadata:
 +   - version: 1.0
lm-evaluation/lm_eval/tasks/basqueglue/vaxx.yaml ADDED
@@ -0,0 +1,16 @@
 + group: basque-glue
 + task: vaxx_stance
 + dataset_path: orai-nlp/basqueGLUE
 + dataset_name: vaxx
 + output_type: multiple_choice
 + validation_split: validation
 + test_split: test
 + doc_to_text: "Testua: {{text}}\nGaldera: Nolako jarrera agertzen du aurreko testuak txertoei buruz?\nErantzuna:"
 + doc_to_target: label
 + doc_to_choice: ['aurka', 'neutrala', 'alde']
 + metric_list:
 +   - metric: f1
 +     aggregation: !function utils.vaxx_f1_score
 +     higher_is_better: true
 + metadata:
 +   - version: 1.0
lm-evaluation/lm_eval/tasks/bbh/cot_fewshot/geometric_shapes.yaml ADDED
@@ -0,0 +1,5 @@
 + "dataset_name": "geometric_shapes"
 + "description": "Name geometric shapes from their SVG paths.\n\n"
+ "doc_to_text": "Q: This SVG path element <path d=\"M 31.00,73.00 L 32.00,59.00 L 44.00,50.00 L 49.00,41.00 L 64.00,37.00 L 71.00,55.00 L 64.00,76.00 L 52.00,61.00 L 31.00,73.00\"/> draws a\nOptions:\n(A) circle\n(B) heptagon\n(C) hexagon\n(D) kite\n(E) line\n(F) octagon\n(G) pentagon\n(H) rectangle\n(I) sector\n(J) triangle\nA: Let's think step by step.\nThis SVG path element contains \"M\" and \"L\" commands. M takes two parameters (x,y) and moves the current point to the coordinates (x,y). L takes two parameters (x,y) and draws a line from the previous coordinate to the new coordinate (x,y).\nThis path can be decomposed into 9 separate commands.\n(1) M 31.00,73.00: Move the current point to 31.00,73.00.\n(2) L 32.00,59.00: Create a line from 31.00,73.00 to 32.00,59.00.\n(3) L 44.00,50.00: Create a line from 32.00,59.00 to 44.00,50.00.\n(4) L 49.00,41.00: Create a line from 44.00,50.00 to 49.00,41.00.\n(5) L 64.00,37.00: Create a line from 49.00,41.00 to 64.00,37.00.\n(6) L 71.00,55.00: Create a line from 64.00,37.00 to 71.00,55.00.\n(7) L 64.00,76.00: Create a line from 71.00,55.00 to 64.00,76.00.\n(8) L 52.00,61.00: Create a line from 64.00,76.00 to 52.00,61.00.\n(9) L 31.00,73.00: Create a line from 52.00,61.00 to 31.00,73.00.\nThis SVG path starts at point 31.00,73.00, creates eight consecutive and touching lines, and then returns back its starting point, thereby creating an eight-sided shape. It does not have any curves or arches. \"octagon\" is the only eight-sided object on the list. So the answer is (F).\n\nQ: This SVG path element <path d=\"M 14.19,26.04 L 51.43,39.21 L 58.44,36.69 L 56.63,30.17 L 48.53,26.66 L 14.19,26.04\"/> draws a\nOptions:\n(A) circle\n(B) heptagon\n(C) hexagon\n(D) kite\n(E) line\n(F) octagon\n(G) pentagon\n(H) rectangle\n(I) sector\n(J) triangle\nA: Let's think step by step.\nThis SVG path element contains \"M\" and \"L\" commands. M takes two parameters (x,y) and moves the current point to the coordinates (x,y). L takes two parameters (x,y) and draws a line from the previous coordinate to the new coordinate (x,y).\nThis path can be decomposed into 6 separate commands.\n(1) M 14.19,26.04: Move the current point to 14.19,26.04.\n(2) L 51.43,39.21: Create a line from 14.19,26.04 to 51.43,39.21.\n(3) L 58.44,36.69: Create a line from 51.43,39.21 to 58.44,36.69.\n(4) L 56.63,30.17: Create a line from 58.44,36.69 to 56.63,30.17.\n(5) L 48.53,26.66: Create a line from 56.63,30.17 to 48.53,26.66.\n(6) L 14.19,26.04: Create a line from 48.53,26.66 to 14.19,26.04.\nThis SVG path starts at point 14.19,26.04, creates five consecutive and touching lines, and then returns back its starting point, thereby creating a five-sided shape. It does not have any curves or arches. \"pentagon\" is the only five-sided polygon on the list. So the answer is (G).\n\nQ: This SVG path element <path d=\"M 41.00,43.00 L 37.00,34.00 L 41.00,33.00 L 45.00,34.00 L 41.00,43.00\"/> draws a\nOptions:\n(A) circle\n(B) heptagon\n(C) hexagon\n(D) kite\n(E) line\n(F) octagon\n(G) pentagon\n(H) rectangle\n(I) sector\n(J) triangle\nA: Let's think step by step.\nThis SVG path element contains \"M\" and \"L\" commands. M takes two parameters (x,y) and moves the current point to the coordinates (x,y). 
L takes two parameters (x,y) and draws a line from the previous coordinate to the new coordinate (x,y).\nThis path can be decomposed into 5 separate commands.\n(1) M 41.00,43.00: Move the current point to 41.00,43.00.\n(2) L 37.00,34.00: Create a line from 41.00,43.00 to 37.00,34.00.\n(3) L 41.00,33.00: Create a line from 37.00,34.00 to 41.00,33.00.\n(4) L 45.00,34.00: Create a line from 41.00,33.00 to 45.00,34.00.\n(5) L 41.00,43.00: Create a line from 45.00,34.00 to 41.00,43.00.\nThis SVG path starts at point 41.00,43.00, creates four consecutive and touching lines, and then returns back its starting point, thereby creating a four-sided shape. \"kite\" and \"rectangle\" are the only two four-sided polygons on the list. So, we need to determine which one is the correct answer.\nA kite has two pairs of equal-length adjacent sides, whereas a rectangle has two pairs of equal-length alternate (opposite) sides. Now, let's check whether the two adjacent sides of this shape are equal.\nLength of side A: |A| = sqrt((41.00-37.00)^2 + (43.00-34.00)^2) = sqrt((4)^2 + (9)^2) = sqrt(16 + 81) = sqrt(97).\nLength of side B: |B| = sqrt((37.00-41.00)^2 + (34.00-33.00)^2)) = sqrt((4)^2 + (1)^2) = sqrt(16 + 1) = sqrt(17).\nLength of side C: |C| = sqrt((41.00-45.00)^2 + (33.00-34.00)^2)) = sqrt((-4)^2 + (-1)^2) = sqrt(16 + 1) = sqrt(17).\nLength of side D: |D| = sqrt((45.00-41.00)^2 + (34.00-43.00)^2)) = sqrt((4)^2 + (-9)^2) = sqrt(16 + 81) = sqrt(97).\nNote that |A| = |D| and |B| = |C|. Furthermore, A and D are adjacent and B and C are adjacent. Thus, this polygon has two pairs of equal-length adjacent sides and is \"kite\". So the answer is (D).\n\nQ: {{input}}\nA: Let's think step by step.\n"
 + "include": "_cot_fewshot_template_yaml"
 + "task": "bbh_cot_fewshot_geometric_shapes"
lm-evaluation/lm_eval/tasks/bbh/cot_fewshot/logical_deduction_three_objects.yaml ADDED
@@ -0,0 +1,5 @@
 + "dataset_name": "logical_deduction_three_objects"
 + "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n"
+ "doc_to_text": "Q: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. In a golf tournament, there were three golfers: Amy, Eli, and Eve. Eve finished above Amy. Eli finished below Amy.\nOptions:\n(A) Amy finished last\n(B) Eli finished last\n(C) Eve finished last\nA: Let's think step by step.\n(1) Eve finished above Amy: \"(above) ? Eve ? Amy ? (below)\".\n(2) Eli finished below Amy: \"(above) ? Amy ? Eli ? (below)\".\n(3) Combining (1) and (2) we get the following ordering: \"(above) Eve Amy Eli (below)\".\nAccording to this ordering, the person who finished last (the one at the bottom of this list) is Eli.\nEli finished last. So the answer is (B).\n\nQ: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a white book, a green book, and an orange book. The green book is to the right of the white book. The orange book is the rightmost.\nOptions:\n(A) The white book is the leftmost\n(B) The green book is the leftmost\n(C) The orange book is the leftmost\nA: Let's think step by step.\n(1) The green book is to the right of the white book: \"(left) ? white ? green ? (right)\".\n(2) The orange book is the rightmost: \"(left) ? white ? green orange (right)\".\n(3) Combining (1) and (2) we get the following ordering: \"(left) white green orange (right)\".\nAccording to this ordering, the leftmost book is the white book.\nThe white book is the leftmost. So the answer is (A).\n\nQ: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a red book, a gray book, and a white book. The white book is to the left of the gray book. The red book is the second from the left.\nOptions:\n(A) The red book is the leftmost\n(B) The gray book is the leftmost\n(C) The white book is the leftmost\nA: Let's think step by step.\n(1) The white book is to the left of the gray book: \"(left) ? white ? gray ? (right)\".\n(2) The red book is the second from the left: \"(left) ? white red gray ? (right)\".\n(3) Combining (1) and (2) we get the following ordering: \"(left) white red gray (right)\".\nAccording to this ordering, the leftmost book is the white book.\nThe white book is the leftmost. So the answer is (C).\n\nQ: {{input}}\nA: Let's think step by step.\n"
 + "include": "_cot_fewshot_template_yaml"
 + "task": "bbh_cot_fewshot_logical_deduction_three_objects"
lm-evaluation/lm_eval/tasks/bbh/cot_fewshot/snarks.yaml ADDED
@@ -0,0 +1,5 @@
 + "dataset_name": "snarks"
 + "description": "Determine which of two sentences is sarcastic.\n\nAccording to Cambridge University Dictionary, sarcasm is \"the use of remarks that clearly mean the opposite of what they say, made in order to hurt someone's feelings or to criticize something in a humorous way.\" Sarcastic sentences often contain satirical or ironic utterances, hyperboles, ambivalent or witty remarks.\n\n"
+ "doc_to_text": "Q: Which statement is sarcastic?\nOptions:\n(A) Yes, because having interests and actively researching them is a huge waste\n(B) Yes, because having interests and actively researching them is a huge deal\nA: Let's think step by step.\nIf we look at (A), it says that having interests and actively researching them is a huge waste, implying that it is a useless effort. However, we know that having interests and actively researching them is typically not a waste but rather is beneficial to the individual. The presence of such a juxtaposition in (A) suggests that it contains a taste of irony and sarcasm.\nIf we look at (B), it says that having interests and actively researching them is a huge deal, implying that it is an important and consequential effort. This is arguably a neutral and correct statement.\nAbove the above, the sarcastic option is (A). So the answer is (A).\n\nQ: Which statement is sarcastic?\nOptions:\n(A) No one is going to disagree with you on this. Avoiding ad hominem attacks really help your case\n(B) No one is going to disagree with you on this. Ad hominem attacks really help your case\nA: Let's think step by step.\nIf we look at (A), it says that avoiding ad hominem attacks really help your case, implying that ad hominem attacks are adverse and injurious. Because ad hominem attacks are adressed at a person rather than an idea, it is indeed true that avoiding them is often useful and helpful; so, (A) is a neutral (valid and agreeable) statement.\nIf we look at (B), it says that ad hominem attacks really help your case, implying that ad hominem attacks are a positive thing. However, we stated previously that ad hominem attacks are often not useful or constructive. The speaker in this sentence therefore seems to mean the opposite of what they are saying; so, there appears to have a taste of irony and sarcasm in (B).\nAbove the above, the sarcastic option is (B). So the answer is (B).\n\nQ: Which statement is sarcastic?\nOptions:\n(A) Consistency in the league's punishments? What do you think this is supposed to be, politics?\n(B) Consistency in the league's punishments? What do you think this is supposed to be, moral?\nA: Let's think step by step.\nIf we look at (A), it likens the consistency in the league's punishments with that in politics. Because politics or political affairs are often not considered to be consistent or dependable, this sentence appears to be satirical.\nIf we look at (B), it likens the consistency in the league's punishments with that in morality. Discussing the consistency of the league's punishments in the context of morality, ethics, or law makes sense and does not appear to make a satirical point about anything.\nAbove the above, the sarcastic option is (A). So the answer is (A).\n\nQ: {{input}}\nA: Let's think step by step.\n"
 + "include": "_cot_fewshot_template_yaml"
 + "task": "bbh_cot_fewshot_snarks"
lm-evaluation/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_five_objects.yaml ADDED
@@ -0,0 +1,19 @@
 + "dataset_name": "logical_deduction_five_objects"
 + "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n"
 + "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
 + "include": "_cot_zeroshot_template_yaml"
 + "task": "bbh_cot_zeroshot_logical_deduction_five_objects"
 + filter_list:
 +   - name: "flexible-extract"
 +     filter:
 +       - function: !function utils.MultiChoiceRegexFilter
 +         group_select: -1
 +         ignore_case: true
 +         ignore_punctuation: true
 +         regex_pattern: "(\\([A-Z]\\))"
 +       - function: "take_first"
 +   - name: "strict-match"
 +     filter:
 +       - function: "regex"
 +         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
 +       - function: "take_first"
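
The two filter pipelines post-process the generated chain of thought before scoring: `flexible-extract` keeps the last multiple-choice marker found by `utils.MultiChoiceRegexFilter` (defined in the accompanying `utils.py`, not shown in this diff), while `strict-match` requires an explicit answer phrase. A small standalone illustration of what the strict-match pattern extracts from a toy completion:

```python
import re

# Same pattern as the strict-match filter above, applied to a made-up completion.
STRICT_PATTERN = (
    r"((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)"
    r"|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
)

completion = "Combining the clues gives the full ordering. The answer is (C)."
match = re.search(STRICT_PATTERN, completion)
print(match.group(1) if match else "[no answer found]")  # -> (C)
```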
lm-evaluation/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_seven_objects.yaml ADDED
@@ -0,0 +1,19 @@
 + "dataset_name": "logical_deduction_seven_objects"
 + "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n"
 + "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
 + "include": "_cot_zeroshot_template_yaml"
 + "task": "bbh_cot_zeroshot_logical_deduction_seven_objects"
 + filter_list:
 +   - name: "flexible-extract"
 +     filter:
 +       - function: !function utils.MultiChoiceRegexFilter
 +         group_select: -1
 +         ignore_case: true
 +         ignore_punctuation: true
 +         regex_pattern: "(\\([A-Z]\\))"
 +       - function: "take_first"
 +   - name: "strict-match"
 +     filter:
 +       - function: "regex"
 +         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
 +       - function: "take_first"
lm-evaluation/lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_three_objects.yaml ADDED
@@ -0,0 +1,19 @@
 + "dataset_name": "logical_deduction_three_objects"
 + "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n"
 + "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
 + "include": "_cot_zeroshot_template_yaml"
 + "task": "bbh_cot_zeroshot_logical_deduction_three_objects"
 + filter_list:
 +   - name: "flexible-extract"
 +     filter:
 +       - function: !function utils.MultiChoiceRegexFilter
 +         group_select: -1
 +         ignore_case: true
 +         ignore_punctuation: true
 +         regex_pattern: "(\\([A-Z]\\))"
 +       - function: "take_first"
 +   - name: "strict-match"
 +     filter:
 +       - function: "regex"
 +         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
 +       - function: "take_first"
lm-evaluation/lm_eval/tasks/bbh/cot_zeroshot/navigate.yaml ADDED
@@ -0,0 +1,17 @@
 + "dataset_name": "navigate"
 + "description": "Given a series of navigation instructions, determine whether one would end up back at the starting point.\n\n"
 + "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
 + "include": "_cot_zeroshot_template_yaml"
 + "task": "bbh_cot_zeroshot_navigate"
 + filter_list:
 +   - name: "flexible-extract"
 +     filter:
 +       - function: "regex"
 +         group_select: -1
 +         regex_pattern: "\\b(Yes|No|yes|no)\\b"
 +       - function: "take_first"
 +   - name: "strict-match"
 +     filter:
 +       - function: "regex"
 +         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
 +       - function: "take_first"
lm-evaluation/lm_eval/tasks/bbh/cot_zeroshot/penguins_in_a_table.yaml ADDED
@@ -0,0 +1,19 @@
 + "dataset_name": "penguins_in_a_table"
 + "description": "Answer questions about a table of penguins and their attributes.\n\n"
 + "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
 + "include": "_cot_zeroshot_template_yaml"
 + "task": "bbh_cot_zeroshot_penguins_in_a_table"
 + filter_list:
 +   - name: "flexible-extract"
 +     filter:
 +       - function: !function utils.MultiChoiceRegexFilter
 +         group_select: -1
 +         ignore_case: true
 +         ignore_punctuation: true
 +         regex_pattern: "(\\([A-Z]\\))"
 +       - function: "take_first"
 +   - name: "strict-match"
 +     filter:
 +       - function: "regex"
 +         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
 +       - function: "take_first"
lm-evaluation/lm_eval/tasks/bbh/cot_zeroshot/snarks.yaml ADDED
@@ -0,0 +1,19 @@
 + "dataset_name": "snarks"
 + "description": "Determine which of two sentences is sarcastic.\n\nAccording to Cambridge University Dictionary, sarcasm is \"the use of remarks that clearly mean the opposite of what they say, made in order to hurt someone's feelings or to criticize something in a humorous way.\" Sarcastic sentences often contain satirical or ironic utterances, hyperboles, ambivalent or witty remarks.\n\n"
 + "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
 + "include": "_cot_zeroshot_template_yaml"
 + "task": "bbh_cot_zeroshot_snarks"
 + filter_list:
 +   - name: "flexible-extract"
 +     filter:
 +       - function: !function utils.MultiChoiceRegexFilter
 +         group_select: -1
 +         ignore_case: true
 +         ignore_punctuation: true
 +         regex_pattern: "(\\([A-Z]\\))"
 +       - function: "take_first"
 +   - name: "strict-match"
 +     filter:
 +       - function: "regex"
 +         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
 +       - function: "take_first"
lm-evaluation/lm_eval/tasks/bbh/cot_zeroshot/word_sorting.yaml ADDED
@@ -0,0 +1,15 @@
 + "dataset_name": "word_sorting"
 + "description": "Sort a list of words.\n\n"
 + "doc_to_text": "Q: {{input}}\nA: Let's think step by step."
 + "include": "_cot_zeroshot_template_yaml"
 + "task": "bbh_cot_zeroshot_word_sorting"
 + filter_list:
 +   - name: "flexible-extract"
 +     filter:
 +       - function: !function utils.WordSortFilter
 +       - function: "take_first"
 +   - name: "strict-match"
 +     filter:
 +       - function: "regex"
 +         regex_pattern: "((?<=The answer is )(.*)(?=.)|(?<=the answer is )(.*)(?=.)|(?<=The answer: )(.*)(?=.)|(?<=The final answer: )(.*)(?=.))"
 +       - function: "take_first"
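
For `word_sorting` the reference answer is simply the input words in alphabetical order, so once a filter such as `utils.WordSortFilter` (not shown in this diff) has isolated the model's final list, scoring reduces to an exact string comparison. A toy example of that comparison, independent of the harness:

```python
# Toy example: the gold answer for a word-sorting item is the alphabetically
# sorted word list (assumed here to be space-joined, as in the BBH targets).
words = "oven costume counterpart".split()
gold = " ".join(sorted(words))

model_answer = "costume counterpart oven"
print(model_answer == gold)  # True
```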
lm-evaluation/lm_eval/tasks/benchmarks/flan/_held_in_template_yaml ADDED
@@ -0,0 +1,14 @@
 + output_type: generate_until
 + test_split: null
 + doc_to_choice: null
 + metric_list:
 +   - metric: exact_match
 +     aggregation: mean
 +     higher_is_better: true
 + generation_kwargs:
 +   until:
 +     - "</s>"
 +   do_sample: false
 +   temperature: 0.0
 + metadata:
 +   version: 1.0
lm-evaluation/lm_eval/tasks/benchmarks/flan/flan_held_in.yaml ADDED
@@ -0,0 +1,331 @@
1
+ group: flan_held_in
2
+ group_alias: Flan (Held-In)
3
+ task:
4
+ # ANLI R1
5
+ - group: anli_r1_flan
6
+ group_alias: ANLI R1
7
+ task:
8
+ - task: anli_r1
9
+ task_alias: prompt-0
10
+ include: _held_in_template_yaml
11
+ doc_to_text: "{{premise}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is"
12
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
13
+ - task: anli_r1
14
+ task_alias: prompt-1
15
+ include: _held_in_template_yaml
16
+ doc_to_text: "{{premise}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
17
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
18
+ - task: anli_r1
19
+ task_alias: prompt-2
20
+ include: _held_in_template_yaml
21
+ doc_to_text: "{{premise}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
22
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
23
+ - task: anli_r1
24
+ task_alias: prompt-3
25
+ include: _held_in_template_yaml
26
+ doc_to_text: "{{premise}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
27
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
28
+ - task: anli_r1
29
+ task_alias: prompt-4
30
+ include: _held_in_template_yaml
31
+ doc_to_text: "{{premise}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:"
32
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
33
+ - task: anli_r1
34
+ task_alias: prompt-5
35
+ include: _held_in_template_yaml
36
+ doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{premise}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n"
37
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
38
+ - task: anli_r1
39
+ task_alias: prompt-6
40
+ include: _held_in_template_yaml
41
+ doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{premise}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
42
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
43
+ - task: anli_r1
44
+ task_alias: prompt-7
45
+ include: _held_in_template_yaml
46
+ doc_to_text: "Can we draw the following hypothesis from the context (see options)? \n\nContext:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
47
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
48
+ - task: anli_r1
49
+ task_alias: prompt-8
50
+ include: _held_in_template_yaml
51
+ doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{premise}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
52
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
53
+ # ANLI R2
54
+ - group: anli_r2_flan
55
+ group_alias: ANLI R2
56
+ task:
57
+ - task: anli_r2
58
+ task_alias: prompt-0
59
+ include: _held_in_template_yaml
60
+ doc_to_text: "{{premise}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is"
61
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
62
+ - task: anli_r2
63
+ task_alias: prompt-1
64
+ include: _held_in_template_yaml
65
+ doc_to_text: "{{premise}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
66
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
67
+ - task: anli_r2
68
+ task_alias: prompt-2
69
+ include: _held_in_template_yaml
70
+ doc_to_text: "{{premise}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
71
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
72
+ - task: anli_r2
73
+ task_alias: prompt-3
74
+ include: _held_in_template_yaml
75
+ doc_to_text: "{{premise}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
76
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
77
+ - task: anli_r2
78
+ task_alias: prompt-4
79
+ include: _held_in_template_yaml
80
+ doc_to_text: "{{premise}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:"
81
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
82
+ - task: anli_r2
83
+ task_alias: prompt-5
84
+ include: _held_in_template_yaml
85
+ doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{premise}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n"
86
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
87
+ - task: anli_r2
88
+ task_alias: prompt-6
89
+ include: _held_in_template_yaml
90
+ doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{premise}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
91
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
92
+ - task: anli_r2
93
+ task_alias: prompt-7
94
+ include: _held_in_template_yaml
95
+ doc_to_text: "Can we draw the following hypothesis from the context (see options)? \n\nContext:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
96
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
97
+ - task: anli_r2
98
+ task_alias: prompt-8
99
+ include: _held_in_template_yaml
100
+ doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{premise}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
101
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
102
+ # ANLI R3
103
+ - group: anli_r3_flan
104
+ group_alias: ANLI R3
105
+ task:
106
+ - task: anli_r3
107
+ task_alias: prompt-0
108
+ include: _held_in_template_yaml
109
+ doc_to_text: "{{premise}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is"
110
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
111
+ - task: anli_r3
112
+ task_alias: prompt-1
113
+ include: _held_in_template_yaml
114
+ doc_to_text: "{{premise}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
115
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
116
+ - task: anli_r3
117
+ task_alias: prompt-2
118
+ include: _held_in_template_yaml
119
+ doc_to_text: "{{premise}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
120
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
121
+ - task: anli_r3
122
+ task_alias: prompt-3
123
+ include: _held_in_template_yaml
124
+ doc_to_text: "{{premise}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
125
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
126
+ - task: anli_r3
127
+ task_alias: prompt-4
128
+ include: _held_in_template_yaml
129
+ doc_to_text: "{{premise}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:"
130
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
131
+ - task: anli_r3
132
+ task_alias: prompt-5
133
+ include: _held_in_template_yaml
134
+ doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{premise}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n"
135
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
136
+ - task: anli_r3
137
+ task_alias: prompt-6
138
+ include: _held_in_template_yaml
139
+ doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{premise}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
140
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
141
+ - task: anli_r3
142
+ task_alias: prompt-7
143
+ include: _held_in_template_yaml
144
+ doc_to_text: "Can we draw the following hypothesis from the context (see options)? \n\nContext:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
145
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
146
+ - task: anli_r3
147
+ task_alias: prompt-8
148
+ include: _held_in_template_yaml
149
+ doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{premise}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
150
+ doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
151
+ # Arc Easy
152
+ - group: arc_easy_flan
153
+ group_alias: Arc Easy
154
+ task:
155
+ - task: arc_easy
156
+ task_alias: prompt-0
157
+ include: _held_in_template_yaml
158
+ doc_to_text: "{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
159
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
160
+ - task: arc_easy
161
+ task_alias: prompt-1
162
+ include: _held_in_template_yaml
163
+ doc_to_text: "Question: {{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}\nAnswer:"
164
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
165
+ - task: arc_easy
166
+ task_alias: prompt-2
167
+ include: _held_in_template_yaml
168
+ doc_to_text: "Question: {{question}}\n\nWhat is the correct answer to the question from the following choices?\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
169
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
170
+ - task: arc_easy
171
+ task_alias: prompt-3
172
+ include: _held_in_template_yaml
173
+ doc_to_text: "Q: {{question}}\nWhat is the correct answer to this question?\nOPTIONS:\n- {{choices.text|join('\n- ')}}...A:"
174
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
175
+ - task: arc_easy
176
+ task_alias: prompt-4
177
+ include: _held_in_template_yaml
178
+ doc_to_text: "Choose your answer?\n\n{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
179
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
180
+ - task: arc_easy
181
+ task_alias: prompt-5
182
+ include: _held_in_template_yaml
183
+ doc_to_text: "Answer the question\n\n{{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
184
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
185
+ - task: arc_easy
186
+ task_alias: prompt-6
187
+ include: _held_in_template_yaml
188
+ doc_to_text: "{{question}}\n\nPick the answer from these options\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
189
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
190
+ # Arc Challenge
191
+ - group: arc_challenge_flan
192
+ group_alias: Arc Challenge
193
+ task:
194
+ - task: arc_challenge
195
+ task_alias: prompt-0
196
+ include: _held_in_template_yaml
197
+ doc_to_text: "{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
198
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
199
+ - task: arc_challenge
200
+ task_alias: prompt-1
201
+ include: _held_in_template_yaml
202
+ doc_to_text: "Question: {{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}\nAnswer:"
203
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
204
+ - task: arc_challenge
205
+ task_alias: prompt-2
206
+ include: _held_in_template_yaml
207
+ doc_to_text: "Question: {{question}}\n\nWhat is the correct answer to the question from the following choices?\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
208
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
209
+ - task: arc_challenge
210
+ task_alias: prompt-3
211
+ include: _held_in_template_yaml
212
+ doc_to_text: "Q: {{question}}\nWhat is the correct answer to this question?\nOPTIONS:\n- {{choices.text|join('\n- ')}}...A:"
213
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
214
+ - task: arc_challenge
215
+ task_alias: prompt-4
216
+ include: _held_in_template_yaml
217
+ doc_to_text: "Choose your answer?\n\n{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
218
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
219
+ - task: arc_challenge
220
+ task_alias: prompt-5
221
+ include: _held_in_template_yaml
222
+ doc_to_text: "Answer the question\n\n{{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
223
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
224
+ - task: arc_challenge
225
+ task_alias: prompt-6
226
+ include: _held_in_template_yaml
227
+ doc_to_text: "{{question}}\n\nPick the answer from these options\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
228
+ doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
229
+ # BoolQ
230
+ - group: boolq_flan
231
+ group_alias: BoolQ
232
+ task:
233
+ - task: boolq
234
+ task_alias: prompt-0
235
+ include: _held_in_template_yaml
236
+ doc_to_text: "{{passage}}\n\nCan we conclude that {{question}}?\n\nOPTIONS:\n- no\n- yes"
237
+ doc_to_target: "{{['no', 'yes'][label]}}"
238
+ - task: boolq
239
+ task_alias: prompt-1
240
+ include: _held_in_template_yaml
241
+ doc_to_text: "{{passage}}\n\nIs it true that {{question}}?\n\nOPTIONS:\n- no\n- yes"
242
+ doc_to_target: "{{['no', 'yes'][label]}}"
243
+ - task: boolq
244
+ task_alias: prompt-2
245
+ include: _held_in_template_yaml
246
+ doc_to_text: "{{passage}}\n\n{{question}}?\n\nOPTIONS:\n- no\n- yes"
247
+ doc_to_target: "{{['no', 'yes'][label]}}"
248
+ - task: boolq
249
+ task_alias: prompt-3
250
+ include: _held_in_template_yaml
251
+ doc_to_text: "Text: {{passage}}\n\nQuestion: {{question}}?\n\nOPTIONS:\n- no\n- yes"
252
+ doc_to_target: "{{['no', 'yes'][label]}}"
253
+ - task: boolq
254
+ task_alias: prompt-4
255
+ include: _held_in_template_yaml
256
+ doc_to_text: "{{passage}}\n\nWhat's the best answer to this question: {{question}}?\n\nOPTIONS:\n- no\n- yes"
257
+ doc_to_target: "{{['no', 'yes'][label]}}"
258
+ - task: boolq
259
+ task_alias: prompt-5
260
+ include: _held_in_template_yaml
261
+ doc_to_text: "{{passage}}\nBased on the above text what's the best answer to this question: {{question}}?\n\nOPTIONS:\n- no\n- yes"
262
+ doc_to_target: "{{['no', 'yes'][label]}}"
263
+ - task: boolq
264
+ task_alias: prompt-6
265
+ include: _held_in_template_yaml
266
+ doc_to_text: "{{passage}}\nAnswer this question making sure that the answer is supposed by the text: {{question}}?\n\nOPTIONS:\n- no\n- yes"
267
+ doc_to_target: "{{['no', 'yes'][label]}}"
268
+ - task: boolq
269
+ task_alias: prompt-7
270
+ include: _held_in_template_yaml
271
+ doc_to_text: "{{passage}}\n\nIs the following statement correct based on the text\n\n{{question}}\n\nOPTIONS:\n- no\n- yes"
272
+ doc_to_target: "{{['no', 'yes'][label]}}"
273
+ - task: boolq
274
+ task_alias: prompt-8
275
+ include: _held_in_template_yaml
276
+ doc_to_text: "{{passage}}\n\nIs this statement correct \"{{question}}\"?\n\nOPTIONS:\n- no\n- yes"
277
+ doc_to_target: "{{['no', 'yes'][label]}}"
278
+ - task: boolq
279
+ task_alias: prompt-9
280
+ include: _held_in_template_yaml
281
+ doc_to_text: "Is it true that {{question}} based on the following text?\n\n{{passage}}\n\nOPTIONS:\n- no\n- yes"
282
+ doc_to_target: "{{['no', 'yes'][label]}}"
283
+ # RTE
284
+ - group: rte_flan
285
+ group_alias: RTE
286
+ task:
287
+ - task: rte
288
+ task_alias: prompt-0
289
+ include: _held_in_template_yaml
290
+ doc_to_text: "{{sentence1}}\n\nQuestion with options: Based on the paragraph above can we conclude that \"{{sentence2}}\"?\n\nOPTIONS:\n- yes\n- no"
291
+ doc_to_target: "{{['yes', 'no'][label]}}"
292
+ - task: rte
293
+ task_alias: prompt-1
294
+ include: _held_in_template_yaml
295
+ doc_to_text: "{{sentence1}}\n\nBased on that paragraph can we conclude that the sentence below is true?\n{{sentence2}}\n\nOPTIONS:\n- yes\n- no"
296
+ doc_to_target: "{{['yes', 'no'][label]}}"
297
+ - task: rte
298
+ task_alias: prompt-2
299
+ include: _held_in_template_yaml
300
+ doc_to_text: "{{sentence1}}\n\nQ with options: Can we draw the following conclusion?\n{{sentence2}}\n\nOPTIONS:\n- yes\n- no"
301
+ doc_to_target: "{{['yes', 'no'][label]}}"
302
+ - task: rte
303
+ task_alias: prompt-3
304
+ include: _held_in_template_yaml
305
+ doc_to_text: "{{sentence1}}\nDoes this next sentence follow, given the preceding text?\n{{sentence2}}\n\nOPTIONS:\n- yes\n- no"
306
+ doc_to_target: "{{['yes', 'no'][label]}}"
307
+ - task: rte
308
+ task_alias: prompt-4
309
+ include: _held_in_template_yaml
310
+ doc_to_text: "{{sentence1}}\nOPTIONS:\n- yes\n- no\nQuestion: Can we infer the following?\n{{sentence2}}"
311
+ doc_to_target: "{{['yes', 'no'][label]}}"
312
+ - task: rte
313
+ task_alias: prompt-5
314
+ include: _held_in_template_yaml
315
+ doc_to_text: "Read the following paragraph and determine if the hypothesis is true. Select from options at the end:\n\n{{sentence1}}\n\nHypothesis: {{sentence2}}\nOPTIONS:\n- yes\n- no\nThe answer is"
316
+ doc_to_target: "{{['yes', 'no'][label]}}"
317
+ - task: rte
318
+ task_alias: prompt-6
319
+ include: _held_in_template_yaml
320
+ doc_to_text: "Read the text and determine if the sentence is true:\n\n{{sentence1}}\n\nSentence: {{sentence2}}\nOPTIONS:\n- yes\n- no\nA:"
321
+ doc_to_target: "{{['yes', 'no'][label]}}"
322
+ - task: rte
323
+ task_alias: prompt-7
324
+ include: _held_in_template_yaml
325
+ doc_to_text: "Question with options: can we draw the following hypothesis from the context? \n\nContext:\n\n{{sentence1}}\n\nHypothesis: {{sentence2}}\nOPTIONS:\n- yes\n- no\nA:"
326
+ doc_to_target: "{{['yes', 'no'][label]}}"
327
+ - task: rte
328
+ task_alias: prompt-8
329
+ include: _held_in_template_yaml
330
+ doc_to_text: "Determine if the sentence is true based on the text below. Choose from options.\n{{sentence2}}\n\n{{sentence1}}\nOPTIONS:\n- yes\n- no"
331
+ doc_to_target: "{{['yes', 'no'][label]}}"
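
The `doc_to_text` and `doc_to_target` fields above are Jinja templates rendered against each document, so an expression like `{{["Yes", "It's impossible to say", "No"][label]}}` simply indexes a literal list with the integer `label` field. A quick standalone illustration, using the `jinja2` package directly rather than the harness:

```python
from jinja2 import Template

# The same indexing idiom used by the ANLI doc_to_target entries above.
doc_to_target = Template('{{["Yes", "It\'s impossible to say", "No"][label]}}')

print(doc_to_target.render(label=0))  # Yes
print(doc_to_target.render(label=1))  # It's impossible to say
```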
lm-evaluation/lm_eval/tasks/benchmarks/multimedqa/README.md ADDED
@@ -0,0 +1,43 @@
 + # MultiMedQA (multiple-choice subset)
 +
 + ### Paper
 +
 + Title: Large Language Models Encode Clinical Knowledge
 +
 + Abstract: https://arxiv.org/abs/2212.13138
 +
 + A benchmark combining four existing multiple-choice question answering datasets spanning professional medical exams and research queries.
 +
 + ### Citation
 +
 + ```
 + @Article{Singhal2023,
 +     author={Singhal, Karan and Azizi, Shekoofeh and Tu, Tao and Mahdavi, S. Sara and Wei, Jason and Chung, Hyung Won and Scales, Nathan and Tanwani, Ajay and Cole-Lewis, Heather and Pfohl, Stephen and Payne, Perry and Seneviratne, Martin and Gamble, Paul and Kelly, Chris and Babiker, Abubakr and Sch{\"a}rli, Nathanael and Chowdhery, Aakanksha and Mansfield, Philip and Demner-Fushman, Dina and Ag{\"u}era y Arcas, Blaise and Webster, Dale and Corrado, Greg S. and Matias, Yossi and Chou, Katherine and Gottweis, Juraj and Tomasev, Nenad and Liu, Yun and Rajkomar, Alvin and Barral, Joelle and Semturs, Christopher and Karthikesalingam, Alan and Natarajan, Vivek},
 +     title={Large language models encode clinical knowledge},
 +     journal={Nature},
 +     year={2023},
 +     month={Aug},
 +     day={01},
 +     volume={620},
 +     number={7972},
 +     pages={172-180},
 +     issn={1476-4687},
 +     doi={10.1038/s41586-023-06291-2},
 +     url={https://doi.org/10.1038/s41586-023-06291-2}
 + }
 + ```
 +
 + ### Tasks
 +
 + * [PubMedQA](https://pubmedqa.github.io/) - 1,000 expert-labeled Q&A pairs where a question and a corresponding PubMed abstract are given as context and a yes/maybe/no answer must be produced. Unlike the rest of the tasks in this suite, PubMedQA is a closed-domain Q&A task.
 + * [MedQA](https://github.com/jind11/MedQA) - US Medical License Exam (USMLE) questions with 4 or 5 possible answers. Typically, only the 4-option questions are used.
 + * [MedMCQA](https://medmcqa.github.io/) - 4-option multiple choice questions from Indian medical entrance examinations, >191k total questions.
 + * [MMLU](https://arxiv.org/abs/2009.03300) - 4-option multiple choice exam questions from a variety of domains. The following 6 domains are utilized here:
 +   * Anatomy
 +   * Clinical Knowledge
 +   * College Medicine
 +   * Medical Genetics
 +   * Professional Medicine
 +   * College Biology
 +
 + Note that MultiMedQA also includes several short-form and long-form Q&A tasks (LiveQA, MedicationQA, HealthSearchQA). Those tasks are usually evaluated by human experts rather than automatically, and are therefore not included here.
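
The `multimedqa` group defined in the YAML below bundles these tasks so they can be run together. A minimal sketch of invoking it through the harness' Python API (the model choice is only a placeholder, and keyword names may differ slightly across lm-eval versions):

```python
# Sketch only: the model is a placeholder and the exact simple_evaluate
# signature may vary between lm-eval releases.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",
    tasks=["multimedqa"],
)
print(results["results"])
```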
lm-evaluation/lm_eval/tasks/benchmarks/multimedqa/multimedqa.yaml ADDED
@@ -0,0 +1,17 @@
 + group: multimedqa
 + task:
 +   - pubmedqa
 +   - medmcqa
 +   - medqa_4options
 +   - task: mmlu_anatomy
 +     task_alias: "anatomy (mmlu)"
 +   - task: mmlu_clinical_knowledge
 +     task_alias: "clinical_knowledge (mmlu)"
 +   - task: mmlu_college_medicine
 +     task_alias: "college_medicine (mmlu)"
 +   - task: mmlu_medical_genetics
 +     task_alias: "medical_genetics (mmlu)"
 +   - task: mmlu_professional_medicine
 +     task_alias: "professional_medicine (mmlu)"
 +   - task: mmlu_college_biology
 +     task_alias: "college_biology (mmlu)"
lm-evaluation/lm_eval/tasks/code_x_glue/code-text/bleu.py ADDED
@@ -0,0 +1,241 @@
1
+ #!/usr/bin/python
2
+ import math
3
+ import re
4
+ import sys
5
+ import xml.sax.saxutils
6
+ from typing import Any, Dict, List, Optional, Pattern, Tuple, Union
7
+
8
+
9
+ """
10
+ This script was adapted from the original version by hieuhoang1972 which is part of MOSES.
11
+ """
12
+
13
+ # $Id: bleu.py 1307 2007-03-14 22:22:36Z hieuhoang1972 $
14
+
15
+ """Provides:
16
+
17
+ cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test().
18
+ cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked().
19
+ score_cooked(alltest, n=4): Score a list of cooked test sentences.
20
+
21
+ score_set(s, testid, refids, n=4): Interface with dataset.py; calculate BLEU score of testid against refids.
22
+
23
+ The reason for breaking the BLEU computation into three phases cook_refs(), cook_test(), and score_cooked() is to allow the caller to calculate BLEU scores for multiple test sets as efficiently as possible.
24
+ """
25
+
26
+ # Added to bypass NIST-style pre-processing of hyp and ref files -- wade
27
+ nonorm = 0
28
+
29
+ preserve_case = False
30
+ eff_ref_len = "shortest"
31
+
32
+ normalize1: List[Tuple[Union[Pattern[str], str], str]] = [
33
+ ("<skipped>", ""), # strip "skipped" tags
34
+ (r"-\n", ""), # strip end-of-line hyphenation and join lines
35
+ (r"\n", " "), # join lines
36
+ # (r'(\d)\s+(?=\d)', r'\1'), # join digits
37
+ ]
38
+ normalize1 = [(re.compile(pattern), replace) for (pattern, replace) in normalize1]
39
+
40
+ normalize2: List[Tuple[Union[Pattern[str], str], str]] = [
41
+ (
42
+ r"([\{-\~\[-\` -\&\(-\+\:-\@\/])",
43
+ r" \1 ",
44
+ ), # tokenize punctuation. apostrophe is missing
45
+ (
46
+ r"([^0-9])([\.,])",
47
+ r"\1 \2 ",
48
+ ), # tokenize period and comma unless preceded by a digit
49
+ (
50
+ r"([\.,])([^0-9])",
51
+ r" \1 \2",
52
+ ), # tokenize period and comma unless followed by a digit
53
+ (r"([0-9])(-)", r"\1 \2 "), # tokenize dash when preceded by a digit
54
+ ]
55
+ normalize2 = [(re.compile(pattern), replace) for (pattern, replace) in normalize2]
56
+
57
+
58
+ def normalize(s):
59
+ """Normalize and tokenize text. This is lifted from NIST mteval-v11a.pl."""
60
+ # Added to bypass NIST-style pre-processing of hyp and ref files -- wade
61
+ if nonorm:
62
+ return s.split()
63
+ if not isinstance(s, str):
64
+ s = " ".join(s)
65
+ # language-independent part:
66
+ for pattern, replace in normalize1:
67
+ s = re.sub(pattern, replace, s)
68
+ s = xml.sax.saxutils.unescape(s, {"&quot;": '"'})
69
+ # language-dependent part (assuming Western languages):
70
+ s = " %s " % s
71
+ if not preserve_case:
72
+ s = s.lower() # this might not be identical to the original
73
+ for pattern, replace in normalize2:
74
+ s = re.sub(pattern, replace, s)
75
+ return s.split()
76
+
77
+
78
+ def count_ngrams(words, n=4):
79
+ counts: Dict[Any, int] = {}
80
+ for k in range(1, n + 1):
81
+ for i in range(len(words) - k + 1):
82
+ ngram = tuple(words[i : i + k])
83
+ counts[ngram] = counts.get(ngram, 0) + 1
84
+ return counts
85
+
86
+
87
+ def cook_refs(refs, n=4):
88
+ """Takes a list of reference sentences for a single segment
89
+ and returns an object that encapsulates everything that BLEU
90
+ needs to know about them."""
91
+
92
+ refs = [normalize(ref) for ref in refs]
93
+ maxcounts: Dict[Tuple[str], int] = {}
94
+ for ref in refs:
95
+ counts = count_ngrams(ref, n)
96
+ for ngram, count in counts.items():
97
+ maxcounts[ngram] = max(maxcounts.get(ngram, 0), count)
98
+ return ([len(ref) for ref in refs], maxcounts)
99
+
100
+
101
+ def cook_test(test, item, n=4):
102
+ """Takes a test sentence and returns an object that
103
+ encapsulates everything that BLEU needs to know about it."""
104
+ (reflens, refmaxcounts) = item
105
+ test = normalize(test)
106
+ result: Dict[str, Any] = {}
107
+ result["testlen"] = len(test)
108
+
109
+ # Calculate effective reference sentence length.
110
+
111
+ if eff_ref_len == "shortest":
112
+ result["reflen"] = min(reflens)
113
+ elif eff_ref_len == "average":
114
+ result["reflen"] = float(sum(reflens)) / len(reflens)
115
+ elif eff_ref_len == "closest":
116
+ min_diff: Optional[int] = None
117
+ for reflen in reflens:
118
+ if min_diff is None or abs(reflen - len(test)) < min_diff:
119
+ min_diff = abs(reflen - len(test))
120
+ result["reflen"] = reflen
121
+
122
+ result["guess"] = [max(len(test) - k + 1, 0) for k in range(1, n + 1)]
123
+
124
+ result["correct"] = [0] * n
125
+ counts = count_ngrams(test, n)
126
+ for ngram, count in counts.items():
127
+ result["correct"][len(ngram) - 1] += min(refmaxcounts.get(ngram, 0), count)
128
+
129
+ return result
130
+
131
+
132
+ def score_cooked(allcomps, n=4, ground=0, smooth=1):
133
+ totalcomps: Dict[str, Any] = {
134
+ "testlen": 0,
135
+ "reflen": 0,
136
+ "guess": [0] * n,
137
+ "correct": [0] * n,
138
+ }
139
+ for comps in allcomps:
140
+ for key in ["testlen", "reflen"]:
141
+ totalcomps[key] += comps[key]
142
+ for key in ["guess", "correct"]:
143
+ for k in range(n):
144
+ totalcomps[key][k] += comps[key][k]
145
+ logbleu = 0.0
146
+ all_bleus: List[float] = []
147
+ for k in range(n):
148
+ correct = totalcomps["correct"][k]
149
+ guess = totalcomps["guess"][k]
150
+ addsmooth = 0
151
+ if smooth == 1 and k > 0:
152
+ addsmooth = 1
153
+ logbleu += math.log(correct + addsmooth + sys.float_info.min) - math.log(
154
+ guess + addsmooth + sys.float_info.min
155
+ )
156
+ if guess == 0:
157
+ all_bleus.append(-10000000.0)
158
+ else:
159
+ all_bleus.append(math.log(correct + sys.float_info.min) - math.log(guess))
160
+
161
+ logbleu /= float(n)
162
+ all_bleus.insert(0, logbleu)
163
+
164
+ brevPenalty = min(
165
+ 0, 1 - float(totalcomps["reflen"] + 1) / (totalcomps["testlen"] + 1)
166
+ )
167
+ for i in range(len(all_bleus)):
168
+ if i == 0:
169
+ all_bleus[i] += brevPenalty
170
+ all_bleus[i] = math.exp(all_bleus[i])
171
+ return all_bleus
172
+
173
+
174
+ def bleu(refs, candidate, ground=0, smooth=1):
175
+ refs = cook_refs(refs)
176
+ test = cook_test(candidate, refs)
177
+ return score_cooked([test], ground=ground, smooth=smooth)
178
+
179
+
180
+ def splitPuncts(line):
181
+ return " ".join(re.findall(r"[\w]+|[^\s\w]", line))
182
+
183
+
184
+ def computeMaps(predictions, goldfile):
185
+ predictionMap: Dict[str, list] = {}
186
+ goldMap: Dict[str, list] = {}
187
+ gf = open(goldfile, "r", encoding="utf-8")
188
+
189
+ for row in predictions:
190
+ cols = row.strip().split("\t")
191
+ if len(cols) == 1:
192
+ (rid, pred) = (cols[0], "")
193
+ else:
194
+ (rid, pred) = (cols[0], cols[1])
195
+ predictionMap[rid] = [splitPuncts(pred.strip().lower())]
196
+
197
+ for row in gf:
198
+ (rid, pred) = row.split("\t")
199
+ if rid in predictionMap: # Only insert if the id exists for the method
200
+ if rid not in goldMap:
201
+ goldMap[rid] = []
202
+ goldMap[rid].append(splitPuncts(pred.strip().lower()))
203
+
204
+ sys.stderr.write("Total: " + str(len(goldMap)) + "\n")
205
+ return (goldMap, predictionMap)
206
+
207
+
208
+ # m1 is the reference map
209
+ # m2 is the prediction map
210
+ def bleuFromMaps(m1, m2):
211
+ score = [0] * 5
212
+ num = 0.0
213
+
214
+ for key in m1:
215
+ if key in m2:
216
+ bl = bleu(m1[key], m2[key][0])
217
+ score = [score[i] + bl[i] for i in range(0, len(bl))]
218
+ num += 1
219
+ return [s * 100.0 / num for s in score]
220
+
221
+
222
+ def smoothed_bleu_4(references, predictions, **kwargs):
223
+ predictionMap = {}
224
+ goldMap = {}
225
+
226
+ for rid, pred in enumerate(predictions):
227
+ predictionMap[rid] = [splitPuncts(pred.strip().lower())]
228
+
229
+ for rid, row in enumerate(references):
230
+ goldMap[rid] = [splitPuncts(row.strip().lower())]
231
+
232
+ return bleuFromMaps(goldMap, predictionMap)[0]
233
+
234
+
235
+ if __name__ == "__main__":
236
+ reference_file = sys.argv[1]
237
+ predictions = []
238
+ for row in sys.stdin:
239
+ predictions.append(row)
240
+ (goldMap, predictionMap) = computeMaps(predictions, reference_file)
241
+ print(bleuFromMaps(goldMap, predictionMap)[0])
lm-evaluation/lm_eval/tasks/code_x_glue/code-text/python.yaml ADDED
@@ -0,0 +1,21 @@
1
+ group:
2
+ - codexglue_code2text
3
+ task: code2text_python
4
+ dataset_path: CM/codexglue_code2text_python
5
+ training_split: train
6
+ validation_split: validation
7
+ test_split: test
8
+ output_type: generate_until
9
+ generation_kwargs:
10
+ num_beams: 10
11
+ max_gen_toks: 128
12
+ until:
13
+ - "</s>"
14
+ doc_to_text: !function utils.doc_to_text
15
+ doc_to_target: !function utils.doc_to_target
16
+ metric_list:
17
+ - metric: !function bleu.smoothed_bleu_4
18
+ aggregation: mean
19
+ higher_is_better: True
20
+ metadata:
21
+ version: 1.0
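To make the wiring above concrete, here is a minimal sketch of how the `bleu.smoothed_bleu_4` metric bound in `metric_list` consumes parallel lists of reference and predicted docstrings. The import path and the sample strings are assumptions for illustration; in practice the harness resolves `!function bleu.smoothed_bleu_4` and calls it itself.

```python
# Minimal sketch; assumes bleu.py (above) is importable as `bleu`.
from bleu import smoothed_bleu_4

references = ["Return the sum of two integers .", "Open a file and return its contents ."]
predictions = ["Returns the sum of two numbers .", "Read a file ."]

# Smoothed BLEU-4 over the whole set, scaled to 0-100 (higher is better).
print(round(smoothed_bleu_4(references, predictions), 2))
```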
lm-evaluation/lm_eval/tasks/coqa/README.md ADDED
@@ -0,0 +1,43 @@
1
+ # CoQA
2
+
3
+ ### Paper
4
+
5
+ Title: `CoQA: A Conversational Question Answering Challenge`
6
+
7
+ Abstract: https://arxiv.org/pdf/1808.07042.pdf
8
+
9
+ CoQA is a large-scale dataset for building Conversational Question Answering
10
+ systems. The goal of the CoQA challenge is to measure the ability of machines to
11
+ understand a text passage and answer a series of interconnected questions that
12
+ appear in a conversation.
13
+
14
+ Homepage: https://stanfordnlp.github.io/coqa/
15
+
16
+ ### Citation
17
+
18
+ ```
19
+ BibTeX-formatted citation goes here
20
+ ```
21
+
22
+ ### Groups and Tasks
23
+
24
+ #### Groups
25
+
26
+ * Not part of a group yet
27
+
28
+ #### Tasks
29
+
30
+ * `coqa`
31
+
32
+ ### Checklist
33
+
34
+ For adding novel benchmarks/datasets to the library:
35
+ * [ ] Is the task an existing benchmark in the literature?
36
+ * [ ] Have you referenced the original paper that introduced the task?
37
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
38
+
39
+
40
+ If other tasks on this dataset are already supported:
41
+ * [ ] Is the "Main" variant of this task clearly denoted?
42
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
43
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/lm_eval/tasks/coqa/default.yaml ADDED
@@ -0,0 +1,24 @@
1
+ task: coqa
2
+ dataset_path: EleutherAI/coqa
3
+ output_type: generate_until
4
+ training_split: train
5
+ validation_split: validation
6
+ doc_to_text: !function utils.doc_to_text
7
+ doc_to_target: !function utils.doc_to_target
8
+ process_results: !function utils.process_results
9
+ should_decontaminate: true
10
+ doc_to_decontamination_query: "{{story}} {{question.input_text|join('\n')}}"
11
+ generation_kwargs:
12
+ until:
13
+ - "\nQ:"
14
+ metric_list:
15
+ - metric: em
16
+ aggregation: mean
17
+ higher_is_better: true
18
+ - metric: f1
19
+ aggregation: mean
20
+ higher_is_better: true
21
+ metadata:
22
+ version: 3.0
23
+ dataset_kwargs:
24
+ trust_remote_code: true
lm-evaluation/lm_eval/tasks/coqa/utils.py ADDED
@@ -0,0 +1,77 @@
1
+ from itertools import zip_longest
2
+
3
+ import transformers.data.metrics.squad_metrics as squad_metrics
4
+
5
+
6
+ def doc_to_text(doc):
7
+ # Given a passage p, the conversation history {q1, a1, . . . qi−1, ai−1}
8
+ # and a question qi, the task is to predict the answer ai
9
+ doc_text = doc["story"] + "\n\n"
10
+ for q, a in zip_longest(
11
+ doc["questions"]["input_text"], doc["answers"]["input_text"][:-1]
12
+ ): # omit target answer ai
13
+ question = f"Q: {q}\n\n"
14
+ answer = f"A: {a}\n\n" if a is not None else "A:"
15
+ doc_text += question + answer
16
+ return doc_text
17
+
18
+
19
+ def doc_to_target(doc):
20
+ turn_id = len(doc["questions"]["input_text"])
21
+ # Returns unique answers and valid alternatives (Some questions in CoQA have multiple valid answers).
22
+ answers = []
23
+ answer_forturn = doc["answers"]["input_text"][turn_id - 1]
24
+ answers.append(answer_forturn)
25
+
26
+ additional_answers = doc.get("additional_answers")
27
+ if additional_answers:
28
+ for key in additional_answers:
29
+ additional_answer_for_turn = additional_answers[key]["input_text"][
30
+ turn_id - 1
31
+ ]
32
+ if additional_answer_for_turn.lower() not in map(str.lower, answers):
33
+ answers.append(additional_answer_for_turn)
34
+ return answers
35
+
36
+
37
+ def em(gold_list, pred):
38
+ # test for exact match on the normalised answer (compute_exact)
39
+ em_sum = 0.0
40
+ if len(gold_list) > 1:
41
+ for i in range(len(gold_list)):
42
+ gold_answers = gold_list[0:i] + gold_list[i + 1 :]
43
+ # predictions compared against (n) golds and take maximum
44
+ em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_answers)
45
+ else:
46
+ em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_list)
47
+
48
+ return em_sum / max(1, len(gold_list))
49
+
50
+
51
+ def compute_scores(gold_list, pred):
52
+ # test for exact match on the normalised answer (compute_exact)
53
+ # test for overlap (compute_f1)
54
+ f1_sum = 0.0
55
+ em_sum = 0.0
56
+ if len(gold_list) > 1:
57
+ for i in range(len(gold_list)):
58
+ gold_answers = gold_list[0:i] + gold_list[i + 1 :]
59
+ # predictions compared against (n) golds and take maximum
60
+ em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_answers)
61
+ f1_sum += max(squad_metrics.compute_f1(a, pred) for a in gold_answers)
62
+ else:
63
+ em_sum += max(squad_metrics.compute_exact(a, pred) for a in gold_list)
64
+ f1_sum += max(squad_metrics.compute_f1(a, pred) for a in gold_list)
65
+
66
+ return {
67
+ "em": em_sum / max(1, len(gold_list)),
68
+ "f1": f1_sum / max(1, len(gold_list)),
69
+ }
70
+
71
+
72
+ def process_results(doc, results):
73
+ gold_list = doc_to_target(doc)
74
+ pred = results[0].strip().split("\n")[0]
75
+
76
+ scores = compute_scores(gold_list, pred)
77
+ return scores
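A small illustration of what `doc_to_text` and `doc_to_target` produce, using an invented record that follows the `EleutherAI/coqa` field layout read above; the module path in the import is an assumption.

```python
from lm_eval.tasks.coqa.utils import doc_to_text, doc_to_target  # assumed module path

doc = {  # invented CoQA-style record; real documents come from EleutherAI/coqa
    "story": "Anna has a red bicycle. She rides it to school every day.",
    "questions": {"input_text": ["What does Anna have?", "What color is it?"]},
    "answers": {"input_text": ["a red bicycle", "red"]},
}

print(doc_to_text(doc))
# Anna has a red bicycle. She rides it to school every day.
#
# Q: What does Anna have?
#
# A: a red bicycle
#
# Q: What color is it?
#
# A:
print(doc_to_target(doc))  # ['red'] (plus any "additional_answers" entries, if present)
```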
lm-evaluation/lm_eval/tasks/eq_bench/README.md ADDED
@@ -0,0 +1,55 @@
1
+ # EQ-Bench
2
+
3
+ Title: `EQ-Bench: An Emotional Intelligence Benchmark for Large Language Models`
4
+
5
+ Abstract: https://arxiv.org/abs/2312.06281
6
+
7
+ EQ-Bench is a benchmark for language models designed to assess emotional intelligence.
8
+
9
+ Why emotional intelligence? One reason is that it covers a set of abilities that matter for the user experience but aren't explicitly tested by other benchmarks. Another is that it's not trivial to improve scores by fine-tuning for the benchmark, which makes the leaderboard harder to "game".
10
+
11
+ EQ-Bench is a little different from traditional psychometric tests. It uses a specific question format in which the subject reads a dialogue and then rates the intensity of possible emotional responses of one of the characters. Every question is interpretative and assesses the ability to predict the magnitude of the four presented emotions. The test is graded without the need for a judge (so there is no length bias). It's cheap to run (only 171 questions) and produces results that correlate strongly with human preference (Arena ELO) and multi-domain benchmarks like MMLU.
12
+
13
+ Homepage: https://eqbench.com/
14
+
15
+
16
+ NOTE: There are some key differences between the lm-evaluation-harness version and the implementation described in the EQ-Bench paper (These have been OK'd by the author):
17
+
18
+ - The lm-eval version uses the EQ-Bench v2 test set (171 questions) and score calculation. It does not incorporate the revision part of the prompt, as per v2.1 (https://github.com/EQ-bench/EQ-Bench)
19
+ - No retries in lm-eval version (EQ-Bench pipeline retries with successively higher temps if it encounters unparseable answers)
20
+ - In the original implementation, unparseable answers are excluded from the final score, and at least 83% of answers must be parseable or a fail is returned. The lm-eval version instead assigns 0 to unparseable answers and has no failure criterion, so scores for lower-performing models may differ from the EQ-Bench leaderboard.
21
+
22
+
23
+ ### Citation
24
+
25
+ ```bibtex
26
+ @misc{paech2023eqbench,
27
+ title={EQ-Bench: An Emotional Intelligence Benchmark for Large Language Models},
28
+ author={Samuel J. Paech},
29
+ year={2023},
30
+ eprint={2312.06281},
31
+ archivePrefix={arXiv},
32
+ primaryClass={cs.CL}
33
+ }
34
+ ```
35
+
36
+ ### Groups and Tasks
37
+
38
+ #### Groups
39
+
40
+ * Not part of a group yet
41
+
42
+ #### Tasks
43
+
44
+ * `eq_bench`
45
+
46
+ ### Checklist
47
+
48
+ * [x] Is the task an existing benchmark in the literature?
49
+ * [x] Have you referenced the original paper that introduced the task?
50
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
51
+
52
+ If other tasks on this dataset are already supported:
53
+ * [ ] Is the "Main" variant of this task clearly denoted?
54
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
55
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/lm_eval/tasks/eq_bench/default.yaml ADDED
@@ -0,0 +1,20 @@
1
+ task: eq_bench
2
+ dataset_path: pbevan11/EQ-Bench
3
+ output_type: generate_until
4
+ validation_split: validation
5
+ doc_to_text: prompt
6
+ doc_to_target: reference_answer_fullscale
7
+ process_results: !function utils.calculate_score_fullscale
8
+ generation_kwargs:
9
+ do_sample: false
10
+ temperature: 0.0
11
+ max_gen_toks: 80
12
+ metric_list:
13
+ - metric: eqbench
14
+ aggregation: mean
15
+ higher_is_better: true
16
+ - metric: percent_parseable
17
+ aggregation: mean
18
+ higher_is_better: true
19
+ metadata:
20
+ version: 2.1
lm-evaluation/lm_eval/tasks/eq_bench/utils.py ADDED
@@ -0,0 +1,54 @@
1
+ import math
2
+ import re
3
+
4
+
5
+ def calculate_score_fullscale(docs, results):
6
+ reference = eval(docs["reference_answer_fullscale"])
7
+ user = dict(re.findall(r"(\w+):\s+(\d+)", results[0]))
8
+ # First check that the emotions specified in the answer match those in the reference
9
+ if len(user.items()) != 4:
10
+ # print('! Error: 4 emotions were not returned')
11
+ # print(user)
12
+ return {"eqbench": 0, "percent_parseable": 0}
13
+ emotions_dict = {}
14
+ for emotion, user_emotion_score in user.items():
15
+ for i in range(1, 5):
16
+ if emotion == reference[f"emotion{i}"]:
17
+ emotions_dict[emotion] = True
18
+ if len(emotions_dict) != 4:
19
+ print("! Error: emotions did not match reference")
20
+ print(user)
21
+ return {"eqbench": 0, "percent_parseable": 0}
22
+
23
+ difference_tally = (
24
+ 0  # Tally of difference from reference answers for this question
25
+ )
26
+
27
+ # Iterate over each emotion in the user's answers.
28
+ for emotion, user_emotion_score in user.items():
29
+ # If this emotion is in the reference, calculate the difference between the user's score and the reference score.
30
+ for i in range(1, 5):
31
+ if emotion == reference[f"emotion{i}"]:
32
+ d = abs(
33
+ float(user_emotion_score) - float(reference[f"emotion{i}_score"])
34
+ )
35
+ # this will be a value between 0 and 10
36
+ if d == 0:
37
+ scaled_difference = 0
38
+ elif d <= 5:
39
+ # S-shaped scaling function
40
+ # https://www.desmos.com/calculator
41
+ # 6.5\cdot\ \frac{1}{\left(1\ +\ e^{\left(-1.2\cdot\left(x-4\right)\right)}\right)}
42
+ scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))
43
+
44
+ else:
45
+ scaled_difference = d
46
+ difference_tally += scaled_difference
47
+
48
+ # Inverting the difference tally so that the closer the answer is to reference, the higher the score.
49
+ # The adjustment constant is chosen such that answering randomly produces a score of zero.
50
+ adjust_const = 0.7477
51
+ final_score = 10 - (difference_tally * adjust_const)
52
+ final_score_percent = final_score * 10
53
+
54
+ return {"eqbench": final_score_percent, "percent_parseable": 100}
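A minimal sketch of how the scoring above behaves on a toy question. The reference dictionary and the model completion are invented; the field names (`emotion1` … `emotion4_score`) follow what `calculate_score_fullscale` reads from `reference_answer_fullscale`, and the import path is an assumption.

```python
from lm_eval.tasks.eq_bench.utils import calculate_score_fullscale  # assumed module path

doc = {  # invented reference answer in the fullscale format parsed above
    "reference_answer_fullscale": str({
        "emotion1": "Surprise", "emotion1_score": 2,
        "emotion2": "Anger", "emotion2_score": 7,
        "emotion3": "Relief", "emotion3_score": 0,
        "emotion4": "Worry", "emotion4_score": 9,
    })
}

# Hypothetical model completion rating the same four emotions on a 0-10 scale.
completion = "Surprise: 3\nAnger: 6\nRelief: 0\nWorry: 10"

print(calculate_score_fullscale(doc, [completion]))
# e.g. {'eqbench': ~96, 'percent_parseable': 100} -- ratings close to the reference give a score near 100
```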
lm-evaluation/lm_eval/tasks/lambada_cloze/README.md ADDED
@@ -0,0 +1,56 @@
1
+ # LAMBADA Cloze
2
+
3
+ ### Paper
4
+
5
+ Title: `The LAMBADA dataset: Word prediction requiring a broad discourse context`
6
+
7
+ Abstract: https://arxiv.org/abs/1606.06031
8
+
9
+ Cloze-style LAMBADA dataset.
10
+ LAMBADA is a dataset to evaluate the capabilities of computational models for text
11
+ understanding by means of a word prediction task. LAMBADA is a collection of narrative
12
+ passages sharing the characteristic that human subjects are able to guess their last
13
+ word if they are exposed to the whole passage, but not if they only see the last
14
+ sentence preceding the target word. To succeed on LAMBADA, computational models
15
+ cannot simply rely on local context, but must be able to keep track of information
16
+ in the broader discourse.
17
+
18
+ Homepage: https://zenodo.org/record/2630551#.X4Xzn5NKjUI
19
+
20
+
21
+ ### Citation
22
+
23
+ ```
24
+ @misc{
25
+ author={Paperno, Denis and Kruszewski, Germán and Lazaridou, Angeliki and Pham, Quan Ngoc and Bernardi, Raffaella and Pezzelle, Sandro and Baroni, Marco and Boleda, Gemma and Fernández, Raquel},
26
+ title={The LAMBADA dataset},
27
+ DOI={10.5281/zenodo.2630551},
28
+ publisher={Zenodo},
29
+ year={2016},
30
+ month={Aug}
31
+ }
32
+ ```
33
+
34
+ ### Groups and Tasks
35
+
36
+ #### Groups
37
+
38
+ * `lambada_cloze`
39
+
40
+ #### Tasks
41
+
42
+ * `lambada_openai_cloze_yaml`
43
+ * `lambada_standard_cloze_yaml`
44
+
45
+ ### Checklist
46
+
47
+ For adding novel benchmarks/datasets to the library:
48
+ * [ ] Is the task an existing benchmark in the literature?
49
+ * [ ] Have you referenced the original paper that introduced the task?
50
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
51
+
52
+
53
+ If other tasks on this dataset are already supported:
54
+ * [ ] Is the "Main" variant of this task clearly denoted?
55
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
56
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/lm_eval/tasks/lambada_cloze/lambada_standard_cloze.yaml ADDED
@@ -0,0 +1,21 @@
1
+ group:
2
+ - lambada_cloze
3
+ task: lambada_standard_cloze_yaml
4
+ dataset_path: lambada
5
+ dataset_name: null
6
+ output_type: loglikelihood
7
+ validation_split: validation
8
+ test_split: test
9
+ doc_to_text: "{{text.split(' ')[:-1]|join(' ')}} ____. ->"
10
+ doc_to_target: "{{' '+text.split(' ')[-1]}}"
11
+ should_decontaminate: true
12
+ doc_to_decontamination_query: "{{text}}"
13
+ metric_list:
14
+ - metric: perplexity
15
+ aggregation: perplexity
16
+ higher_is_better: false
17
+ - metric: acc
18
+ aggregation: mean
19
+ higher_is_better: true
20
+ metadata:
21
+ version: 1.0
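The Jinja templates above drop the final word of each passage and append a cloze marker; a plain-Python equivalent (with an invented passage) looks like this:

```python
# Invented passage; real documents come from the `lambada` dataset's "text" field.
text = "she opened the door and saw her old friend"

prompt = " ".join(text.split(" ")[:-1]) + " ____. ->"  # doc_to_text
target = " " + text.split(" ")[-1]                     # doc_to_target (note the leading space)

print(prompt)        # she opened the door and saw her old ____. ->
print(repr(target))  # ' friend'
```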
lm-evaluation/lm_eval/tasks/medmcqa/medmcqa.yaml ADDED
@@ -0,0 +1,18 @@
1
+ task: medmcqa
2
+ dataset_path: medmcqa
3
+ output_type: multiple_choice
4
+ training_split: train
5
+ validation_split: validation
6
+ test_split: validation
7
+ doc_to_text: !function utils_medmcqa.doc_to_text
8
+ doc_to_target: cop
9
+ doc_to_choice: [ 'A','B','C','D' ]
10
+ should_decontaminate: true
11
+ doc_to_decontamination_query: "{{question}}"
12
+ metric_list:
13
+ - metric: acc
14
+ aggregation: mean
15
+ higher_is_better: true
16
+ - metric: acc_norm
17
+ aggregation: mean
18
+ higher_is_better: true
lm-evaluation/lm_eval/tasks/medmcqa/utils_medmcqa.py ADDED
@@ -0,0 +1,24 @@
1
+ # Copied from Master
2
+ def doc_to_text(doc) -> str:
3
+ """
4
+ Question: <question>
5
+ Choices:
6
+ A. <choice1>
7
+ B. <choice2>
8
+ C. <choice3>
9
+ D. <choice4>
10
+ Answer:
11
+ """
12
+ choices = [doc["opa"], doc["opb"], doc["opc"], doc["opd"]]
13
+ option_choices = {
14
+ "A": choices[0],
15
+ "B": choices[1],
16
+ "C": choices[2],
17
+ "D": choices[3],
18
+ }
19
+
20
+ prompt = "Question: " + doc["question"] + "\nChoices:\n"
21
+ for choice, option in option_choices.items():
22
+ prompt += f"{choice.upper()}. {option}\n"
23
+ prompt += "Answer:"
24
+ return prompt
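For reference, this is the prompt the helper above produces for an invented record (the `opa`–`opd` option fields follow the MedMCQA schema; the import path is an assumption):

```python
from lm_eval.tasks.medmcqa.utils_medmcqa import doc_to_text  # assumed module path

doc = {  # invented MedMCQA-style record
    "question": "Which vitamin deficiency causes scurvy?",
    "opa": "Vitamin A",
    "opb": "Vitamin B12",
    "opc": "Vitamin C",
    "opd": "Vitamin D",
}

print(doc_to_text(doc))
# Question: Which vitamin deficiency causes scurvy?
# Choices:
# A. Vitamin A
# B. Vitamin B12
# C. Vitamin C
# D. Vitamin D
# Answer:
```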
lm-evaluation/lm_eval/tasks/mgsm/README.md ADDED
@@ -0,0 +1,94 @@
1
+ # MGSM
2
+
3
+ ### Paper
4
+
5
+ Title: `Language Models are Multilingual Chain-of-Thought Reasoners`
6
+
7
+ Abstract: https://arxiv.org/abs/2210.03057
8
+
9
+ Multilingual Grade School Math Benchmark (MGSM) is a benchmark of grade-school math problems, proposed in the paper [Language models are multilingual chain-of-thought reasoners](http://arxiv.org/abs/2210.03057).
10
+
11
+ The same 250 problems from [GSM8K](https://arxiv.org/abs/2110.14168) are each translated by human annotators into 10 languages. The 10 languages are:
12
+ - Spanish
13
+ - French
14
+ - German
15
+ - Russian
16
+ - Chinese
17
+ - Japanese
18
+ - Thai
19
+ - Swahili
20
+ - Bengali
21
+ - Telugu
22
+
23
+ GSM8K (Grade School Math 8K) is a dataset of 8.5K high-quality, linguistically diverse grade-school math word problems. The dataset was created to support the task of question answering on basic mathematical problems that require multi-step reasoning.
24
+
25
+ You can find the input and targets for each of the ten languages (and English) as `.tsv` files.
26
+ We also include few-shot exemplars, likewise manually translated for each language, in `exemplars.py`.
27
+
28
+ Homepage: https://github.com/google-research/url-nlp/tree/main/mgsm
29
+
30
+
31
+ ### Citation
32
+
33
+ ```
34
+ @misc{cobbe2021training,
35
+ title={Training Verifiers to Solve Math Word Problems},
36
+ author={Karl Cobbe and Vineet Kosaraju and Mohammad Bavarian and Jacob Hilton and Reiichiro Nakano and Christopher Hesse and John Schulman},
37
+ year={2021},
38
+ eprint={2110.14168},
39
+ archivePrefix={arXiv},
40
+ primaryClass={cs.LG}
41
+ }
42
+ @misc{shi2022language,
43
+ title={Language Models are Multilingual Chain-of-Thought Reasoners},
44
+ author={Freda Shi and Mirac Suzgun and Markus Freitag and Xuezhi Wang and Suraj Srivats and Soroush Vosoughi and Hyung Won Chung and Yi Tay and Sebastian Ruder and Denny Zhou and Dipanjan Das and Jason Wei},
45
+ year={2022},
46
+ eprint={2210.03057},
47
+ archivePrefix={arXiv},
48
+ primaryClass={cs.CL}
49
+ }
50
+ ```
51
+
52
+ ### Groups and Tasks
53
+
54
+ #### Groups
55
+
56
+ * `mgsm_direct`: Direct question
57
+ * `mgsm_direct_bn`: Bengali
58
+ * `mgsm_direct_de`: German
59
+ * `mgsm_direct_en`: English
60
+ * `mgsm_direct_es`: Spanish
61
+ * `mgsm_direct_fr`: French
62
+ * `mgsm_direct_ja`: Japanese
63
+ * `mgsm_direct_ru`: Russian
64
+ * `mgsm_direct_sw`: Swahili
65
+ * `mgsm_direct_te`: Telugu
66
+ * `mgsm_direct_th`: Thai
67
+ * `mgsm_direct_zh`: Chinese
68
+ * `mgsm_cot_native`: Question followed by a chain-of-thought answer prompt in the same language as the dataset.
69
+ * `mgsm_cot_native_bn`: Bengali
70
+ * `mgsm_cot_native_de`: German
71
+ * `mgsm_cot_native_en`: English
72
+ * `mgsm_cot_native_es`: Spanish
73
+ * `mgsm_cot_native_fr`: French
74
+ * `mgsm_cot_native_ja`: Japanese
75
+ * `mgsm_cot_native_ru`: Russian
76
+ * `mgsm_cot_native_sw`: Swahili
77
+ * `mgsm_cot_native_te`: Telugu
78
+ * `mgsm_cot_native_th`: Thai
79
+ * `mgsm_cot_native_zh`: Chinese
80
+
81
+ Exemplar samples: https://github.com/google-research/url-nlp/blob/main/mgsm/exemplars.py
82
+
83
+ ### Checklist
84
+
85
+ For adding novel benchmarks/datasets to the library:
86
+ * [ ] Is the task an existing benchmark in the literature?
87
+ * [ ] Have you referenced the original paper that introduced the task?
88
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
89
+
90
+
91
+ If other tasks on this dataset are already supported:
92
+ * [ ] Is the "Main" variant of this task clearly denoted?
93
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
94
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
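A hedged sketch of running a couple of the tasks listed above through the harness's Python entry point; the checkpoint name is a placeholder and `simple_evaluate`'s exact keyword set may differ across harness versions.

```python
import lm_eval

# Placeholder checkpoint; any Hugging Face causal LM identifier should work here.
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",
    tasks=["mgsm_direct_en", "mgsm_en_cot_en"],
)
print(results["results"])
```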
lm-evaluation/lm_eval/tasks/mgsm/direct/direct_yaml ADDED
@@ -0,0 +1,35 @@
1
+ # This file will be included in the generated language-specific task configs.
2
+ # It doesn't have a yaml file extension as it is not meant to be imported directly
3
+ # by the harness.
4
+ group: mgsm_direct
5
+ dataset_path: juletxara/mgsm
6
+ dataset_name: null # Overridden by language-specific config.
7
+ output_type: generate_until
8
+ training_split: train
9
+ test_split: test
10
+ target_delimiter: ""
11
+ generation_kwargs:
12
+ until:
13
+ - "\n\n"
14
+ - "\n"
15
+ do_sample: false
16
+ temperature: 0.0
17
+ filter_list:
18
+ - name: remove_whitespace
19
+ filter:
20
+ - function: remove_whitespace
21
+ - function: take_first
22
+ - filter:
23
+ - function: regex
24
+ group_select: -1
25
+ regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+)
26
+ - function: take_first
27
+ name: flexible-extract
28
+ metric_list:
29
+ - metric: exact_match
30
+ aggregation: mean
31
+ higher_is_better: true
32
+ ignore_case: true
33
+ ignore_punctuation: true
34
+ metadata:
35
+ version: 2.0
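To see what the `flexible-extract` filter above does, here is a rough plain-`re` equivalent; the completion string is invented, and the real filter additionally picks the non-empty capture group for each match.

```python
import re

pattern = re.compile(r"(-?[$0-9.,]{2,})|(-?[0-9]+)")
completion = "She starts with 3 apples, buys 14 more, so she has 17 apples."

# findall returns one (group1, group2) tuple per match; exactly one side is non-empty.
matches = ["".join(m) for m in pattern.findall(completion)]
print(matches)      # ['3', '14', '17']
print(matches[-1])  # '17' -- group_select: -1 keeps the last match as the predicted answer
```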
lm-evaluation/lm_eval/tasks/mgsm/direct/mgsm_direct_bn.yaml ADDED
@@ -0,0 +1,12 @@
1
+ # Generated by utils.py
2
+ dataset_name: bn
3
+ doc_to_target: '{% if answer is not none %}{{answer[17:]}}{% else %}{{answer_number|string}}{% endif %}'
4
+ doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"প্রশ্ন: "+question+"\nAnswer:"}}{% endif %}'
5
+ generation_kwargs:
6
+ do_sample: false
7
+ until:
8
+ - 'প্রশ্ন:'
9
+ - </s>
10
+ - <|im_end|>
11
+ include: direct_yaml
12
+ task: mgsm_direct_bn
lm-evaluation/lm_eval/tasks/mgsm/direct/mgsm_direct_de.yaml ADDED
@@ -0,0 +1,12 @@
1
+ # Generated by utils.py
2
+ dataset_name: de
3
+ doc_to_target: '{% if answer is not none %}{{answer[29:]}}{% else %}{{answer_number|string}}{% endif %}'
4
+ doc_to_text: '{% if answer is not none %}{{question+"\nAntwort:"}}{% else %}{{"Frage: "+question+"\nAntwort:"}}{% endif %}'
5
+ generation_kwargs:
6
+ do_sample: false
7
+ until:
8
+ - 'Frage:'
9
+ - </s>
10
+ - <|im_end|>
11
+ include: direct_yaml
12
+ task: mgsm_direct_de
lm-evaluation/lm_eval/tasks/mgsm/direct/mgsm_direct_en.yaml ADDED
@@ -0,0 +1,12 @@
1
+ # Generated by utils.py
2
+ dataset_name: en
3
+ doc_to_target: '{% if answer is not none %}{{answer[21:]}}{% else %}{{answer_number|string}}{% endif %}'
4
+ doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"Question: "+question+"\nAnswer:"}}{% endif %}'
5
+ generation_kwargs:
6
+ do_sample: false
7
+ until:
8
+ - 'Question:'
9
+ - </s>
10
+ - <|im_end|>
11
+ include: direct_yaml
12
+ task: mgsm_direct_en
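The per-language slice offsets in `doc_to_target` (`answer[21:]` here, `answer[29:]` for German, and so on) appear to strip the localized "Step-by-Step Answer:" prefix at the start of the MGSM `answer` field. A quick check of the English offset (the sample answer text is invented):

```python
prefix = "Step-by-Step Answer: "
print(len(prefix))  # 21 -- hence answer[21:] in the English config

answer = prefix + "There are 5 + 7 = 12 apples. The answer is 12."
print(answer[21:])  # There are 5 + 7 = 12 apples. The answer is 12.
```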
lm-evaluation/lm_eval/tasks/mgsm/direct/mgsm_direct_es.yaml ADDED
@@ -0,0 +1,12 @@
1
+ # Generated by utils.py
2
+ dataset_name: es
3
+ doc_to_target: '{% if answer is not none %}{{answer[23:]}}{% else %}{{answer_number|string}}{% endif %}'
4
+ doc_to_text: '{% if answer is not none %}{{question+"\nRespuesta:"}}{% else %}{{"Pregunta: "+question+"\nRespuesta:"}}{% endif %}'
5
+ generation_kwargs:
6
+ do_sample: false
7
+ until:
8
+ - 'Pregunta:'
9
+ - </s>
10
+ - <|im_end|>
11
+ include: direct_yaml
12
+ task: mgsm_direct_es
lm-evaluation/lm_eval/tasks/mgsm/direct/mgsm_direct_fr.yaml ADDED
@@ -0,0 +1,12 @@
1
+ # Generated by utils.py
2
+ dataset_name: fr
3
+ doc_to_target: '{% if answer is not none %}{{answer[26:]}}{% else %}{{answer_number|string}}{% endif %}'
4
+ doc_to_text: '{% if answer is not none %}{{question+"\nRéponse :"}}{% else %}{{"Question : "+question+"\nRéponse :"}}{% endif %}'
5
+ generation_kwargs:
6
+ do_sample: false
7
+ until:
8
+ - 'Question :'
9
+ - </s>
10
+ - <|im_end|>
11
+ include: direct_yaml
12
+ task: mgsm_direct_fr
lm-evaluation/lm_eval/tasks/mgsm/direct/mgsm_direct_ja.yaml ADDED
@@ -0,0 +1,12 @@
1
+ # Generated by utils.py
2
+ dataset_name: ja
3
+ doc_to_target: '{% if answer is not none %}{{answer[11:]}}{% else %}{{answer_number|string}}{% endif %}'
4
+ doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"問題: "+question+"\nAnswer:"}}{% endif %}'
5
+ generation_kwargs:
6
+ do_sample: false
7
+ until:
8
+ - '問題:'
9
+ - </s>
10
+ - <|im_end|>
11
+ include: direct_yaml
12
+ task: mgsm_direct_ja
lm-evaluation/lm_eval/tasks/mgsm/direct/mgsm_direct_ru.yaml ADDED
@@ -0,0 +1,12 @@
1
+ # Generated by utils.py
2
+ dataset_name: ru
3
+ doc_to_target: '{% if answer is not none %}{{answer[18:]}}{% else %}{{answer_number|string}}{% endif %}'
4
+ doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"Задача: "+question+"\nAnswer:"}}{% endif %}'
5
+ generation_kwargs:
6
+ do_sample: false
7
+ until:
8
+ - 'Задача:'
9
+ - </s>
10
+ - <|im_end|>
11
+ include: direct_yaml
12
+ task: mgsm_direct_ru
lm-evaluation/lm_eval/tasks/mgsm/direct/mgsm_direct_sw.yaml ADDED
@@ -0,0 +1,12 @@
1
+ # Generated by utils.py
2
+ dataset_name: sw
3
+ doc_to_target: '{% if answer is not none %}{{answer[25:]}}{% else %}{{answer_number|string}}{% endif %}'
4
+ doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"Swali: "+question+"\nAnswer:"}}{% endif %}'
5
+ generation_kwargs:
6
+ do_sample: false
7
+ until:
8
+ - 'Swali:'
9
+ - </s>
10
+ - <|im_end|>
11
+ include: direct_yaml
12
+ task: mgsm_direct_sw
lm-evaluation/lm_eval/tasks/mgsm/direct/mgsm_direct_th.yaml ADDED
@@ -0,0 +1,12 @@
1
+ # Generated by utils.py
2
+ dataset_name: th
3
+ doc_to_target: '{% if answer is not none %}{{answer[18:]}}{% else %}{{answer_number|string}}{% endif %}'
4
+ doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"โจทย์: "+question+"\nAnswer:"}}{% endif %}'
5
+ generation_kwargs:
6
+ do_sample: false
7
+ until:
8
+ - 'โจทย์:'
9
+ - </s>
10
+ - <|im_end|>
11
+ include: direct_yaml
12
+ task: mgsm_direct_th
lm-evaluation/lm_eval/tasks/mgsm/direct/mgsm_direct_zh.yaml ADDED
@@ -0,0 +1,12 @@
1
+ # Generated by utils.py
2
+ dataset_name: zh
3
+ doc_to_target: '{% if answer is not none %}{{answer[6:]}}{% else %}{{answer_number|string}}{% endif %}'
4
+ doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"问题: "+question+"\nAnswer:"}}{% endif %}'
5
+ generation_kwargs:
6
+ do_sample: false
7
+ until:
8
+ - '问题:'
9
+ - </s>
10
+ - <|im_end|>
11
+ include: direct_yaml
12
+ task: mgsm_direct_zh
lm-evaluation/lm_eval/tasks/mgsm/en_cot/cot_yaml ADDED
@@ -0,0 +1,36 @@
1
+ # This file will be included in the generated language-specific task configs.
2
+ # It doesn't have a yaml file extension as it is not meant to be imported directly
3
+ # by the harness.
4
+ group: mgsm_cot_native
5
+ dataset_path: juletxara/mgsm
6
+ dataset_name: null # Overridden by language-specific config.
7
+ output_type: generate_until
8
+ training_split: train
9
+ test_split: test
10
+ generation_kwargs:
11
+ until:
12
+ - "\n\n"
13
+ - "\n"
14
+ do_sample: false
15
+ temperature: 0.0
16
+ target_delimiter: " "
17
+ metric_list:
18
+ - metric: exact_match
19
+ aggregation: mean
20
+ higher_is_better: true
21
+ ignore_case: true
22
+ ignore_punctuation: true
23
+ filter_list:
24
+ - name: "strict-match"
25
+ filter:
26
+ - function: "regex"
27
+ regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)"
28
+ - function: "take_first"
29
+ - filter:
30
+ - function: regex
31
+ group_select: -1
32
+ regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+)
33
+ - function: take_first
34
+ name: flexible-extract
35
+ metadata:
36
+ version: 2.0
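The `strict-match` filter above only accepts an explicit GSM8K-style final-answer sentence, in contrast to the flexible fallback; a quick illustration (completions invented):

```python
import re

strict = re.compile(r"The answer is (\-?[0-9\.\,]+)")

print(strict.findall("6 + 11 = 17. The answer is 17"))       # ['17']
print(strict.findall("... so she ends up with 17 apples."))  # [] -- nothing extractable under strict-match
```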
lm-evaluation/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_bn.yaml ADDED
@@ -0,0 +1,12 @@
1
+ # Generated by utils.py
2
+ dataset_name: bn
3
+ doc_to_target: '{% if answer is not none %}{{answer[17:]}}{% else %}{{answer_number|string}}{% endif %}'
4
+ doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"প্রশ্ন: "+question+"\nStep-by-Step Answer:"}}{% endif %}'
5
+ generation_kwargs:
6
+ do_sample: false
7
+ until:
8
+ - 'প্রশ্ন:'
9
+ - </s>
10
+ - <|im_end|>
11
+ include: cot_yaml
12
+ task: mgsm_en_cot_bn
lm-evaluation/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_de.yaml ADDED
@@ -0,0 +1,12 @@
1
+ # Generated by utils.py
2
+ dataset_name: de
3
+ doc_to_target: '{% if answer is not none %}{{answer[29:]}}{% else %}{{answer_number|string}}{% endif %}'
4
+ doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Frage: "+question+"\nStep-by-Step Answer:"}}{% endif %}'
5
+ generation_kwargs:
6
+ do_sample: false
7
+ until:
8
+ - 'Frage:'
9
+ - </s>
10
+ - <|im_end|>
11
+ include: cot_yaml
12
+ task: mgsm_en_cot_de
lm-evaluation/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_en.yaml ADDED
@@ -0,0 +1,12 @@
1
+ # Generated by utils.py
2
+ dataset_name: en
3
+ doc_to_target: '{% if answer is not none %}{{answer[21:]}}{% else %}{{answer_number|string}}{% endif %}'
4
+ doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Question: "+question+"\nStep-by-Step Answer:"}}{% endif %}'
5
+ generation_kwargs:
6
+ do_sample: false
7
+ until:
8
+ - 'Question:'
9
+ - </s>
10
+ - <|im_end|>
11
+ include: cot_yaml
12
+ task: mgsm_en_cot_en
lm-evaluation/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_es.yaml ADDED
@@ -0,0 +1,12 @@
1
+ # Generated by utils.py
2
+ dataset_name: es
3
+ doc_to_target: '{% if answer is not none %}{{answer[23:]}}{% else %}{{answer_number|string}}{% endif %}'
4
+ doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Pregunta: "+question+"\nStep-by-Step Answer:"}}{% endif %}'
5
+ generation_kwargs:
6
+ do_sample: false
7
+ until:
8
+ - 'Pregunta:'
9
+ - </s>
10
+ - <|im_end|>
11
+ include: cot_yaml
12
+ task: mgsm_en_cot_es
lm-evaluation/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_fr.yaml ADDED
@@ -0,0 +1,12 @@
1
+ # Generated by utils.py
2
+ dataset_name: fr
3
+ doc_to_target: '{% if answer is not none %}{{answer[26:]}}{% else %}{{answer_number|string}}{% endif %}'
4
+ doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Question : "+question+"\nStep-by-Step Answer:"}}{% endif %}'
5
+ generation_kwargs:
6
+ do_sample: false
7
+ until:
8
+ - 'Question :'
9
+ - </s>
10
+ - <|im_end|>
11
+ include: cot_yaml
12
+ task: mgsm_en_cot_fr
lm-evaluation/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_ja.yaml ADDED
@@ -0,0 +1,12 @@
1
+ # Generated by utils.py
2
+ dataset_name: ja
3
+ doc_to_target: '{% if answer is not none %}{{answer[11:]}}{% else %}{{answer_number|string}}{% endif %}'
4
+ doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"問題: "+question+"\nStep-by-Step Answer:"}}{% endif %}'
5
+ generation_kwargs:
6
+ do_sample: false
7
+ until:
8
+ - '問題:'
9
+ - </s>
10
+ - <|im_end|>
11
+ include: cot_yaml
12
+ task: mgsm_en_cot_ja
lm-evaluation/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_ru.yaml ADDED
@@ -0,0 +1,12 @@
1
+ # Generated by utils.py
2
+ dataset_name: ru
3
+ doc_to_target: '{% if answer is not none %}{{answer[18:]}}{% else %}{{answer_number|string}}{% endif %}'
4
+ doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Задача: "+question+"\nStep-by-Step Answer:"}}{% endif %}'
5
+ generation_kwargs:
6
+ do_sample: false
7
+ until:
8
+ - 'Задача:'
9
+ - </s>
10
+ - <|im_end|>
11
+ include: cot_yaml
12
+ task: mgsm_en_cot_ru