applied-ai-018 committed (verified)
Commit aada8c5 · Parent: b13a949

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full set.
Files changed (50):
  1. lm-evaluation/build/lib/lm_eval/tasks/agieval/README.md +114 -0
  2. lm-evaluation/build/lib/lm_eval/tasks/agieval/aqua-rat.yaml +24 -0
  3. lm-evaluation/build/lib/lm_eval/tasks/agieval/gaokao-biology.yaml +6 -0
  4. lm-evaluation/build/lib/lm_eval/tasks/agieval/gaokao-chemistry.yaml +6 -0
  5. lm-evaluation/build/lib/lm_eval/tasks/agieval/gaokao-chinese.yaml +6 -0
  6. lm-evaluation/build/lib/lm_eval/tasks/agieval/gaokao-english.yaml +6 -0
  7. lm-evaluation/build/lib/lm_eval/tasks/agieval/gaokao-geography.yaml +6 -0
  8. lm-evaluation/build/lib/lm_eval/tasks/agieval/gaokao-history.yaml +6 -0
  9. lm-evaluation/build/lib/lm_eval/tasks/agieval/gaokao-mathcloze.yaml +25 -0
  10. lm-evaluation/build/lib/lm_eval/tasks/agieval/gaokao-mathqa.yaml +6 -0
  11. lm-evaluation/build/lib/lm_eval/tasks/agieval/gaokao-physics.yaml +6 -0
  12. lm-evaluation/build/lib/lm_eval/tasks/agieval/jec-qa-ca.yaml +6 -0
  13. lm-evaluation/build/lib/lm_eval/tasks/agieval/jec-qa-kd.yaml +6 -0
  14. lm-evaluation/build/lib/lm_eval/tasks/agieval/logiqa-en.yaml +7 -0
  15. lm-evaluation/build/lib/lm_eval/tasks/agieval/logiqa-zh.yaml +6 -0
  16. lm-evaluation/build/lib/lm_eval/tasks/agieval/lsat-ar.yaml +7 -0
  17. lm-evaluation/build/lib/lm_eval/tasks/agieval/lsat-lr.yaml +7 -0
  18. lm-evaluation/build/lib/lm_eval/tasks/agieval/lsat-rc.yaml +7 -0
  19. lm-evaluation/build/lib/lm_eval/tasks/agieval/math.yaml +25 -0
  20. lm-evaluation/build/lib/lm_eval/tasks/agieval/sat-en-without-passage.yaml +7 -0
  21. lm-evaluation/build/lib/lm_eval/tasks/agieval/sat-en.yaml +7 -0
  22. lm-evaluation/build/lib/lm_eval/tasks/agieval/sat-math.yaml +7 -0
  23. lm-evaluation/build/lib/lm_eval/tasks/agieval/utils.py +274 -0
  24. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_afr_Latn.yaml +4 -0
  25. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_apc_Arab.yaml +4 -0
  26. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_arb_Latn.yaml +4 -0
  27. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_bam_Latn.yaml +4 -0
  28. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_ben_Beng.yaml +4 -0
  29. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_ces_Latn.yaml +4 -0
  30. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_est_Latn.yaml +4 -0
  31. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_fra_Latn.yaml +4 -0
  32. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_fuv_Latn.yaml +4 -0
  33. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_heb_Hebr.yaml +4 -0
  34. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_hrv_Latn.yaml +4 -0
  35. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_hun_Latn.yaml +4 -0
  36. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_isl_Latn.yaml +4 -0
  37. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_kat_Geor.yaml +4 -0
  38. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_lao_Laoo.yaml +4 -0
  39. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_lit_Latn.yaml +4 -0
  40. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_mya_Mymr.yaml +4 -0
  41. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_nld_Latn.yaml +4 -0
  42. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_nya_Latn.yaml +4 -0
  43. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_pan_Guru.yaml +4 -0
  44. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_pol_Latn.yaml +4 -0
  45. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_som_Latn.yaml +4 -0
  46. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_ssw_Latn.yaml +4 -0
  47. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_tel_Telu.yaml +4 -0
  48. lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_uzn_Latn.yaml +4 -0
  49. lm-evaluation/build/lib/lm_eval/tasks/benchmarks/flan/_held_in_template_yaml +14 -0
  50. lm-evaluation/build/lib/lm_eval/tasks/benchmarks/flan/flan_held_in.yaml +331 -0
lm-evaluation/build/lib/lm_eval/tasks/agieval/README.md ADDED
@@ -0,0 +1,114 @@
# AGIEval

### Paper

Title: AGIEval: A Human-Centric Benchmark for Evaluating Foundation Models

Abstract: https://arxiv.org/abs/2304.06364.pdf

AGIEval is a human-centric benchmark specifically designed to evaluate the general abilities of foundation models in tasks pertinent to human cognition and problem-solving.
This benchmark is derived from 20 official, public, and high-standard admission and qualification exams intended for general human test-takers, such as general college admission tests (e.g., the Chinese College Entrance Exam (Gaokao) and the American SAT), law school admission tests, math competitions, lawyer qualification tests, and national civil service exams.

Homepage: https://github.com/ruixiangcui/AGIEval

### Citation

```
@misc{zhong2023agieval,
  title={AGIEval: A Human-Centric Benchmark for Evaluating Foundation Models},
  author={Wanjun Zhong and Ruixiang Cui and Yiduo Guo and Yaobo Liang and Shuai Lu and Yanlin Wang and Amin Saied and Weizhu Chen and Nan Duan},
  year={2023},
  eprint={2304.06364},
  archivePrefix={arXiv},
  primaryClass={cs.CL}
}
```

Please make sure to cite all the individual datasets in your paper when you use them. We provide the relevant citation information below:

```
@inproceedings{ling-etal-2017-program,
  title = "Program Induction by Rationale Generation: Learning to Solve and Explain Algebraic Word Problems",
  author = "Ling, Wang and
    Yogatama, Dani and
    Dyer, Chris and
    Blunsom, Phil",
  booktitle = "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
  month = jul,
  year = "2017",
  address = "Vancouver, Canada",
  publisher = "Association for Computational Linguistics",
  url = "https://aclanthology.org/P17-1015",
  doi = "10.18653/v1/P17-1015",
  pages = "158--167",
  abstract = "Solving algebraic word problems requires executing a series of arithmetic operations{---}a program{---}to obtain a final answer. However, since programs can be arbitrarily complicated, inducing them directly from question-answer pairs is a formidable challenge. To make this task more feasible, we solve these problems by generating answer rationales, sequences of natural language and human-readable mathematical expressions that derive the final answer through a series of small steps. Although rationales do not explicitly specify programs, they provide a scaffolding for their structure via intermediate milestones. To evaluate our approach, we have created a new 100,000-sample dataset of questions, answers and rationales. Experimental results show that indirect supervision of program learning via answer rationales is a promising strategy for inducing arithmetic programs.",
}

@inproceedings{hendrycksmath2021,
  title={Measuring Mathematical Problem Solving With the MATH Dataset},
  author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt},
  journal={NeurIPS},
  year={2021}
}

@inproceedings{Liu2020LogiQAAC,
  title={LogiQA: A Challenge Dataset for Machine Reading Comprehension with Logical Reasoning},
  author={Jian Liu and Leyang Cui and Hanmeng Liu and Dandan Huang and Yile Wang and Yue Zhang},
  booktitle={International Joint Conference on Artificial Intelligence},
  year={2020}
}

@inproceedings{zhong2019jec,
  title={JEC-QA: A Legal-Domain Question Answering Dataset},
  author={Zhong, Haoxi and Xiao, Chaojun and Tu, Cunchao and Zhang, Tianyang and Liu, Zhiyuan and Sun, Maosong},
  booktitle={Proceedings of AAAI},
  year={2020},
}

@article{Wang2021FromLT,
  title={From LSAT: The Progress and Challenges of Complex Reasoning},
  author={Siyuan Wang and Zhongkun Liu and Wanjun Zhong and Ming Zhou and Zhongyu Wei and Zhumin Chen and Nan Duan},
  journal={IEEE/ACM Transactions on Audio, Speech, and Language Processing},
  year={2021},
  volume={30},
  pages={2201-2216}
}
```

### Groups and Tasks

#### Groups

- `agieval`: Evaluates all tasks listed below.

- `agieval_en`: Evaluates all English subtasks: `agieval_aqua_rat`, `agieval_gaokao_english`, `agieval_logiqa_en`, `agieval_lsat_*`, `agieval_sat_*`, `agieval_math`

- `agieval_cn`: Evaluates all Chinese subtasks:
  `agieval_gaokao_biology`, `agieval_gaokao_chemistry`, `agieval_gaokao_chinese`, `agieval_gaokao_geography`,
  `agieval_gaokao_history`, `agieval_gaokao_mathqa`, `agieval_gaokao_mathcloze`, `agieval_gaokao_physics`, `agieval_jec_qa_ca`, `agieval_jec_qa_kd`, `agieval_logiqa_zh`

- `agieval_nous`: Evaluates a specific subset of AGIEval tasks (multiple-choice and English-only), namely those in https://github.com/teknium1/LLM-Benchmark-Logs/blob/main/benchmark-logs/Mistral-7B-Base.md

#### Tasks

- `agieval_aqua_rat`
- `agieval_gaokao_biology`
- `agieval_gaokao_chemistry`
- `agieval_gaokao_chinese`
- `agieval_gaokao_english`
- `agieval_gaokao_geography`
- `agieval_gaokao_history`
- `agieval_gaokao_mathqa`
- `agieval_gaokao_mathcloze`
- `agieval_gaokao_physics`
- `agieval_jec_qa_ca`
- `agieval_jec_qa_kd`
- `agieval_logiqa_en`
- `agieval_logiqa_zh`
- `agieval_lsat_ar`
- `agieval_lsat_lr`
- `agieval_lsat_rc`
- `agieval_sat_en`
- `agieval_sat_en_without_passage`
- `agieval_sat_math`
- `agieval_math`
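These group and task names are what you pass to the evaluation harness when choosing what to run. The snippet below is only a minimal sketch, assuming the `lm_eval.simple_evaluate` Python entry point of lm-evaluation-harness; the model name is a placeholder, not part of this commit.

```python
# Minimal sketch (assumes lm-evaluation-harness exposes lm_eval.simple_evaluate;
# the pretrained model below is only a placeholder).
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",                                      # Hugging Face transformers backend
    model_args="pretrained=EleutherAI/pythia-160m",  # placeholder model
    tasks=["agieval_en"],                            # a group name expands to all its subtasks
    num_fewshot=0,
)
print(results["results"])                            # per-task acc / acc_norm
```

Passing a single task name (e.g. `agieval_sat_math`) instead of a group name would run just that task.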
lm-evaluation/build/lib/lm_eval/tasks/agieval/aqua-rat.yaml ADDED
@@ -0,0 +1,24 @@
group:
  - agieval
  - agieval_en
  - agieval_nous
task: agieval_aqua_rat
dataset_path: hails/agieval-aqua-rat
dataset_name: null
output_type: multiple_choice
training_split: null
validation_split: null
test_split: test
doc_to_text: "{{query}}"
doc_to_target: "{{gold}}"
doc_to_choice: "{{choices}}"
process_results: !function utils.process_results_mcqa
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/agieval/gaokao-biology.yaml ADDED
@@ -0,0 +1,6 @@
include: aqua-rat.yaml
group:
  - agieval
  - agieval_cn
task: agieval_gaokao_biology
dataset_path: hails/agieval-gaokao-biology
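This config, like the other per-subject files below, stays short because `include: aqua-rat.yaml` pulls in the shared multiple-choice settings and only the differing keys (`group`, `task`, `dataset_path`) are overridden. The following is a simplified, hypothetical illustration of that shallow merge; it is not lm-eval's actual loader, which also resolves relative include paths and the custom `!function` tag.

```python
# Simplified sketch of how an `include:`-based task config resolves:
# the child's keys override the included base's keys.
base = {  # subset of aqua-rat.yaml
    "output_type": "multiple_choice",
    "test_split": "test",
    "doc_to_text": "{{query}}",
    "task": "agieval_aqua_rat",
    "dataset_path": "hails/agieval-aqua-rat",
}
child = {  # gaokao-biology.yaml minus its `include:` key
    "group": ["agieval", "agieval_cn"],
    "task": "agieval_gaokao_biology",
    "dataset_path": "hails/agieval-gaokao-biology",
}
merged = {**base, **child}
print(merged["task"])         # agieval_gaokao_biology (overridden)
print(merged["output_type"])  # multiple_choice (inherited)
```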
lm-evaluation/build/lib/lm_eval/tasks/agieval/gaokao-chemistry.yaml ADDED
@@ -0,0 +1,6 @@
include: aqua-rat.yaml
group:
  - agieval
  - agieval_cn
task: agieval_gaokao_chemistry
dataset_path: hails/agieval-gaokao-chemistry
lm-evaluation/build/lib/lm_eval/tasks/agieval/gaokao-chinese.yaml ADDED
@@ -0,0 +1,6 @@
include: aqua-rat.yaml
group:
  - agieval
  - agieval_cn
task: agieval_gaokao_chinese
dataset_path: hails/agieval-gaokao-chinese
lm-evaluation/build/lib/lm_eval/tasks/agieval/gaokao-english.yaml ADDED
@@ -0,0 +1,6 @@
include: aqua-rat.yaml
group:
  - agieval
  - agieval_en # categorizing as EN because the AGIEval codebase lists this as in `english_qa_tasks`
task: agieval_gaokao_english
dataset_path: hails/agieval-gaokao-english
lm-evaluation/build/lib/lm_eval/tasks/agieval/gaokao-geography.yaml ADDED
@@ -0,0 +1,6 @@
include: aqua-rat.yaml
group:
  - agieval
  - agieval_cn
task: agieval_gaokao_geography
dataset_path: hails/agieval-gaokao-geography
lm-evaluation/build/lib/lm_eval/tasks/agieval/gaokao-history.yaml ADDED
@@ -0,0 +1,6 @@
include: aqua-rat.yaml
group:
  - agieval
  - agieval_cn
task: agieval_gaokao_history
dataset_path: hails/agieval-gaokao-history
lm-evaluation/build/lib/lm_eval/tasks/agieval/gaokao-mathcloze.yaml ADDED
@@ -0,0 +1,25 @@
group:
  - agieval
  - agieval_cn
task: agieval_gaokao_mathcloze
dataset_path: hails/agieval-gaokao-mathcloze
dataset_name: null
output_type: generate_until
training_split: null
validation_split: null
test_split: test
doc_to_text: "{{query}}"
doc_to_target: "{{answer}}"
process_results: !function utils.process_results
generation_kwargs:
  max_gen_toks: 32
  do_sample: False
  temperature: 0.0
  until:
    - "Q:"
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/agieval/gaokao-mathqa.yaml ADDED
@@ -0,0 +1,6 @@
include: aqua-rat.yaml
group:
  - agieval
  - agieval_cn
task: agieval_gaokao_mathqa
dataset_path: hails/agieval-gaokao-mathqa
lm-evaluation/build/lib/lm_eval/tasks/agieval/gaokao-physics.yaml ADDED
@@ -0,0 +1,6 @@
include: aqua-rat.yaml
group:
  - agieval
  - agieval_cn
task: agieval_gaokao_physics
dataset_path: hails/agieval-gaokao-physics
lm-evaluation/build/lib/lm_eval/tasks/agieval/jec-qa-ca.yaml ADDED
@@ -0,0 +1,6 @@
include: aqua-rat.yaml
group:
  - agieval
  - agieval_cn
task: agieval_jec_qa_ca
dataset_path: hails/agieval-jec-qa-ca
lm-evaluation/build/lib/lm_eval/tasks/agieval/jec-qa-kd.yaml ADDED
@@ -0,0 +1,6 @@
include: aqua-rat.yaml
group:
  - agieval
  - agieval_cn
task: agieval_jec_qa_kd
dataset_path: hails/agieval-jec-qa-kd
lm-evaluation/build/lib/lm_eval/tasks/agieval/logiqa-en.yaml ADDED
@@ -0,0 +1,7 @@
include: aqua-rat.yaml
group:
  - agieval
  - agieval_nous
  - agieval_en
task: agieval_logiqa_en
dataset_path: hails/agieval-logiqa-en
lm-evaluation/build/lib/lm_eval/tasks/agieval/logiqa-zh.yaml ADDED
@@ -0,0 +1,6 @@
include: aqua-rat.yaml
group:
  - agieval
  - agieval_cn
task: agieval_logiqa_zh
dataset_path: hails/agieval-logiqa-zh
lm-evaluation/build/lib/lm_eval/tasks/agieval/lsat-ar.yaml ADDED
@@ -0,0 +1,7 @@
include: aqua-rat.yaml
group:
  - agieval
  - agieval_nous
  - agieval_en
task: agieval_lsat_ar
dataset_path: hails/agieval-lsat-ar
lm-evaluation/build/lib/lm_eval/tasks/agieval/lsat-lr.yaml ADDED
@@ -0,0 +1,7 @@
include: aqua-rat.yaml
group:
  - agieval
  - agieval_nous
  - agieval_en
task: agieval_lsat_lr
dataset_path: hails/agieval-lsat-lr
lm-evaluation/build/lib/lm_eval/tasks/agieval/lsat-rc.yaml ADDED
@@ -0,0 +1,7 @@
include: aqua-rat.yaml
group:
  - agieval
  - agieval_nous
  - agieval_en
task: agieval_lsat_rc
dataset_path: hails/agieval-lsat-rc
lm-evaluation/build/lib/lm_eval/tasks/agieval/math.yaml ADDED
@@ -0,0 +1,25 @@
group:
  - agieval
  - agieval_en
task: agieval_math
dataset_path: hails/agieval-math
dataset_name: null
output_type: generate_until
training_split: null
validation_split: null
test_split: test
doc_to_text: "{{query}}"
doc_to_target: "{{answer}}"
process_results: !function utils.process_results
generation_kwargs:
  max_gen_toks: 32
  do_sample: False
  temperature: 0.0
  until:
    - "Q:"
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/agieval/sat-en-without-passage.yaml ADDED
@@ -0,0 +1,7 @@
include: aqua-rat.yaml
group:
  - agieval
  - agieval_nous
  - agieval_en
task: agieval_sat_en_without_passage
dataset_path: hails/agieval-sat-en-without-passage
lm-evaluation/build/lib/lm_eval/tasks/agieval/sat-en.yaml ADDED
@@ -0,0 +1,7 @@
include: aqua-rat.yaml
group:
  - agieval
  - agieval_nous
  - agieval_en
task: agieval_sat_en
dataset_path: hails/agieval-sat-en
lm-evaluation/build/lib/lm_eval/tasks/agieval/sat-math.yaml ADDED
@@ -0,0 +1,7 @@
include: aqua-rat.yaml
group:
  - agieval
  - agieval_nous
  - agieval_en
task: agieval_sat_math
dataset_path: hails/agieval-sat-math
lm-evaluation/build/lib/lm_eval/tasks/agieval/utils.py ADDED
@@ -0,0 +1,274 @@
# Answer parsing and normalization code, from
# https://github.com/ruixiangcui/AGIEval/blob/main/src/
# math_equivalence.py and post_process.py
import re
from typing import Dict, List

import numpy as np


def parse_math_answer(raw_string):
    def remove_boxed(s):
        left = "\\boxed{"
        try:
            assert s[: len(left)] == left
            assert s[-1] == "}"
            answer = s[len(left) : -1]
            if "=" in answer:
                answer = answer.split("=")[-1].lstrip(" ")
            return answer
        except Exception:
            return None

    def last_boxed_only_string(string):
        idx = string.rfind("\\boxed")
        if idx < 0:
            idx = string.rfind("\\fbox")
            if idx < 0:
                return None
        i = idx
        right_brace_idx = None
        num_left_braces_open = 0
        while i < len(string):
            if string[i] == "{":
                num_left_braces_open += 1
            if string[i] == "}":
                num_left_braces_open -= 1
                if num_left_braces_open == 0:
                    right_brace_idx = i
                    break
            i += 1

        if right_brace_idx is None:
            retval = None
        else:
            retval = string[idx : right_brace_idx + 1]

        return retval

    def get_answer_with_dollar_sign(s):
        first_pattern = "\$(.*)\$"
        last_match = None
        matches = re.findall(first_pattern, s)
        if matches:
            last_match = matches[-1]
            if "=" in last_match:
                last_match = last_match.split("=")[-1].lstrip(" ")
        return last_match

    def get_answer_without_dollar_sign(s):
        last_match = None
        if "=" in s:
            last_match = s.split("=")[-1].lstrip(" ").rstrip(".")
            if "\\n" in last_match:
                last_match = last_match.split("\\n")[0]
        else:
            pattern = "(?:\\$)?\d+(?:\.\d+)?(?![\w\d])"
            matches = re.findall(pattern, s)
            if matches:
                last_match = matches[-1]
        return last_match

    if "\\boxed" in raw_string:
        answer = remove_boxed(last_boxed_only_string(raw_string))
    else:
        answer = get_answer_with_dollar_sign(raw_string)
        if not answer:
            answer = get_answer_without_dollar_sign(raw_string)
    return answer


# code from https://github.com/hendrycks/math/blob/main/modeling/math_equivalence.py
def _fix_fracs(string):
    substrs = string.split("\\frac")
    new_str = substrs[0]
    if len(substrs) > 1:
        substrs = substrs[1:]
        for substr in substrs:
            new_str += "\\frac"
            if substr[0] == "{":
                new_str += substr
            else:
                try:
                    assert len(substr) >= 2
                except Exception:
                    return string
                a = substr[0]
                b = substr[1]
                if b != "{":
                    if len(substr) > 2:
                        post_substr = substr[2:]
                        new_str += "{" + a + "}{" + b + "}" + post_substr
                    else:
                        new_str += "{" + a + "}{" + b + "}"
                else:
                    if len(substr) > 2:
                        post_substr = substr[2:]
                        new_str += "{" + a + "}" + b + post_substr
                    else:
                        new_str += "{" + a + "}" + b
    string = new_str
    return string


def _fix_a_slash_b(string):
    if len(string.split("/")) != 2:
        return string
    a = string.split("/")[0]
    b = string.split("/")[1]
    try:
        a = int(a)
        b = int(b)
        assert string == "{}/{}".format(a, b)
        new_string = "\\frac{" + str(a) + "}{" + str(b) + "}"
        return new_string
    except Exception:
        return string


def _remove_right_units(string):
    # "\\text{ " only ever occurs (at least in the val set) when describing units
    if "\\text{ " in string:
        splits = string.split("\\text{ ")
        assert len(splits) == 2
        return splits[0]
    else:
        return string


def _fix_sqrt(string):
    if "\\sqrt" not in string:
        return string
    splits = string.split("\\sqrt")
    new_string = splits[0]
    for split in splits[1:]:
        if split[0] != "{":
            a = split[0]
            new_substr = "\\sqrt{" + a + "}" + split[1:]
        else:
            new_substr = "\\sqrt" + split
        new_string += new_substr
    return new_string


def _strip_string(string):
    # linebreaks
    string = string.replace("\n", "")
    # print(string)

    # remove inverse spaces
    string = string.replace("\\!", "")
    # print(string)

    # replace \\ with \
    string = string.replace("\\\\", "\\")
    # print(string)

    # replace tfrac and dfrac with frac
    string = string.replace("tfrac", "frac")
    string = string.replace("dfrac", "frac")
    # print(string)

    # remove \left and \right
    string = string.replace("\\left", "")
    string = string.replace("\\right", "")
    # print(string)

    # Remove circ (degrees)
    string = string.replace("^{\\circ}", "")
    string = string.replace("^\\circ", "")

    # remove dollar signs
    string = string.replace("\\$", "")

    # remove units (on the right)
    string = _remove_right_units(string)

    # remove percentage
    string = string.replace("\\%", "")
    string = string.replace("\%", "")

    # " 0." equivalent to " ." and "{0." equivalent to "{." Alternatively, add "0" if "." is the start of the string
    string = string.replace(" .", " 0.")
    string = string.replace("{.", "{0.")
    # if empty, return empty string
    if len(string) == 0:
        return string
    if string[0] == ".":
        string = "0" + string

    # to consider: get rid of e.g. "k = " or "q = " at beginning
    if len(string.split("=")) == 2:
        if len(string.split("=")[0]) <= 2:
            string = string.split("=")[1]

    # fix sqrt3 --> sqrt{3}
    string = _fix_sqrt(string)

    # remove spaces
    string = string.replace(" ", "")

    # \frac1b or \frac12 --> \frac{1}{b} and \frac{1}{2}, etc. Even works with \frac1{72} (but not \frac{72}1). Also does a/b --> \\frac{a}{b}
    string = _fix_fracs(string)

    # manually change 0.5 --> \frac{1}{2}
    if string == "0.5":
        string = "\\frac{1}{2}"

    # NOTE: X/Y changed to \frac{X}{Y} in dataset, but in simple cases fix in case the model output is X/Y
    string = _fix_a_slash_b(string)

    return string


def is_equiv(str1, str2, verbose=False):
    if str1 is None and str2 is None:
        print("WARNING: Both None")
        return True
    if str1 is None or str2 is None:
        return False

    str1, str2 = parse_math_answer(str1), parse_math_answer(str2)

    try:
        ss1 = _strip_string(str1)
        ss2 = _strip_string(str2)
        if verbose:
            print(ss1, ss2)
        return ss1 == ss2
    except Exception:
        return str1 == str2


def process_results(doc: dict, results: List[str]) -> Dict[str, int]:
    candidate = results[0]

    gold = doc["answer"]

    if not gold:
        print(doc, candidate, gold)
    if is_equiv(candidate, gold):
        retval = 1
    else:
        retval = 0

    results = {
        "acc": retval,
    }
    return results


# use a custom process_results() function, because AGIEval can have multiple valid answers
def process_results_mcqa(doc, results):
    results = [result[0] for result in results]

    gold = doc["gold"]

    acc = 1.0 if int(np.argmax(results)) in gold else 0.0
    completion_len = np.array([float(len(i)) for i in doc["choices"]])
    acc_norm = 1.0 if int(np.argmax(results / completion_len)) in gold else 0.0

    return {
        "acc": acc,
        "acc_norm": acc_norm,
    }
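As a quick illustration of how the two scoring hooks above behave, here is a toy sketch; the documents and loglikelihood values are invented, but the field names (`gold`, `choices`) match the AGIEval datasets these configs point to, and the functions are assumed to be in scope (e.g. this module imported directly).

```python
# Toy illustration of the scoring hooks defined above (all values are made up).

# multiple_choice tasks: `results` holds one (loglikelihood, is_greedy) pair per choice
doc = {"gold": [1], "choices": ["(A) 3", "(B) 4", "(C) 5"]}
lls = [(-2.3, False), (-0.7, True), (-3.1, False)]
print(process_results_mcqa(doc, lls))
# -> {'acc': 1.0, 'acc_norm': 1.0}  (choice index 1 has the highest loglikelihood)

# generate_until tasks: both strings are parsed and normalized before comparison
print(is_equiv("The answer is $\\frac{1}{2}$", "0.5"))
# -> True ("0.5" normalizes to \frac{1}{2})
```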
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_afr_Latn.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "afr_Latn"
"include": "_default_template_yaml"
"task": "belebele_afr_Latn"
"test_split": "afr_Latn"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_apc_Arab.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "apc_Arab"
"include": "_default_template_yaml"
"task": "belebele_apc_Arab"
"test_split": "apc_Arab"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_arb_Latn.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "arb_Latn"
"include": "_default_template_yaml"
"task": "belebele_arb_Latn"
"test_split": "arb_Latn"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_bam_Latn.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "bam_Latn"
"include": "_default_template_yaml"
"task": "belebele_bam_Latn"
"test_split": "bam_Latn"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_ben_Beng.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "ben_Beng"
"include": "_default_template_yaml"
"task": "belebele_ben_Beng"
"test_split": "ben_Beng"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_ces_Latn.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "ces_Latn"
"include": "_default_template_yaml"
"task": "belebele_ces_Latn"
"test_split": "ces_Latn"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_est_Latn.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "est_Latn"
"include": "_default_template_yaml"
"task": "belebele_est_Latn"
"test_split": "est_Latn"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_fra_Latn.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "fra_Latn"
"include": "_default_template_yaml"
"task": "belebele_fra_Latn"
"test_split": "fra_Latn"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_fuv_Latn.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "fuv_Latn"
"include": "_default_template_yaml"
"task": "belebele_fuv_Latn"
"test_split": "fuv_Latn"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_heb_Hebr.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "heb_Hebr"
"include": "_default_template_yaml"
"task": "belebele_heb_Hebr"
"test_split": "heb_Hebr"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_hrv_Latn.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "hrv_Latn"
"include": "_default_template_yaml"
"task": "belebele_hrv_Latn"
"test_split": "hrv_Latn"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_hun_Latn.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "hun_Latn"
"include": "_default_template_yaml"
"task": "belebele_hun_Latn"
"test_split": "hun_Latn"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_isl_Latn.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "isl_Latn"
"include": "_default_template_yaml"
"task": "belebele_isl_Latn"
"test_split": "isl_Latn"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_kat_Geor.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "kat_Geor"
"include": "_default_template_yaml"
"task": "belebele_kat_Geor"
"test_split": "kat_Geor"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_lao_Laoo.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "lao_Laoo"
"include": "_default_template_yaml"
"task": "belebele_lao_Laoo"
"test_split": "lao_Laoo"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_lit_Latn.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "lit_Latn"
"include": "_default_template_yaml"
"task": "belebele_lit_Latn"
"test_split": "lit_Latn"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_mya_Mymr.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "mya_Mymr"
"include": "_default_template_yaml"
"task": "belebele_mya_Mymr"
"test_split": "mya_Mymr"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_nld_Latn.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "nld_Latn"
"include": "_default_template_yaml"
"task": "belebele_nld_Latn"
"test_split": "nld_Latn"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_nya_Latn.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "nya_Latn"
"include": "_default_template_yaml"
"task": "belebele_nya_Latn"
"test_split": "nya_Latn"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_pan_Guru.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "pan_Guru"
"include": "_default_template_yaml"
"task": "belebele_pan_Guru"
"test_split": "pan_Guru"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_pol_Latn.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "pol_Latn"
"include": "_default_template_yaml"
"task": "belebele_pol_Latn"
"test_split": "pol_Latn"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_som_Latn.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "som_Latn"
"include": "_default_template_yaml"
"task": "belebele_som_Latn"
"test_split": "som_Latn"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_ssw_Latn.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "ssw_Latn"
"include": "_default_template_yaml"
"task": "belebele_ssw_Latn"
"test_split": "ssw_Latn"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_tel_Telu.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "tel_Telu"
"include": "_default_template_yaml"
"task": "belebele_tel_Telu"
"test_split": "tel_Telu"
lm-evaluation/build/lib/lm_eval/tasks/belebele/belebele_uzn_Latn.yaml ADDED
@@ -0,0 +1,4 @@
"fewshot_split": "uzn_Latn"
"include": "_default_template_yaml"
"task": "belebele_uzn_Latn"
"test_split": "uzn_Latn"
lm-evaluation/build/lib/lm_eval/tasks/benchmarks/flan/_held_in_template_yaml ADDED
@@ -0,0 +1,14 @@
output_type: generate_until
test_split: null
doc_to_choice: null
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
generation_kwargs:
  until:
    - "</s>"
  do_sample: false
  temperature: 0.0
metadata:
  version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/benchmarks/flan/flan_held_in.yaml ADDED
@@ -0,0 +1,331 @@
group: flan_held_in
group_alias: Flan (Held-In)
task:
  # ANLI R1
  - group: anli_r1_flan
    group_alias: ANLI R1
    task:
      - task: anli_r1
        task_alias: prompt-0
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r1
        task_alias: prompt-1
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r1
        task_alias: prompt-2
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r1
        task_alias: prompt-3
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r1
        task_alias: prompt-4
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r1
        task_alias: prompt-5
        include: _held_in_template_yaml
        doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{premise}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r1
        task_alias: prompt-6
        include: _held_in_template_yaml
        doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{premise}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r1
        task_alias: prompt-7
        include: _held_in_template_yaml
        doc_to_text: "Can we draw the following hypothesis from the context (see options)? \n\nContext:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r1
        task_alias: prompt-8
        include: _held_in_template_yaml
        doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{premise}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
  # ANLI R2
  - group: anli_r2_flan
    group_alias: ANLI R2
    task:
      - task: anli_r2
        task_alias: prompt-0
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r2
        task_alias: prompt-1
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r2
        task_alias: prompt-2
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r2
        task_alias: prompt-3
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r2
        task_alias: prompt-4
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r2
        task_alias: prompt-5
        include: _held_in_template_yaml
        doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{premise}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r2
        task_alias: prompt-6
        include: _held_in_template_yaml
        doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{premise}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r2
        task_alias: prompt-7
        include: _held_in_template_yaml
        doc_to_text: "Can we draw the following hypothesis from the context (see options)? \n\nContext:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r2
        task_alias: prompt-8
        include: _held_in_template_yaml
        doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{premise}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
  # ANLI R3
  - group: anli_r3_flan
    group_alias: ANLI R3
    task:
      - task: anli_r3
        task_alias: prompt-0
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r3
        task_alias: prompt-1
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r3
        task_alias: prompt-2
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r3
        task_alias: prompt-3
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r3
        task_alias: prompt-4
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r3
        task_alias: prompt-5
        include: _held_in_template_yaml
        doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{premise}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r3
        task_alias: prompt-6
        include: _held_in_template_yaml
        doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{premise}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r3
        task_alias: prompt-7
        include: _held_in_template_yaml
        doc_to_text: "Can we draw the following hypothesis from the context (see options)? \n\nContext:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r3
        task_alias: prompt-8
        include: _held_in_template_yaml
        doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{premise}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
  # Arc Easy
  - group: arc_easy_flan
    group_alias: Arc Easy
    task:
      - task: arc_easy
        task_alias: prompt-0
        include: _held_in_template_yaml
        doc_to_text: "{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_easy
        task_alias: prompt-1
        include: _held_in_template_yaml
        doc_to_text: "Question: {{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}\nAnswer:"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_easy
        task_alias: prompt-2
        include: _held_in_template_yaml
        doc_to_text: "Question: {{question}}\n\nWhat is the correct answer to the question from the following choices?\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_easy
        task_alias: prompt-3
        include: _held_in_template_yaml
        doc_to_text: "Q: {{question}}\nWhat is the correct answer to this question?\nOPTIONS:\n- {{choices.text|join('\n- ')}}...A:"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_easy
        task_alias: prompt-4
        include: _held_in_template_yaml
        doc_to_text: "Choose your answer?\n\n{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_easy
        task_alias: prompt-5
        include: _held_in_template_yaml
        doc_to_text: "Answer the question\n\n{{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_easy
        task_alias: prompt-6
        include: _held_in_template_yaml
        doc_to_text: "{{question}}\n\nPick the answer from these options\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
  # Arc Challenge
  - group: arc_challenge_flan
    group_alias: Arc Challenge
    task:
      - task: arc_challenge
        task_alias: prompt-0
        include: _held_in_template_yaml
        doc_to_text: "{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_challenge
        task_alias: prompt-1
        include: _held_in_template_yaml
        doc_to_text: "Question: {{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}\nAnswer:"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_challenge
        task_alias: prompt-2
        include: _held_in_template_yaml
        doc_to_text: "Question: {{question}}\n\nWhat is the correct answer to the question from the following choices?\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_challenge
        task_alias: prompt-3
        include: _held_in_template_yaml
        doc_to_text: "Q: {{question}}\nWhat is the correct answer to this question?\nOPTIONS:\n- {{choices.text|join('\n- ')}}...A:"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_challenge
        task_alias: prompt-4
        include: _held_in_template_yaml
        doc_to_text: "Choose your answer?\n\n{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_challenge
        task_alias: prompt-5
        include: _held_in_template_yaml
        doc_to_text: "Answer the question\n\n{{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_challenge
        task_alias: prompt-6
        include: _held_in_template_yaml
        doc_to_text: "{{question}}\n\nPick the answer from these options\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
  # BoolQ
  - group: boolq_flan
    group_alias: BoolQ
    task:
      - task: boolq
        task_alias: prompt-0
        include: _held_in_template_yaml
        doc_to_text: "{{passage}}\n\nCan we conclude that {{question}}?\n\nOPTIONS:\n- no\n- yes"
        doc_to_target: "{{['no', 'yes'][label]}}"
      - task: boolq
        task_alias: prompt-1
        include: _held_in_template_yaml
        doc_to_text: "{{passage}}\n\nIs it true that {{question}}?\n\nOPTIONS:\n- no\n- yes"
        doc_to_target: "{{['no', 'yes'][label]}}"
      - task: boolq
        task_alias: prompt-2
        include: _held_in_template_yaml
        doc_to_text: "{{passage}}\n\n{{question}}?\n\nOPTIONS:\n- no\n- yes"
        doc_to_target: "{{['no', 'yes'][label]}}"
      - task: boolq
        task_alias: prompt-3
        include: _held_in_template_yaml
        doc_to_text: "Text: {{passage}}\n\nQuestion: {{question}}?\n\nOPTIONS:\n- no\n- yes"
        doc_to_target: "{{['no', 'yes'][label]}}"
      - task: boolq
        task_alias: prompt-4
        include: _held_in_template_yaml
        doc_to_text: "{{passage}}\n\nWhat's the best answer to this question: {{question}}?\n\nOPTIONS:\n- no\n- yes"
        doc_to_target: "{{['no', 'yes'][label]}}"
      - task: boolq
        task_alias: prompt-5
        include: _held_in_template_yaml
        doc_to_text: "{{passage}}\nBased on the above text what's the best answer to this question: {{question}}?\n\nOPTIONS:\n- no\n- yes"
        doc_to_target: "{{['no', 'yes'][label]}}"
      - task: boolq
        task_alias: prompt-6
        include: _held_in_template_yaml
        doc_to_text: "{{passage}}\nAnswer this question making sure that the answer is supposed by the text: {{question}}?\n\nOPTIONS:\n- no\n- yes"
        doc_to_target: "{{['no', 'yes'][label]}}"
      - task: boolq
        task_alias: prompt-7
        include: _held_in_template_yaml
        doc_to_text: "{{passage}}\n\nIs the following statement correct based on the text\n\n{{question}}\n\nOPTIONS:\n- no\n- yes"
        doc_to_target: "{{['no', 'yes'][label]}}"
      - task: boolq
        task_alias: prompt-8
        include: _held_in_template_yaml
        doc_to_text: "{{passage}}\n\nIs this statement correct \"{{question}}\"?\n\nOPTIONS:\n- no\n- yes"
        doc_to_target: "{{['no', 'yes'][label]}}"
      - task: boolq
        task_alias: prompt-9
        include: _held_in_template_yaml
        doc_to_text: "Is it true that {{question}} based on the following text?\n\n{{passage}}\n\nOPTIONS:\n- no\n- yes"
        doc_to_target: "{{['no', 'yes'][label]}}"
  # RTE
  - group: rte_flan
    group_alias: RTE
    task:
      - task: rte
        task_alias: prompt-0
        include: _held_in_template_yaml
        doc_to_text: "{{sentence1}}\n\nQuestion with options: Based on the paragraph above can we conclude that \"{{sentence2}}\"?\n\nOPTIONS:\n- yes\n- no"
        doc_to_target: "{{['yes', 'no'][label]}}"
      - task: rte
        task_alias: prompt-1
        include: _held_in_template_yaml
        doc_to_text: "{{sentence1}}\n\nBased on that paragraph can we conclude that the sentence below is true?\n{{sentence2}}\n\nOPTIONS:\n- yes\n- no"
        doc_to_target: "{{['yes', 'no'][label]}}"
      - task: rte
        task_alias: prompt-2
        include: _held_in_template_yaml
        doc_to_text: "{{sentence1}}\n\nQ with options: Can we draw the following conclusion?\n{{sentence2}}\n\nOPTIONS:\n- yes\n- no"
        doc_to_target: "{{['yes', 'no'][label]}}"
      - task: rte
        task_alias: prompt-3
        include: _held_in_template_yaml
        doc_to_text: "{{sentence1}}\nDoes this next sentence follow, given the preceding text?\n{{sentence2}}\n\nOPTIONS:\n- yes\n- no"
        doc_to_target: "{{['yes', 'no'][label]}}"
      - task: rte
        task_alias: prompt-4
        include: _held_in_template_yaml
        doc_to_text: "{{sentence1}}\nOPTIONS:\n- yes\n- no\nQuestion: Can we infer the following?\n{{sentence2}}"
        doc_to_target: "{{['yes', 'no'][label]}}"
      - task: rte
        task_alias: prompt-5
        include: _held_in_template_yaml
        doc_to_text: "Read the following paragraph and determine if the hypothesis is true. Select from options at the end:\n\n{{sentence1}}\n\nHypothesis: {{sentence2}}\nOPTIONS:\n- yes\n- no\nThe answer is"
        doc_to_target: "{{['yes', 'no'][label]}}"
      - task: rte
        task_alias: prompt-6
        include: _held_in_template_yaml
        doc_to_text: "Read the text and determine if the sentence is true:\n\n{{sentence1}}\n\nSentence: {{sentence2}}\nOPTIONS:\n- yes\n- no\nA:"
        doc_to_target: "{{['yes', 'no'][label]}}"
      - task: rte
        task_alias: prompt-7
        include: _held_in_template_yaml
        doc_to_text: "Question with options: can we draw the following hypothesis from the context? \n\nContext:\n\n{{sentence1}}\n\nHypothesis: {{sentence2}}\nOPTIONS:\n- yes\n- no\nA:"
        doc_to_target: "{{['yes', 'no'][label]}}"
      - task: rte
        task_alias: prompt-8
        include: _held_in_template_yaml
        doc_to_text: "Determine if the sentence is true based on the text below. Choose from options.\n{{sentence2}}\n\n{{sentence1}}\nOPTIONS:\n- yes\n- no"
        doc_to_target: "{{['yes', 'no'][label]}}"
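The `doc_to_text` and `doc_to_target` fields throughout this file are Jinja2 templates, so each prompt variant above is just a different rendering of the same document fields. Below is a small sketch of how one of the BoolQ variants renders; the passage, question, and label values are invented placeholders.

```python
# Sketch: rendering one BoolQ prompt variant from above with Jinja2.
# The document values here are invented placeholders.
from jinja2 import Template

doc = {"passage": "Grapes are toxic to dogs.", "question": "can dogs eat grapes", "label": 0}

doc_to_text = Template(
    "{{passage}}\n\nCan we conclude that {{question}}?\n\nOPTIONS:\n- no\n- yes"
)
doc_to_target = Template("{{['no', 'yes'][label]}}")

print(doc_to_text.render(**doc))
print("target:", doc_to_target.render(**doc))  # -> target: no
```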