Add files using upload-large-folder tool
- lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_named_entity_recognition.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_poetry_appreciate.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_poetry_quality_assessment.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_reading_comprehension.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/drop/README.md +53 -0
- lm-evaluation/build/lib/lm_eval/tasks/drop/default.yaml +26 -0
- lm-evaluation/build/lib/lm_eval/tasks/drop/utils.py +204 -0
- lm-evaluation/build/lib/lm_eval/tasks/glue/README.md +72 -0
- lm-evaluation/build/lib/lm_eval/tasks/glue/cola/default.yaml +16 -0
- lm-evaluation/build/lib/lm_eval/tasks/glue/mnli/default.yaml +14 -0
- lm-evaluation/build/lib/lm_eval/tasks/glue/mnli/mismatch.yaml +3 -0
- lm-evaluation/build/lib/lm_eval/tasks/glue/mnli/utils.py +6 -0
- lm-evaluation/build/lib/lm_eval/tasks/glue/mrpc/default.yaml +15 -0
- lm-evaluation/build/lib/lm_eval/tasks/glue/qnli/default.yaml +14 -0
- lm-evaluation/build/lib/lm_eval/tasks/glue/qqp/default.yaml +15 -0
- lm-evaluation/build/lib/lm_eval/tasks/glue/rte/default.yaml +14 -0
- lm-evaluation/build/lib/lm_eval/tasks/glue/sst2/default.yaml +14 -0
- lm-evaluation/build/lib/lm_eval/tasks/glue/wnli/default.yaml +14 -0
- lm-evaluation/build/lib/lm_eval/tasks/gsm8k/README.md +59 -0
- lm-evaluation/build/lib/lm_eval/tasks/gsm8k/gsm8k-cot-self-consistency.yaml +34 -0
- lm-evaluation/build/lib/lm_eval/tasks/gsm8k/gsm8k-cot-zeroshot.yaml +44 -0
- lm-evaluation/build/lib/lm_eval/tasks/gsm8k/gsm8k-cot.yaml +51 -0
- lm-evaluation/build/lib/lm_eval/tasks/gsm8k/gsm8k.yaml +45 -0
- lm-evaluation/build/lib/lm_eval/tasks/headqa/README.md +57 -0
- lm-evaluation/build/lib/lm_eval/tasks/headqa/headqa_en.yaml +23 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/README.md +94 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/direct_yaml +35 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_bn.yaml +12 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_de.yaml +12 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_en.yaml +12 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_es.yaml +12 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_fr.yaml +12 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_ja.yaml +12 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_ru.yaml +12 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_sw.yaml +12 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_te.yaml +12 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_th.yaml +12 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_zh.yaml +12 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/cot_yaml +36 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_bn.yaml +12 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_de.yaml +12 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_en.yaml +12 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_es.yaml +12 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_fr.yaml +12 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_ja.yaml +12 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_ru.yaml +12 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_sw.yaml +12 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_te.yaml +12 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_th.yaml +12 -0
- lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_zh.yaml +12 -0
lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_named_entity_recognition.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "named_entity_recognition"
"description": "以下是关于古汉语命名体识别的单项选择题,请直接给出正确答案的选项。\n\n"
"include": "_default_template_yaml"
"task": "aclue_named_entity_recognition"
lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_poetry_appreciate.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "poetry_appreciate"
"description": "以下是关于古诗词曲鉴赏的单项选择题,请直接给出正确答案的选项。\n\n"
"include": "_default_template_yaml"
"task": "aclue_poetry_appreciate"
lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_poetry_quality_assessment.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "poetry_quality_assessment"
"description": "以下是关于古诗词质量评估的单项选择题,请直接给出正确答案的选项。\n\n"
"include": "_default_template_yaml"
"task": "aclue_poetry_quality_assessment"
lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_reading_comprehension.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "reading_comprehension"
"description": "以下是关于古文阅读理解的单项选择题,请直接给出正确答案的选项。\n\n"
"include": "_default_template_yaml"
"task": "aclue_reading_comprehension"
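Each of these ACLUE configs overrides only a few keys and inherits everything else from `_default_template_yaml` via `include`. As a rough illustration of that composition pattern, here is a minimal sketch using `pyyaml`; `load_task_config` is a hypothetical helper, not the harness's actual loader:

```python
import os
import yaml  # pyyaml

def load_task_config(path):
    """Illustrative only: merge a child task config over its `include` parent."""
    with open(path, encoding="utf-8") as f:
        config = yaml.safe_load(f)
    parent_name = config.pop("include", None)
    if parent_name:
        parent_path = os.path.join(os.path.dirname(path), parent_name)
        parent = load_task_config(parent_path)
        # Child keys (task, dataset_name, description) win over parent defaults.
        parent.update(config)
        config = parent
    return config

# e.g. load_task_config("aclue_named_entity_recognition.yaml")
# -> the shared ACLUE template plus this file's dataset_name / description / task
```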
lm-evaluation/build/lib/lm_eval/tasks/drop/README.md
ADDED
@@ -0,0 +1,53 @@
# DROP

### Paper

Title: `DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs`

Abstract: https://aclanthology.org/attachments/N19-1246.Supplementary.pdf

DROP is a QA dataset which tests comprehensive understanding of paragraphs. In
this crowdsourced, adversarially-created, 96k question-answering benchmark, a
system must resolve multiple references in a question, map them onto a paragraph,
and perform discrete operations over them (such as addition, counting, or sorting).

Homepage: https://allenai.org/data/drop

Acknowledgement: This implementation is based on the official evaluation for `DROP`:
https://github.com/allenai/allennlp-reading-comprehension/blob/master/allennlp_rc/eval/drop_eval.py

### Citation

```
@misc{dua2019drop,
    title={DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs},
    author={Dheeru Dua and Yizhong Wang and Pradeep Dasigi and Gabriel Stanovsky and Sameer Singh and Matt Gardner},
    year={2019},
    eprint={1903.00161},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
```

### Groups and Tasks

#### Groups

* Not part of a group yet.

#### Tasks

* `drop`

### Checklist

For adding novel benchmarks/datasets to the library:
* [ ] Is the task an existing benchmark in the literature?
* [ ] Have you referenced the original paper that introduced the task?
* [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?


If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/build/lib/lm_eval/tasks/drop/default.yaml
ADDED
@@ -0,0 +1,26 @@
task: drop
dataset_path: EleutherAI/drop
output_type: generate_until
training_split: train
validation_split: validation
process_docs: !function utils.process_docs
doc_to_text: "{{passage}} {{question}}"
doc_to_target: "{{ answer|join(',')}}"
target_delimiter: ""
process_results: !function utils.process_results
should_decontaminate: true
doc_to_decontamination_query: "{{passage}} {{question}}"
generation_kwargs:
  until:
    - "."
metric_list:
  - metric: em
    aggregation: mean
    higher_is_better: true
  - metric: f1
    aggregation: mean
    higher_is_better: true
metadata:
  version: 3.0
dataset_kwargs:
  trust_remote_code: true
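The `doc_to_text` and `doc_to_target` fields above are Jinja2 templates rendered against each processed document. A minimal sketch of how they expand, with a made-up sample `doc`; `jinja2` is used directly here as a stand-in for the harness's own templating:

```python
from jinja2 import Template

doc = {
    "passage": "The Broncos scored twice in the fourth quarter.",
    "question": "How many times did the Broncos score in the fourth quarter?",
    "answer": ("2",),
}

doc_to_text = Template("{{passage}} {{question}}")
doc_to_target = Template("{{ answer|join(',') }}")

print(doc_to_text.render(**doc))   # passage and question concatenated
print(doc_to_target.render(**doc)) # "2" (tuple elements joined with commas)
```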
lm-evaluation/build/lib/lm_eval/tasks/drop/utils.py
ADDED
@@ -0,0 +1,204 @@
import re
import string

import numpy as np
from scipy.optimize import linear_sum_assignment


_ARTICLES = re.compile(r"\b(a|an|the)\b", re.UNICODE)


def process_docs(dataset):
    def _process(doc):
        return {
            "id": doc["query_id"],
            "passage": doc["passage"],
            "question": doc["question"],
            "answers": get_answers(doc),
        }

    return dataset.map(_process)


def get_answers(doc):
    def _flatten_validated_answers(validated_answers):
        """Flattens a dict of lists of validated answers.
        {"number": ['1', '8'], ...}
        -> [{"number": ['1'], ...}, {"number": ['8'], ...}]
        """
        valid_answers = []
        for i in range(len(validated_answers["number"])):
            valid_answers.append(
                {
                    "number": validated_answers["number"][i],
                    "date": validated_answers["date"][i],
                    "spans": validated_answers["spans"][i],
                }
            )
        return valid_answers

    answers = []
    answers_set = set()
    candidates = [doc["answer"]] + _flatten_validated_answers(doc["validated_answers"])
    for candidate in candidates:
        answer = parse_answer(candidate)
        if answer in answers_set:
            continue
        answers_set.add(answer)
        answers.append(answer)
    return answers


def parse_answer(answer):
    # NOTE: Everything is returned as a tuple for uniformity and hashability.
    if answer["number"] != "":
        return (str(answer["number"]),)
    if answer["spans"] != []:
        return tuple(answer["spans"])
    return (
        " ".join(
            [answer["date"]["day"], answer["date"]["month"], answer["date"]["year"]]
        ).strip(),
    )


def process_results(doc, results):
    preds, golds = results, doc["answers"]
    max_em = 0
    max_f1 = 0
    for gold_answer in golds:
        exact_match, f1_score = get_metrics(preds, gold_answer)
        if gold_answer[0].strip():
            max_em = max(max_em, exact_match)
            max_f1 = max(max_f1, f1_score)
    return {"em": max_em, "f1": max_f1}


def get_metrics(predicted, gold):
    """
    Takes a predicted answer and a gold answer (that are both either a string or a list of
    strings), and returns exact match and the DROP F1 metric for the prediction. If you are
    writing a script for evaluating objects in memory (say, the output of predictions during
    validation, or while training), this is the function you want to call, after using
    :func:`answer_json_to_strings` when reading the gold answer from the released data file.
    """
    predicted_bags = _answer_to_bags(predicted)
    gold_bags = _answer_to_bags(gold)

    if set(predicted_bags[0]) == set(gold_bags[0]) and len(predicted_bags[0]) == len(
        gold_bags[0]
    ):
        exact_match = 1.0
    else:
        exact_match = 0.0

    f1_per_bag = _align_bags(predicted_bags[1], gold_bags[1])
    f1 = np.mean(f1_per_bag)
    f1 = round(f1, 2)
    return exact_match, f1


def _answer_to_bags(answer):
    if isinstance(answer, (list, tuple)):
        raw_spans = answer
    else:
        raw_spans = [answer]
    normalized_spans = []
    token_bags = []
    for raw_span in raw_spans:
        normalized_span = _normalize(raw_span)
        normalized_spans.append(normalized_span)
        token_bags.append(set(normalized_span.split()))
    return normalized_spans, token_bags


def _align_bags(predicted, gold):
    """
    Takes gold and predicted answer sets and first finds the optimal 1-1 alignment
    between them and gets maximum metric values over all the answers.
    """
    scores = np.zeros([len(gold), len(predicted)])
    for gold_index, gold_item in enumerate(gold):
        for pred_index, pred_item in enumerate(predicted):
            if _match_numbers_if_present(gold_item, pred_item):
                scores[gold_index, pred_index] = _compute_f1(pred_item, gold_item)
    row_ind, col_ind = linear_sum_assignment(-scores)

    max_scores = np.zeros([max(len(gold), len(predicted))])
    for row, column in zip(row_ind, col_ind):
        max_scores[row] = max(max_scores[row], scores[row, column])
    return max_scores


def _compute_f1(predicted_bag, gold_bag):
    intersection = len(gold_bag.intersection(predicted_bag))
    if not predicted_bag:
        precision = 1.0
    else:
        precision = intersection / float(len(predicted_bag))
    if not gold_bag:
        recall = 1.0
    else:
        recall = intersection / float(len(gold_bag))
    f1 = (
        (2 * precision * recall) / (precision + recall)
        if not (precision == 0.0 and recall == 0.0)
        else 0.0
    )
    return f1


def _match_numbers_if_present(gold_bag, predicted_bag):
    gold_numbers = set()
    predicted_numbers = set()
    for word in gold_bag:
        if _is_number(word):
            gold_numbers.add(word)
    for word in predicted_bag:
        if _is_number(word):
            predicted_numbers.add(word)
    if (not gold_numbers) or gold_numbers.intersection(predicted_numbers):
        return True
    return False


def _is_number(text):
    try:
        float(text)
        return True
    except ValueError:
        return False


def _remove_articles(text):
    return _ARTICLES.sub(" ", text)


def _white_space_fix(text):
    return " ".join(text.split())


def _remove_punc(text):
    exclude = set(string.punctuation)
    if not _is_number(text):
        return "".join(ch for ch in text if ch not in exclude)
    else:
        return text


def _fix_number(text):
    return str(float(text)) if _is_number(text) else text


def _tokenize(text):
    return re.split(" |-", text)


def _normalize(answer):
    tokens = [
        _white_space_fix(_remove_articles(_fix_number(_remove_punc(token.lower()))))
        for token in _tokenize(answer)
    ]
    tokens = [token for token in tokens if token.strip()]
    normalized = " ".join(tokens).strip()
    return normalized
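To make the scoring above concrete, here is a small usage sketch of `get_metrics` and `_normalize` on a toy prediction/gold pair; the example strings are invented, the functions are the ones defined in this `utils.py`:

```python
# Assuming the functions above are in scope (pasted into a REPL or imported from this module).
exact_match, f1 = get_metrics(predicted="The Broncos", gold=("Denver Broncos",))
print(exact_match, f1)  # 0.0 exact match, partial token-level F1 (0.67 here)

# Normalization lowercases, drops articles and punctuation, and canonicalizes numbers:
print(_normalize("The  3.0 touchdowns!"))  # "3.0 touchdowns"
```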
lm-evaluation/build/lib/lm_eval/tasks/glue/README.md
ADDED
@@ -0,0 +1,72 @@
# GLUE
**NOTE**: GLUE benchmark tasks do not provide publicly accessible labels for their test sets, so we default to the validation sets for all sub-tasks.

### Paper

Title: `GLUE: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding`

Abstract: https://openreview.net/pdf?id=rJ4km2R5t7

The General Language Understanding Evaluation (GLUE) benchmark is a collection of
resources for training, evaluating, and analyzing natural language understanding
systems. GLUE consists of:
- A benchmark of nine sentence- or sentence-pair language understanding tasks built
on established existing datasets and selected to cover a diverse range of dataset
sizes, text genres, and degrees of difficulty, and
- A diagnostic dataset designed to evaluate and analyze model performance with
respect to a wide range of linguistic phenomena found in natural language.

Homepage: https://gluebenchmark.com/

### Citation

```
@inproceedings{wang-etal-2018-glue,
    title = "{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding",
    author = "Wang, Alex and
      Singh, Amanpreet and
      Michael, Julian and
      Hill, Felix and
      Levy, Omer and
      Bowman, Samuel",
    booktitle = "Proceedings of the 2018 {EMNLP} Workshop {B}lackbox{NLP}: Analyzing and Interpreting Neural Networks for {NLP}",
    month = nov,
    year = "2018",
    address = "Brussels, Belgium",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W18-5446",
    doi = "10.18653/v1/W18-5446",
    pages = "353--355",
    abstract = "Human ability to understand language is \textit{general, flexible, and robust}. In contrast, most NLU models above the word level are designed for a specific task and struggle with out-of-domain data. If we aspire to develop models with understanding beyond the detection of superficial correspondences between inputs and outputs, then it is critical to develop a unified model that can execute a range of linguistic tasks across different domains. To facilitate research in this direction, we present the General Language Understanding Evaluation (GLUE, gluebenchmark.com): a benchmark of nine diverse NLU tasks, an auxiliary dataset for probing models for understanding of specific linguistic phenomena, and an online platform for evaluating and comparing models. For some benchmark tasks, training data is plentiful, but for others it is limited or does not match the genre of the test set. GLUE thus favors models that can represent linguistic knowledge in a way that facilitates sample-efficient learning and effective knowledge-transfer across tasks. While none of the datasets in GLUE were created from scratch for the benchmark, four of them feature privately-held test data, which is used to ensure that the benchmark is used fairly. We evaluate baselines that use ELMo (Peters et al., 2018), a powerful transfer learning technique, as well as state-of-the-art sentence representation models. The best models still achieve fairly low absolute scores. Analysis with our diagnostic dataset yields similarly weak performance over all phenomena tested, with some exceptions.",
}
```

### Groups and Tasks

#### Groups

* `glue`: Run all GLUE subtasks.

#### Tasks

* `cola`
* `mnli`
* `mrpc`
* `qnli`
* `qqp`
* `rte`
* `sst2`
* `wnli`

### Checklist

For adding novel benchmarks/datasets to the library:
* [ ] Is the task an existing benchmark in the literature?
* [ ] Have you referenced the original paper that introduced the task?
* [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?


If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
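All of the GLUE configs below use `output_type: multiple_choice`, i.e. the model is scored by comparing the log-likelihood it assigns to each candidate continuation in `doc_to_choice` after the rendered prompt. A schematic sketch of that idea; `loglikelihood` here is a stand-in for whatever model backend is being evaluated, not harness code:

```python
from typing import Callable, Sequence

def score_multiple_choice(
    context: str,
    choices: Sequence[str],
    gold_index: int,
    loglikelihood: Callable[[str, str], float],
) -> dict:
    """Pick the choice whose continuation the model finds most likely."""
    scores = [loglikelihood(context, " " + choice) for choice in choices]
    pred = max(range(len(choices)), key=lambda i: scores[i])
    return {"acc": float(pred == gold_index)}

# e.g. for RTE: context = "<sentence1>\nQuestion: <sentence2> True or False?\nAnswer:"
#               choices = ["True", "False"], gold_index = label
```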
lm-evaluation/build/lib/lm_eval/tasks/glue/cola/default.yaml
ADDED
@@ -0,0 +1,16 @@
group: glue
task: cola
dataset_path: glue
dataset_name: cola
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "{{sentence}}\nQuestion: Does this sentence make sense?\nAnswer:"
doc_to_target: label
doc_to_choice: ["no", "yes"]
should_decontaminate: true
doc_to_decontamination_query: sentence
metric_list:
  - metric: mcc
metadata:
  version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/glue/mnli/default.yaml
ADDED
@@ -0,0 +1,14 @@
group: glue
task: mnli
dataset_path: glue
dataset_name: mnli
output_type: multiple_choice
training_split: train
validation_split: validation_matched
doc_to_text: !function utils.doc_to_text
doc_to_target: label
doc_to_choice: ["True", "Neither", "False"]
metric_list:
  - metric: acc
metadata:
  version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/glue/mnli/mismatch.yaml
ADDED
@@ -0,0 +1,3 @@
include: default.yaml
task: mnli_mismatch
validation_split: validation_mismatched
lm-evaluation/build/lib/lm_eval/tasks/glue/mnli/utils.py
ADDED
@@ -0,0 +1,6 @@
def doc_to_text(doc) -> str:
    return "{}\nQuestion: {} True, False or Neither?\nAnswer:".format(
        doc["premise"],
        doc["hypothesis"].strip()
        + ("" if doc["hypothesis"].strip().endswith(".") else "."),
    )
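For reference, this is how `doc_to_text` formats a made-up MNLI example; note that a period is appended only when the hypothesis does not already end with one:

```python
doc = {
    "premise": "A soccer game with multiple males playing",
    "hypothesis": "Some men are playing a sport",
}
print(doc_to_text(doc))
# A soccer game with multiple males playing
# Question: Some men are playing a sport. True, False or Neither?
# Answer:
```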
lm-evaluation/build/lib/lm_eval/tasks/glue/mrpc/default.yaml
ADDED
@@ -0,0 +1,15 @@
group: glue
task: mrpc
dataset_path: glue
dataset_name: mrpc
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do both sentences mean the same thing?\nAnswer:"
doc_to_target: label
doc_to_choice: ["no", "yes"]
metric_list:
  - metric: acc
  - metric: f1
metadata:
  version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/glue/qnli/default.yaml
ADDED
@@ -0,0 +1,14 @@
group: glue
task: qnli
dataset_path: glue
dataset_name: qnli
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "{{question}}\n{{sentence}}\nQuestion: Does this response answer the question?\nAnswer:"
doc_to_target: label
doc_to_choice: ["yes", "no"]
metric_list:
  - metric: acc
metadata:
  version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/glue/qqp/default.yaml
ADDED
@@ -0,0 +1,15 @@
group: glue
task: qqp
dataset_path: glue
dataset_name: qqp
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "Question 1: {{question1}}\nQuestion 2: {{question2}}\nQuestion: Do both questions ask the same thing?\nAnswer:"
doc_to_target: label
doc_to_choice: ["no", "yes"]
metric_list:
  - metric: acc
  - metric: f1
metadata:
  version: 2.0
lm-evaluation/build/lib/lm_eval/tasks/glue/rte/default.yaml
ADDED
@@ -0,0 +1,14 @@
group: glue
task: rte
dataset_path: glue
dataset_name: rte
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "{{sentence1}}\nQuestion: {{sentence2}} True or False?\nAnswer:"
doc_to_target: label
doc_to_choice: ["True", "False"]
metric_list:
  - metric: acc
metadata:
  version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/glue/sst2/default.yaml
ADDED
@@ -0,0 +1,14 @@
group: glue
task: sst2
dataset_path: glue
dataset_name: sst2
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "{{sentence}}\nQuestion: Is this sentence positive or negative?\nAnswer:"
doc_to_target: label
doc_to_choice: ["negative", "positive"]
metric_list:
  - metric: acc
metadata:
  version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/glue/wnli/default.yaml
ADDED
@@ -0,0 +1,14 @@
group: glue
task: wnli
dataset_path: glue
dataset_name: wnli
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "{{sentence1}}\nQuestion: {{sentence2}} True or False?\nAnswer:"
doc_to_target: label
doc_to_choice: ["False", "True"]
metric_list:
  - metric: acc
metadata:
  version: 2.0
lm-evaluation/build/lib/lm_eval/tasks/gsm8k/README.md
ADDED
@@ -0,0 +1,59 @@
# GSM8k

## Paper
Training Verifiers to Solve Math Word Problems
https://arxiv.org/abs/2110.14168

State-of-the-art language models can match human performance on many tasks, but
they still struggle to robustly perform multi-step mathematical reasoning. To
diagnose the failures of current models and support research, we introduce GSM8K,
a dataset of 8.5K high quality linguistically diverse grade school math word problems.
We find that even the largest transformer models fail to achieve high test performance,
despite the conceptual simplicity of this problem distribution.

NOTE: See the official implementation of the task:
https://github.com/openai/grade-school-math/blob/master/grade_school_math/calculator.py
for how to make use of the dataset's calculator annotations in your language
model's sample/generation function.

Homepage: https://github.com/openai/grade-school-math


## Citation
```
@misc{cobbe2021training,
    title={Training Verifiers to Solve Math Word Problems},
    author={Karl Cobbe and Vineet Kosaraju and Mohammad Bavarian and Jacob Hilton and Reiichiro Nakano and Christopher Hesse and John Schulman},
    year={2021},
    eprint={2110.14168},
    archivePrefix={arXiv},
    primaryClass={cs.LG}
}
```

### Groups and Tasks

#### Groups

- `math_word_problems`
- `chain_of_thought`
- `self_consistency`

#### Tasks

- `gsm8k_yaml`
- `gsm8k_cot`: GSM8K with Chain-of-Thought
- `gsm8k_cot_self_consistency`: GSM8K with Chain-of-Thought and Self-Consistency

### Checklist

- [x] Is in Eval-harness v1.0 ?
- [ ] Has been checked for regression from v1.0?
- [ ] Has been checked for equivalence with original paper methodology?
- [ ] "Main" checked variant clearly denoted?

### Variant Wishlist

- [ ] Variant with Calculator (see https://github.com/openai/grade-school-math/blob/master/grade_school_math/calculator.py for example implementation)
- [ ] Using Verifiers
- [ ] Majority voting "without CoT"
lm-evaluation/build/lib/lm_eval/tasks/gsm8k/gsm8k-cot-self-consistency.yaml
ADDED
@@ -0,0 +1,34 @@
include: gsm8k-cot.yaml
group:
  - chain_of_thought
  - self_consistency
task: gsm8k_cot_self_consistency
generation_kwargs:
  until:
    - "Q:"
    - "\n\n"
  do_sample: true
  temperature: 0.2
repeats: 64
filter_list:
  - name: "score-first" # pick only the first response, and report metrics on that
    filter:
      - function: "regex"
        regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)"
      - function: "take_first"
  - name: "maj@64"
    filter:
      - function: "regex"
        regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)"
      - function: "majority_vote"
      - function: "take_first"
  - name: "maj@8" # get Maj@8, via selecting the first 8 responses. Using a better estimator would be optimal.
    filter:
      - function: "take_first_k"
        k: 8
      - function: "regex"
        regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)"
      - function: "majority_vote"
      - function: "take_first"
metadata:
  version: 2.0
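The `maj@64` / `maj@8` filter chains above boil down to: extract a numeric answer from each sampled completion with the regex, then keep the most common value. A minimal sketch of that pipeline; the sample completions are invented, and the helper mimics the `regex` + `majority_vote` + `take_first` chain rather than using the harness's filter classes:

```python
import re
from collections import Counter

ANSWER_RE = re.compile(r"The answer is (\-?[0-9\.\,]*[0-9]+)")

def majority_answer(completions):
    """Extract 'The answer is N' from each sample, then majority-vote over N."""
    extracted = [m.group(1) for c in completions if (m := ANSWER_RE.search(c))]
    if not extracted:
        return None
    return Counter(extracted).most_common(1)[0][0]

samples = [
    "... so 4 + 4 = 8. The answer is 8.",
    "... therefore The answer is 8.",
    "... a miscount gives The answer is 9.",
]
print(majority_answer(samples))  # "8"
```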
lm-evaluation/build/lib/lm_eval/tasks/gsm8k/gsm8k-cot-zeroshot.yaml
ADDED
@@ -0,0 +1,44 @@
group:
  - math_word_problems
task: gsm8k_cot_zeroshot
dataset_path: gsm8k
dataset_name: main
output_type: generate_until
training_split: train
fewshot_split: train
test_split: test
doc_to_text: "Q: {{question}}\nA: Let's think step by step."
doc_to_target: "{{answer}}" #" {{answer.split('### ')[-1].rstrip()}}"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: false
    regexes_to_ignore:
      - ","
      - "\\$"
      - "(?s).*#### "
      - "\\.$"
generation_kwargs:
  until:
    - "Q:"
    - "</s>"
    - "<|im_end|>"
  do_sample: false
repeats: 1
num_fewshot: 0
filter_list:
  - name: "strict-match"
    filter:
      - function: "regex"
        regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)."
      - function: "take_first"
  - name: "flexible-extract"
    filter:
      - function: "regex"
        group_select: -1
        regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)"
      - function: "take_first"
metadata:
  version: 3.0
lm-evaluation/build/lib/lm_eval/tasks/gsm8k/gsm8k-cot.yaml
ADDED
@@ -0,0 +1,51 @@
group:
  - chain_of_thought
task: gsm8k_cot
dataset_path: gsm8k
dataset_name: main
output_type: generate_until
test_split: test
doc_to_text: "Q: There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?\nA: There are 15 trees originally. Then there were 21 trees after some more were planted. So there must have been 21 - 15 = 6. The answer is 6.\n\n\
  Q: If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?\nA: There are originally 3 cars. 2 more cars arrive. 3 + 2 = 5. The answer is 5.\n\n\
  Q: Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?\nA: Originally, Leah had 32 chocolates. Her sister had 42. So in total they had 32 + 42 = 74. After eating 35, they had 74 - 35 = 39. The answer is 39.\n\n\
  Q: Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?\nA: Jason started with 20 lollipops. Then he had 12 after giving some to Denny. So he gave Denny 20 - 12 = 8. The answer is 8.\n\n\
  Q: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?\nA: Shawn started with 5 toys. If he got 2 toys each from his mom and dad, then that is 4 more toys. 5 + 4 = 9. The answer is 9.\n\n\
  Q: There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?\nA: There were originally 9 computers. For each of 4 days, 5 more computers were added. So 5 * 4 = 20 computers were added. 9 + 20 is 29. The answer is 29.\n\n\
  Q: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?\nA: Michael started with 58 golf balls. After losing 23 on tuesday, he had 58 - 23 = 35. After losing 2 more, he had 35 - 2 = 33 golf balls. The answer is 33.\n\n\
  Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?\nA: Olivia had 23 dollars. 5 bagels for 3 dollars each will be 5 x 3 = 15 dollars. So she has 23 - 15 dollars left. 23 - 15 is 8. The answer is 8.\n\n\
  Q: {{question}}\nA:"
doc_to_target: "{{answer.split('####')[-1].strip()}}"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: false
    regexes_to_ignore:
      - ","
      - "\\$"
      - "(?s).*#### "
      - "\\.$"
generation_kwargs:
  until:
    - "Q:"
    - "</s>"
    - "<|im_end|>"
  do_sample: false
repeats: 1
num_fewshot: 0
filter_list:
  - name: "strict-match"
    filter:
      - function: "regex"
        regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)."
      - function: "take_first"
  - name: "flexible-extract"
    filter:
      - function: "regex"
        group_select: -1
        regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)"
      - function: "take_first"
metadata:
  version: 3.0
  num_fewshot: 8
lm-evaluation/build/lib/lm_eval/tasks/gsm8k/gsm8k.yaml
ADDED
@@ -0,0 +1,45 @@
group:
  - math_word_problems
task: gsm8k
dataset_path: gsm8k
dataset_name: main
output_type: generate_until
training_split: train
fewshot_split: train
test_split: test
doc_to_text: "Question: {{question}}\nAnswer:"
doc_to_target: "{{answer}}" #" {{answer.split('### ')[-1].rstrip()}}"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: false
    regexes_to_ignore:
      - ","
      - "\\$"
      - "(?s).*#### "
      - "\\.$"
generation_kwargs:
  until:
    - "Question:"
    - "</s>"
    - "<|im_end|>"
  do_sample: false
  temperature: 0.0
repeats: 1
num_fewshot: 5
filter_list:
  - name: "strict-match"
    filter:
      - function: "regex"
        regex_pattern: "#### (\\-?[0-9\\.\\,]+)"
      - function: "take_first"
  - name: "flexible-extract"
    filter:
      - function: "regex"
        group_select: -1
        regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)"
      - function: "take_first"
metadata:
  version: 3.0
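To illustrate the two filters above: `strict-match` expects the canonical GSM8K `#### <number>` marker, while `flexible-extract` with `group_select: -1` falls back to the last number-like span in the completion. A quick sketch on a made-up completion string:

```python
import re

completion = "She bought 5 bagels for $3 each, so 5 * 3 = 15. #### 15"

# strict-match: only accepts the '#### N' form
strict = re.search(r"#### (\-?[0-9\.\,]+)", completion)
print(strict.group(1))  # "15"

# flexible-extract: every number-like span; group_select: -1 keeps the last one
matches = re.findall(r"(-?[$0-9.,]{2,})|(-?[0-9]+)", completion)
last = [g for g in matches[-1] if g][0]  # each findall item is a (group1, group2) tuple
print(last)  # "15"
```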
lm-evaluation/build/lib/lm_eval/tasks/headqa/README.md
ADDED
@@ -0,0 +1,57 @@
# HEAD-QA

### Paper

HEAD-QA: A Healthcare Dataset for Complex Reasoning
https://arxiv.org/pdf/1906.04701.pdf

HEAD-QA is a multi-choice HEAlthcare Dataset. The questions come from exams to access a specialized position in the
Spanish healthcare system, and are challenging even for highly specialized humans. They are designed by the Ministerio
de Sanidad, Consumo y Bienestar Social.
The dataset contains questions about the following topics: medicine, nursing, psychology, chemistry, pharmacology and biology.

Homepage: https://aghie.github.io/head-qa/


### Citation

```
@inproceedings{vilares-gomez-rodriguez-2019-head,
    title = "{HEAD}-{QA}: A Healthcare Dataset for Complex Reasoning",
    author = "Vilares, David and
      G{\'o}mez-Rodr{\'i}guez, Carlos",
    booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2019",
    address = "Florence, Italy",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/P19-1092",
    doi = "10.18653/v1/P19-1092",
    pages = "960--966",
    abstract = "We present HEAD-QA, a multi-choice question answering testbed to encourage research on complex reasoning. The questions come from exams to access a specialized position in the Spanish healthcare system, and are challenging even for highly specialized humans. We then consider monolingual (Spanish) and cross-lingual (to English) experiments with information retrieval and neural techniques. We show that: (i) HEAD-QA challenges current methods, and (ii) the results lag well behind human performance, demonstrating its usefulness as a benchmark for future work.",
}
```

### Groups and Tasks

#### Groups

- `headqa`: Evaluates `headqa_en` and `headqa_es`

#### Tasks

* `headqa_en` - English variant of HEAD-QA
* `headqa_es` - Spanish variant of HEAD-QA

### Checklist

* [x] Is the task an existing benchmark in the literature?
* [ ] Have you referenced the original paper that introduced the task?
* [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?


If other tasks on this dataset are already supported:
* [x] Is the "Main" variant of this task clearly denoted?
* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
* [x] Same as LM Evaluation Harness v0.3.0 implementation
lm-evaluation/build/lib/lm_eval/tasks/headqa/headqa_en.yaml
ADDED
@@ -0,0 +1,23 @@
group:
  - headqa
task: headqa_en
dataset_path: EleutherAI/headqa
dataset_name: en
output_type: multiple_choice
training_split: train
validation_split: validation
test_split: test
doc_to_text: "Question: {{qtext}}\nAnswer:"
doc_to_target: "{{ra - 1}}"
doc_to_choice: "{{answers|map(attribute='atext')|list}}" # this will be cast to an int.
should_decontaminate: true
doc_to_decontamination_query: query
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
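In this config, HEAD-QA's `ra` field is 1-indexed, so `doc_to_target: "{{ra - 1}}"` yields the 0-based index into the rendered choice list (per the config's own comment, the rendered value is cast to an int). A small sketch of how the two templates evaluate on a made-up document, using `jinja2` directly:

```python
from jinja2 import Template

doc = {
    "qtext": "Which vitamin deficiency causes scurvy?",
    "ra": 2,  # 1-indexed gold answer
    "answers": [{"atext": "Vitamin A"}, {"atext": "Vitamin C"}, {"atext": "Vitamin D"}],
}

target = Template("{{ra - 1}}").render(**doc)
choices = Template("{{answers|map(attribute='atext')|list}}").render(**doc)
print(target)   # "1" -> cast to an integer choice index
print(choices)  # "['Vitamin A', 'Vitamin C', 'Vitamin D']"
```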
lm-evaluation/build/lib/lm_eval/tasks/mgsm/README.md
ADDED
@@ -0,0 +1,94 @@
# MGSM

### Paper

Title: `Language Models are Multilingual Chain-of-Thought Reasoners`

Abstract: https://arxiv.org/abs/2210.03057

Multilingual Grade School Math Benchmark (MGSM) is a benchmark of grade-school math problems, proposed in the paper [Language models are multilingual chain-of-thought reasoners](http://arxiv.org/abs/2210.03057).

The same 250 problems from [GSM8K](https://arxiv.org/abs/2110.14168) are each translated via human annotators in 10 languages. The 10 languages are:
- Spanish
- French
- German
- Russian
- Chinese
- Japanese
- Thai
- Swahili
- Bengali
- Telugu

GSM8K (Grade School Math 8K) is a dataset of 8.5K high quality linguistically diverse grade school math word problems. The dataset was created to support the task of question answering on basic mathematical problems that require multi-step reasoning.

You can find the input and targets for each of the ten languages (and English) as `.tsv` files.
We also include few-shot exemplars that are also manually translated from each language in `exemplars.py`.

Homepage: https://github.com/google-research/url-nlp/tree/main/mgsm


### Citation

```
@misc{cobbe2021training,
    title={Training Verifiers to Solve Math Word Problems},
    author={Karl Cobbe and Vineet Kosaraju and Mohammad Bavarian and Jacob Hilton and Reiichiro Nakano and Christopher Hesse and John Schulman},
    year={2021},
    eprint={2110.14168},
    archivePrefix={arXiv},
    primaryClass={cs.LG}
}
@misc{shi2022language,
    title={Language Models are Multilingual Chain-of-Thought Reasoners},
    author={Freda Shi and Mirac Suzgun and Markus Freitag and Xuezhi Wang and Suraj Srivats and Soroush Vosoughi and Hyung Won Chung and Yi Tay and Sebastian Ruder and Denny Zhou and Dipanjan Das and Jason Wei},
    year={2022},
    eprint={2210.03057},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
```

### Groups and Tasks

#### Groups

* `mgsm_direct`: Direct question
  * `mgsm_direct_bn`: Bengali
  * `mgsm_direct_de`: German
  * `mgsm_direct_en`: English
  * `mgsm_direct_es`: Spanish
  * `mgsm_direct_fr`: French
  * `mgsm_direct_ja`: Japanese
  * `mgsm_direct_ru`: Russian
  * `mgsm_direct_sw`: Swahili
  * `mgsm_direct_te`: Telugu
  * `mgsm_direct_th`: Thai
  * `mgsm_direct_zh`: Chinese
* `mgsm_cot_native`: Question with Answer followed by CoT prompt in the same language as the dataset.
  * `mgsm_cot_native_bn`: Bengali
  * `mgsm_cot_native_de`: German
  * `mgsm_cot_native_en`: English
  * `mgsm_cot_native_es`: Spanish
  * `mgsm_cot_native_fr`: French
  * `mgsm_cot_native_ja`: Japanese
  * `mgsm_cot_native_ru`: Russian
  * `mgsm_cot_native_sw`: Swahili
  * `mgsm_cot_native_te`: Telugu
  * `mgsm_cot_native_th`: Thai
  * `mgsm_cot_native_zh`: Chinese

Exemplar Samples: https://github.com/google-research/url-nlp/blob/main/mgsm/exemplars.py

### Checklist

For adding novel benchmarks/datasets to the library:
* [ ] Is the task an existing benchmark in the literature?
* [ ] Have you referenced the original paper that introduced the task?
* [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?


If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/direct_yaml
ADDED
@@ -0,0 +1,35 @@
# This file will be included in the generated language-specific task configs.
# It doesn't have a yaml file extension as it is not meant to be imported directly
# by the harness.
group: mgsm_direct
dataset_path: juletxara/mgsm
dataset_name: null # Overridden by language-specific config.
output_type: generate_until
training_split: train
test_split: test
target_delimiter: ""
generation_kwargs:
  until:
    - "\n\n"
    - "\n"
  do_sample: false
  temperature: 0.0
filter_list:
  - name: remove_whitespace
    filter:
      - function: remove_whitespace
      - function: take_first
  - filter:
      - function: regex
        group_select: -1
        regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+)
      - function: take_first
    name: flexible-extract
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
metadata:
  version: 2.0
lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_bn.yaml
ADDED
@@ -0,0 +1,12 @@
# Generated by utils.py
dataset_name: bn
doc_to_target: '{% if answer is not none %}{{answer[17:]}}{% else %}{{answer_number|string}}{% endif %}'
doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"প্রশ্ন: "+question+"\nAnswer:"}}{% endif %}'
generation_kwargs:
  do_sample: false
  until:
  - 'প্রশ্ন:'
  - </s>
  - <|im_end|>
include: direct_yaml
task: mgsm_direct_bn
lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_de.yaml
ADDED
@@ -0,0 +1,12 @@
# Generated by utils.py
dataset_name: de
doc_to_target: '{% if answer is not none %}{{answer[29:]}}{% else %}{{answer_number|string}}{% endif %}'
doc_to_text: '{% if answer is not none %}{{question+"\nAntwort:"}}{% else %}{{"Frage: "+question+"\nAntwort:"}}{% endif %}'
generation_kwargs:
  do_sample: false
  until:
  - 'Frage:'
  - </s>
  - <|im_end|>
include: direct_yaml
task: mgsm_direct_de
lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_en.yaml
ADDED
@@ -0,0 +1,12 @@
# Generated by utils.py
dataset_name: en
doc_to_target: '{% if answer is not none %}{{answer[21:]}}{% else %}{{answer_number|string}}{% endif %}'
doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"Question: "+question+"\nAnswer:"}}{% endif %}'
generation_kwargs:
  do_sample: false
  until:
  - 'Question:'
  - </s>
  - <|im_end|>
include: direct_yaml
task: mgsm_direct_en
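The conditional in `doc_to_text` formats few-shot exemplars (which carry a full `answer`) differently from test items (where `answer` is null and only `answer_number` is known). A quick sketch of both branches for the English config above; the sample docs are invented, and `jinja2` is used directly as a stand-in for the harness templating:

```python
from jinja2 import Template

# Template copied from the config above; the '\n' written inside the YAML single quotes
# reaches Jinja as a backslash-n, which Jinja expands to a newline in its string literals.
doc_to_text = Template(
    '{% if answer is not none %}{{question+"\\nAnswer:"}}'
    '{% else %}{{"Question: "+question+"\\nAnswer:"}}{% endif %}'
)

fewshot_doc = {"question": "Roger has 5 tennis balls ...", "answer": "Answer: ... 11.", "answer_number": 11}
test_doc = {"question": "Janet's ducks lay 16 eggs ...", "answer": None, "answer_number": 18}

print(doc_to_text.render(**fewshot_doc))  # few-shot branch: question + "\nAnswer:" (no prefix)
print(doc_to_text.render(**test_doc))     # test branch: "Question: " prefix added
```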
lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_es.yaml
ADDED
@@ -0,0 +1,12 @@
# Generated by utils.py
dataset_name: es
doc_to_target: '{% if answer is not none %}{{answer[23:]}}{% else %}{{answer_number|string}}{% endif %}'
doc_to_text: '{% if answer is not none %}{{question+"\nRespuesta:"}}{% else %}{{"Pregunta: "+question+"\nRespuesta:"}}{% endif %}'
generation_kwargs:
  do_sample: false
  until:
  - 'Pregunta:'
  - </s>
  - <|im_end|>
include: direct_yaml
task: mgsm_direct_es
lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_fr.yaml
ADDED
@@ -0,0 +1,12 @@
# Generated by utils.py
dataset_name: fr
doc_to_target: '{% if answer is not none %}{{answer[26:]}}{% else %}{{answer_number|string}}{% endif %}'
doc_to_text: '{% if answer is not none %}{{question+"\nRéponse :"}}{% else %}{{"Question : "+question+"\nRéponse :"}}{% endif %}'
generation_kwargs:
  do_sample: false
  until:
  - 'Question :'
  - </s>
  - <|im_end|>
include: direct_yaml
task: mgsm_direct_fr
lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_ja.yaml
ADDED
@@ -0,0 +1,12 @@
# Generated by utils.py
dataset_name: ja
doc_to_target: '{% if answer is not none %}{{answer[11:]}}{% else %}{{answer_number|string}}{% endif %}'
doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"問題: "+question+"\nAnswer:"}}{% endif %}'
generation_kwargs:
  do_sample: false
  until:
  - '問題:'
  - </s>
  - <|im_end|>
include: direct_yaml
task: mgsm_direct_ja
lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_ru.yaml
ADDED
@@ -0,0 +1,12 @@
# Generated by utils.py
dataset_name: ru
doc_to_target: '{% if answer is not none %}{{answer[18:]}}{% else %}{{answer_number|string}}{% endif %}'
doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"Задача: "+question+"\nAnswer:"}}{% endif %}'
generation_kwargs:
  do_sample: false
  until:
  - 'Задача:'
  - </s>
  - <|im_end|>
include: direct_yaml
task: mgsm_direct_ru
lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_sw.yaml
ADDED
@@ -0,0 +1,12 @@
# Generated by utils.py
dataset_name: sw
doc_to_target: '{% if answer is not none %}{{answer[25:]}}{% else %}{{answer_number|string}}{% endif %}'
doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"Swali: "+question+"\nAnswer:"}}{% endif %}'
generation_kwargs:
  do_sample: false
  until:
  - 'Swali:'
  - </s>
  - <|im_end|>
include: direct_yaml
task: mgsm_direct_sw
lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_te.yaml
ADDED
@@ -0,0 +1,12 @@
# Generated by utils.py
dataset_name: te
doc_to_target: '{% if answer is not none %}{{answer[19:]}}{% else %}{{answer_number|string}}{% endif %}'
doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"ప్రశ్న: "+question+"\nAnswer:"}}{% endif %}'
generation_kwargs:
  do_sample: false
  until:
  - 'ప్రశ్న:'
  - </s>
  - <|im_end|>
include: direct_yaml
task: mgsm_direct_te
lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_th.yaml
ADDED
@@ -0,0 +1,12 @@
# Generated by utils.py
dataset_name: th
doc_to_target: '{% if answer is not none %}{{answer[18:]}}{% else %}{{answer_number|string}}{% endif %}'
doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"โจทย์: "+question+"\nAnswer:"}}{% endif %}'
generation_kwargs:
  do_sample: false
  until:
  - 'โจทย์:'
  - </s>
  - <|im_end|>
include: direct_yaml
task: mgsm_direct_th
lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_zh.yaml
ADDED
@@ -0,0 +1,12 @@
# Generated by utils.py
dataset_name: zh
doc_to_target: '{% if answer is not none %}{{answer[6:]}}{% else %}{{answer_number|string}}{% endif %}'
doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"问题: "+question+"\nAnswer:"}}{% endif %}'
generation_kwargs:
  do_sample: false
  until:
  - '问题:'
  - </s>
  - <|im_end|>
include: direct_yaml
task: mgsm_direct_zh
lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/cot_yaml
ADDED
@@ -0,0 +1,36 @@
# This file will be included in the generated language-specific task configs.
# It doesn't have a yaml file extension as it is not meant to be imported directly
# by the harness.
group: mgsm_cot_native
dataset_path: juletxara/mgsm
dataset_name: null # Overridden by language-specific config.
output_type: generate_until
training_split: train
test_split: test
generation_kwargs:
  until:
    - "\n\n"
    - "\n"
  do_sample: false
  temperature: 0.0
target_delimiter: " "
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
filter_list:
  - name: "strict-match"
    filter:
      - function: "regex"
        regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)"
      - function: "take_first"
  - filter:
      - function: regex
        group_select: -1
        regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+)
      - function: take_first
    name: flexible-extract
metadata:
  version: 2.0
lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_bn.yaml
ADDED
@@ -0,0 +1,12 @@
# Generated by utils.py
dataset_name: bn
doc_to_target: '{% if answer is not none %}{{answer[17:]}}{% else %}{{answer_number|string}}{% endif %}'
doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"প্রশ্ন: "+question+"\nStep-by-Step Answer:"}}{% endif %}'
generation_kwargs:
  do_sample: false
  until:
  - 'প্রশ্ন:'
  - </s>
  - <|im_end|>
include: cot_yaml
task: mgsm_en_cot_bn
lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_de.yaml
ADDED
@@ -0,0 +1,12 @@
# Generated by utils.py
dataset_name: de
doc_to_target: '{% if answer is not none %}{{answer[29:]}}{% else %}{{answer_number|string}}{% endif %}'
doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Frage: "+question+"\nStep-by-Step Answer:"}}{% endif %}'
generation_kwargs:
  do_sample: false
  until:
  - 'Frage:'
  - </s>
  - <|im_end|>
include: cot_yaml
task: mgsm_en_cot_de
lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_en.yaml
ADDED
@@ -0,0 +1,12 @@
# Generated by utils.py
dataset_name: en
doc_to_target: '{% if answer is not none %}{{answer[21:]}}{% else %}{{answer_number|string}}{% endif %}'
doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Question: "+question+"\nStep-by-Step Answer:"}}{% endif %}'
generation_kwargs:
  do_sample: false
  until:
  - 'Question:'
  - </s>
  - <|im_end|>
include: cot_yaml
task: mgsm_en_cot_en
lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_es.yaml
ADDED
@@ -0,0 +1,12 @@
# Generated by utils.py
dataset_name: es
doc_to_target: '{% if answer is not none %}{{answer[23:]}}{% else %}{{answer_number|string}}{% endif %}'
doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Pregunta: "+question+"\nStep-by-Step Answer:"}}{% endif %}'
generation_kwargs:
  do_sample: false
  until:
  - 'Pregunta:'
  - </s>
  - <|im_end|>
include: cot_yaml
task: mgsm_en_cot_es
lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_fr.yaml
ADDED
@@ -0,0 +1,12 @@
# Generated by utils.py
dataset_name: fr
doc_to_target: '{% if answer is not none %}{{answer[26:]}}{% else %}{{answer_number|string}}{% endif %}'
doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Question : "+question+"\nStep-by-Step Answer:"}}{% endif %}'
generation_kwargs:
  do_sample: false
  until:
  - 'Question :'
  - </s>
  - <|im_end|>
include: cot_yaml
task: mgsm_en_cot_fr
lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_ja.yaml
ADDED
@@ -0,0 +1,12 @@
# Generated by utils.py
dataset_name: ja
doc_to_target: '{% if answer is not none %}{{answer[11:]}}{% else %}{{answer_number|string}}{% endif %}'
doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"問題: "+question+"\nStep-by-Step Answer:"}}{% endif %}'
generation_kwargs:
  do_sample: false
  until:
  - '問題:'
  - </s>
  - <|im_end|>
include: cot_yaml
task: mgsm_en_cot_ja
lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_ru.yaml
ADDED
@@ -0,0 +1,12 @@
# Generated by utils.py
dataset_name: ru
doc_to_target: '{% if answer is not none %}{{answer[18:]}}{% else %}{{answer_number|string}}{% endif %}'
doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Задача: "+question+"\nStep-by-Step Answer:"}}{% endif %}'
generation_kwargs:
  do_sample: false
  until:
  - 'Задача:'
  - </s>
  - <|im_end|>
include: cot_yaml
task: mgsm_en_cot_ru
lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_sw.yaml
ADDED
@@ -0,0 +1,12 @@
# Generated by utils.py
dataset_name: sw
doc_to_target: '{% if answer is not none %}{{answer[25:]}}{% else %}{{answer_number|string}}{% endif %}'
doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Swali: "+question+"\nStep-by-Step Answer:"}}{% endif %}'
generation_kwargs:
  do_sample: false
  until:
  - 'Swali:'
  - </s>
  - <|im_end|>
include: cot_yaml
task: mgsm_en_cot_sw
lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_te.yaml
ADDED
@@ -0,0 +1,12 @@
# Generated by utils.py
dataset_name: te
doc_to_target: '{% if answer is not none %}{{answer[19:]}}{% else %}{{answer_number|string}}{% endif %}'
doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"ప్రశ్న: "+question+"\nStep-by-Step Answer:"}}{% endif %}'
generation_kwargs:
  do_sample: false
  until:
  - 'ప్రశ్న:'
  - </s>
  - <|im_end|>
include: cot_yaml
task: mgsm_en_cot_te
lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_th.yaml
ADDED
@@ -0,0 +1,12 @@
# Generated by utils.py
dataset_name: th
doc_to_target: '{% if answer is not none %}{{answer[18:]}}{% else %}{{answer_number|string}}{% endif %}'
doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"โจทย์: "+question+"\nStep-by-Step Answer:"}}{% endif %}'
generation_kwargs:
  do_sample: false
  until:
  - 'โจทย์:'
  - </s>
  - <|im_end|>
include: cot_yaml
task: mgsm_en_cot_th
lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_zh.yaml
ADDED
@@ -0,0 +1,12 @@
# Generated by utils.py
dataset_name: zh
doc_to_target: '{% if answer is not none %}{{answer[6:]}}{% else %}{{answer_number|string}}{% endif %}'
doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"问题: "+question+"\nStep-by-Step Answer:"}}{% endif %}'
generation_kwargs:
  do_sample: false
  until:
  - '问题:'
  - </s>
  - <|im_end|>
include: cot_yaml
task: mgsm_en_cot_zh