diff --git a/lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_named_entity_recognition.yaml b/lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_named_entity_recognition.yaml new file mode 100644 index 0000000000000000000000000000000000000000..566e93019b994528bb003f46fb458ed725ef8af1 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_named_entity_recognition.yaml @@ -0,0 +1,4 @@ +"dataset_name": "named_entity_recognition" +"description": "以下是关于古汉语命名体识别的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "aclue_named_entity_recognition" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_poetry_appreciate.yaml b/lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_poetry_appreciate.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4642992674a1f159fe101859dead4509df6c8166 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_poetry_appreciate.yaml @@ -0,0 +1,4 @@ +"dataset_name": "poetry_appreciate" +"description": "以下是关于古诗词曲鉴赏的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "aclue_poetry_appreciate" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_poetry_quality_assessment.yaml b/lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_poetry_quality_assessment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a7a7bee2c4ca59e0dc7b2f3fdc08371a9a585d42 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_poetry_quality_assessment.yaml @@ -0,0 +1,4 @@ +"dataset_name": "poetry_quality_assessment" +"description": "以下是关于古诗词质量评估的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "aclue_poetry_quality_assessment" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_reading_comprehension.yaml b/lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_reading_comprehension.yaml new file mode 100644 index 0000000000000000000000000000000000000000..92f2455d8089bcc3b7d1ff8b99c03144b5b7d61d --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_reading_comprehension.yaml @@ -0,0 +1,4 @@ +"dataset_name": "reading_comprehension" +"description": "以下是关于古文阅读理解的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "aclue_reading_comprehension" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/drop/README.md b/lm-evaluation/build/lib/lm_eval/tasks/drop/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6b7fc47b7165034bd74c524048f5f54ea8d041cf --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/drop/README.md @@ -0,0 +1,53 @@ +# DROP + +### Paper + +Title: `DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs` + +Abstract: https://aclanthology.org/attachments/N19-1246.Supplementary.pdf + +DROP is a QA dataset which tests comprehensive understanding of paragraphs. In +this crowdsourced, adversarially-created, 96k question-answering benchmark, a +system must resolve multiple references in a question, map them onto a paragraph, +and perform discrete operations over them (such as addition, counting, or sorting). 
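+
+Scoring follows the official DROP evaluation, ported in the `drop/utils.py` added later in this diff: each prediction is normalized (lower-casing, article and punctuation removal, number canonicalization) and compared against every gold answer, keeping the best exact-match and bag-of-tokens F1. A minimal sketch of calling those helpers (the import path is illustrative; the functions live in `drop/utils.py`):
+
+```python
+from utils import get_metrics  # drop/utils.py from this diff
+
+# Articles, case and punctuation are stripped before comparison.
+em, f1 = get_metrics("The Eagles", "eagles")
+# em == 1.0, f1 == 1.0
+
+# Number mismatch: the normalized spans differ (EM = 0) and, because the
+# gold bag contains a number the prediction lacks, the F1 alignment also
+# scores 0 for this pair.
+em, f1 = get_metrics("10 points", "12 points")
+# em == 0.0, f1 == 0.0
+```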
+ +Homepage: https://allenai.org/data/drop + +Acknowledgement: This implementation is based on the official evaluation for `DROP`: +https://github.com/allenai/allennlp-reading-comprehension/blob/master/allennlp_rc/eval/drop_eval.py + +### Citation + +``` +@misc{dua2019drop, + title={DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs}, + author={Dheeru Dua and Yizhong Wang and Pradeep Dasigi and Gabriel Stanovsky and Sameer Singh and Matt Gardner}, + year={2019}, + eprint={1903.00161}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet. + +#### Tasks + +* `drop` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/build/lib/lm_eval/tasks/drop/default.yaml b/lm-evaluation/build/lib/lm_eval/tasks/drop/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4a936121524950e8a89822058cb2b29f244f31a4 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/drop/default.yaml @@ -0,0 +1,26 @@ +task: drop +dataset_path: EleutherAI/drop +output_type: generate_until +training_split: train +validation_split: validation +process_docs: !function utils.process_docs +doc_to_text: "{{passage}} {{question}}" +doc_to_target: "{{ answer|join(',')}}" +target_delimiter: "" +process_results: !function utils.process_results +should_decontaminate: true +doc_to_decontamination_query: "{{passage}} {{question}}" +generation_kwargs: + until: + - "." +metric_list: + - metric: em + aggregation: mean + higher_is_better: true + - metric: f1 + aggregation: mean + higher_is_better: true +metadata: + version: 3.0 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/drop/utils.py b/lm-evaluation/build/lib/lm_eval/tasks/drop/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..54093bb4d28e954035e76d8764a014ca99b99d8d --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/drop/utils.py @@ -0,0 +1,204 @@ +import re +import string + +import numpy as np +from scipy.optimize import linear_sum_assignment + + +_ARTICLES = re.compile(r"\b(a|an|the)\b", re.UNICODE) + + +def process_docs(dataset): + def _process(doc): + return { + "id": doc["query_id"], + "passage": doc["passage"], + "question": doc["question"], + "answers": get_answers(doc), + } + + return dataset.map(_process) + + +def get_answers(doc): + def _flatten_validated_answers(validated_answers): + """Flattens a dict of lists of validated answers. 
+ {"number": ['1', '8'], ...} + -> [{"number": ['1'], ...}, {"number": ['8'], ...}] + """ + valid_answers = [] + for i in range(len(validated_answers["number"])): + valid_answers.append( + { + "number": validated_answers["number"][i], + "date": validated_answers["date"][i], + "spans": validated_answers["spans"][i], + } + ) + return valid_answers + + answers = [] + answers_set = set() + candidates = [doc["answer"]] + _flatten_validated_answers(doc["validated_answers"]) + for candidate in candidates: + answer = parse_answer(candidate) + if answer in answers_set: + continue + answers_set.add(answer) + answers.append(answer) + return answers + + +def parse_answer(answer): + # NOTE: Everything is returned as a tuple for uniformity and hashability. + if answer["number"] != "": + return (str(answer["number"]),) + if answer["spans"] != []: + return tuple(answer["spans"]) + return ( + " ".join( + [answer["date"]["day"], answer["date"]["month"], answer["date"]["year"]] + ).strip(), + ) + + +def process_results(doc, results): + preds, golds = results, doc["answers"] + max_em = 0 + max_f1 = 0 + for gold_answer in golds: + exact_match, f1_score = get_metrics(preds, gold_answer) + if gold_answer[0].strip(): + max_em = max(max_em, exact_match) + max_f1 = max(max_f1, f1_score) + return {"em": max_em, "f1": max_f1} + + +def get_metrics(predicted, gold): + """ + Takes a predicted answer and a gold answer (that are both either a string or a list of + strings), and returns exact match and the DROP F1 metric for the prediction. If you are + writing a script for evaluating objects in memory (say, the output of predictions during + validation, or while training), this is the function you want to call, after using + :func:`answer_json_to_strings` when reading the gold answer from the released data file. + """ + predicted_bags = _answer_to_bags(predicted) + gold_bags = _answer_to_bags(gold) + + if set(predicted_bags[0]) == set(gold_bags[0]) and len(predicted_bags[0]) == len( + gold_bags[0] + ): + exact_match = 1.0 + else: + exact_match = 0.0 + + f1_per_bag = _align_bags(predicted_bags[1], gold_bags[1]) + f1 = np.mean(f1_per_bag) + f1 = round(f1, 2) + return exact_match, f1 + + +def _answer_to_bags(answer): + if isinstance(answer, (list, tuple)): + raw_spans = answer + else: + raw_spans = [answer] + normalized_spans = [] + token_bags = [] + for raw_span in raw_spans: + normalized_span = _normalize(raw_span) + normalized_spans.append(normalized_span) + token_bags.append(set(normalized_span.split())) + return normalized_spans, token_bags + + +def _align_bags(predicted, gold): + """ + Takes gold and predicted answer sets and first finds the optimal 1-1 alignment + between them and gets maximum metric values over all the answers. 
+ """ + scores = np.zeros([len(gold), len(predicted)]) + for gold_index, gold_item in enumerate(gold): + for pred_index, pred_item in enumerate(predicted): + if _match_numbers_if_present(gold_item, pred_item): + scores[gold_index, pred_index] = _compute_f1(pred_item, gold_item) + row_ind, col_ind = linear_sum_assignment(-scores) + + max_scores = np.zeros([max(len(gold), len(predicted))]) + for row, column in zip(row_ind, col_ind): + max_scores[row] = max(max_scores[row], scores[row, column]) + return max_scores + + +def _compute_f1(predicted_bag, gold_bag): + intersection = len(gold_bag.intersection(predicted_bag)) + if not predicted_bag: + precision = 1.0 + else: + precision = intersection / float(len(predicted_bag)) + if not gold_bag: + recall = 1.0 + else: + recall = intersection / float(len(gold_bag)) + f1 = ( + (2 * precision * recall) / (precision + recall) + if not (precision == 0.0 and recall == 0.0) + else 0.0 + ) + return f1 + + +def _match_numbers_if_present(gold_bag, predicted_bag): + gold_numbers = set() + predicted_numbers = set() + for word in gold_bag: + if _is_number(word): + gold_numbers.add(word) + for word in predicted_bag: + if _is_number(word): + predicted_numbers.add(word) + if (not gold_numbers) or gold_numbers.intersection(predicted_numbers): + return True + return False + + +def _is_number(text): + try: + float(text) + return True + except ValueError: + return False + + +def _remove_articles(text): + return _ARTICLES.sub(" ", text) + + +def _white_space_fix(text): + return " ".join(text.split()) + + +def _remove_punc(text): + exclude = set(string.punctuation) + if not _is_number(text): + return "".join(ch for ch in text if ch not in exclude) + else: + return text + + +def _fix_number(text): + return str(float(text)) if _is_number(text) else text + + +def _tokenize(text): + return re.split(" |-", text) + + +def _normalize(answer): + tokens = [ + _white_space_fix(_remove_articles(_fix_number(_remove_punc(token.lower())))) + for token in _tokenize(answer) + ] + tokens = [token for token in tokens if token.strip()] + normalized = " ".join(tokens).strip() + return normalized diff --git a/lm-evaluation/build/lib/lm_eval/tasks/glue/README.md b/lm-evaluation/build/lib/lm_eval/tasks/glue/README.md new file mode 100644 index 0000000000000000000000000000000000000000..573c640e87c1ba077d6d9cbe79a045c7c4f02ddf --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/glue/README.md @@ -0,0 +1,72 @@ +# GLUE +**NOTE**: GLUE benchmark tasks do not provide publicly accessible labels for their test sets, so we default to the validation sets for all sub-tasks. + +### Paper + +Title: `GLUE: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding` + +Abstract: https://openreview.net/pdf?id=rJ4km2R5t7 + +The General Language Understanding Evaluation (GLUE) benchmark is a collection of +resources for training, evaluating, and analyzing natural language understanding +systems. GLUE consists of: +- A benchmark of nine sentence- or sentence-pair language understanding tasks built +on established existing datasets and selected to cover a diverse range of dataset +sizes, text genres, and degrees of difficulty, and +- A diagnostic dataset designed to evaluate and analyze model performance with +respect to a wide range of linguistic phenomena found in natural language. 
+ +Homepage: https://gluebenchmark.com/ + +### Citation + +``` +@inproceedings{wang-etal-2018-glue, + title = "{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding", + author = "Wang, Alex and + Singh, Amanpreet and + Michael, Julian and + Hill, Felix and + Levy, Omer and + Bowman, Samuel", + booktitle = "Proceedings of the 2018 {EMNLP} Workshop {B}lackbox{NLP}: Analyzing and Interpreting Neural Networks for {NLP}", + month = nov, + year = "2018", + address = "Brussels, Belgium", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/W18-5446", + doi = "10.18653/v1/W18-5446", + pages = "353--355", + abstract = "Human ability to understand language is \textit{general, flexible, and robust}. In contrast, most NLU models above the word level are designed for a specific task and struggle with out-of-domain data. If we aspire to develop models with understanding beyond the detection of superficial correspondences between inputs and outputs, then it is critical to develop a unified model that can execute a range of linguistic tasks across different domains. To facilitate research in this direction, we present the General Language Understanding Evaluation (GLUE, gluebenchmark.com): a benchmark of nine diverse NLU tasks, an auxiliary dataset for probing models for understanding of specific linguistic phenomena, and an online platform for evaluating and comparing models. For some benchmark tasks, training data is plentiful, but for others it is limited or does not match the genre of the test set. GLUE thus favors models that can represent linguistic knowledge in a way that facilitates sample-efficient learning and effective knowledge-transfer across tasks. While none of the datasets in GLUE were created from scratch for the benchmark, four of them feature privately-held test data, which is used to ensure that the benchmark is used fairly. We evaluate baselines that use ELMo (Peters et al., 2018), a powerful transfer learning technique, as well as state-of-the-art sentence representation models. The best models still achieve fairly low absolute scores. Analysis with our diagnostic dataset yields similarly weak performance over all phenomena tested, with some exceptions.", +} +``` + +### Groups and Tasks + +#### Groups + +* `glue`: Run all Glue subtasks. + +#### Tasks + +* `cola` +* `mnli` +* `mrpc` +* `qnli` +* `qqp` +* `rte` +* `sst` +* `wnli` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
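+
+As a quick smoke test, the subtasks above (or the whole `glue` group) can be run through the harness's Python entry point. A minimal sketch, assuming `lm_eval.simple_evaluate` is available in the installed version and using a small HF checkpoint purely as a placeholder:
+
+```python
+import lm_eval
+
+# Evaluate two GLUE subtasks zero-shot; scores are reported on the
+# validation splits, as noted at the top of this README.
+results = lm_eval.simple_evaluate(
+    model="hf",
+    model_args="pretrained=EleutherAI/pythia-160m",
+    tasks=["cola", "rte"],
+    num_fewshot=0,
+)
+print(results["results"]["cola"])  # e.g. Matthews correlation ("mcc")
+print(results["results"]["rte"])   # e.g. accuracy ("acc")
+```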
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/glue/cola/default.yaml b/lm-evaluation/build/lib/lm_eval/tasks/glue/cola/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a46003c2766ea26a96a6c6b73b750cb5e402119e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/glue/cola/default.yaml @@ -0,0 +1,16 @@ +group: glue +task: cola +dataset_path: glue +dataset_name: cola +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "{{sentence}}\nQuestion: Does this sentence make sense?\nAnswer:" +doc_to_target: label +doc_to_choice: ["no", "yes"] +should_decontaminate: true +doc_to_decontamination_query: sentence +metric_list: + - metric: mcc +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/glue/mnli/default.yaml b/lm-evaluation/build/lib/lm_eval/tasks/glue/mnli/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6caffa85a22719f597f5b780b0653ee124a854c5 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/glue/mnli/default.yaml @@ -0,0 +1,14 @@ +group: glue +task: mnli +dataset_path: glue +dataset_name: mnli +output_type: multiple_choice +training_split: train +validation_split: validation_matched +doc_to_text: !function utils.doc_to_text +doc_to_target: label +doc_to_choice: ["True", "Neither", "False"] +metric_list: + - metric: acc +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/glue/mnli/mismatch.yaml b/lm-evaluation/build/lib/lm_eval/tasks/glue/mnli/mismatch.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1e9b49bcd423ce43bf87f044c75a01e75f44d3d0 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/glue/mnli/mismatch.yaml @@ -0,0 +1,3 @@ +include: default.yaml +task: mnli_mismatch +validation_split: validation_mismatched diff --git a/lm-evaluation/build/lib/lm_eval/tasks/glue/mnli/utils.py b/lm-evaluation/build/lib/lm_eval/tasks/glue/mnli/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2d5fdaec2905ac7cf95ac3e50f1d12c728f59c37 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/glue/mnli/utils.py @@ -0,0 +1,6 @@ +def doc_to_text(doc) -> str: + return "{}\nQuestion: {} True, False or Neither?\nAnswer:".format( + doc["premise"], + doc["hypothesis"].strip() + + ("" if doc["hypothesis"].strip().endswith(".") else "."), + ) diff --git a/lm-evaluation/build/lib/lm_eval/tasks/glue/mrpc/default.yaml b/lm-evaluation/build/lib/lm_eval/tasks/glue/mrpc/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f0bc24510ca533bde719cba42fb9d079cfb4a53b --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/glue/mrpc/default.yaml @@ -0,0 +1,15 @@ +group: glue +task: mrpc +dataset_path: glue +dataset_name: mrpc +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do both sentences mean the same thing?\nAnswer:" +doc_to_target: label +doc_to_choice: ["no", "yes"] +metric_list: + - metric: acc + - metric: f1 +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/glue/qnli/default.yaml b/lm-evaluation/build/lib/lm_eval/tasks/glue/qnli/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..49a6216a5e0b351d2d92ba188bf2dd54823d0132 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/glue/qnli/default.yaml @@ -0,0 +1,14 @@ +group: glue +task: qnli +dataset_path: glue +dataset_name: qnli 
+output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "{{question}}\n{{sentence}}\nQuestion: Does this response answer the question?\nAnswer:" +doc_to_target: label +doc_to_choice: ["yes", "no"] +metric_list: + - metric: acc +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/glue/qqp/default.yaml b/lm-evaluation/build/lib/lm_eval/tasks/glue/qqp/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bcd82f26bc8552c74f85b23054d90b9084a89211 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/glue/qqp/default.yaml @@ -0,0 +1,15 @@ +group: glue +task: qqp +dataset_path: glue +dataset_name: qqp +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "Question 1: {{question1}}\nQuestion 2: {{question2}}\nQuestion: Do both questions ask the same thing?\nAnswer:" +doc_to_target: label +doc_to_choice: ["no", "yes"] +metric_list: + - metric: acc + - metric: f1 +metadata: + version: 2.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/glue/rte/default.yaml b/lm-evaluation/build/lib/lm_eval/tasks/glue/rte/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7b12096a46b2a4fcc3f6f59b4f2d245130425c01 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/glue/rte/default.yaml @@ -0,0 +1,14 @@ +group: glue +task: rte +dataset_path: glue +dataset_name: rte +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "{{sentence1}}\nQuestion: {{sentence2}} True or False?\nAnswer:" +doc_to_target: label +doc_to_choice: ["True", "False"] +metric_list: + - metric: acc +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/glue/sst2/default.yaml b/lm-evaluation/build/lib/lm_eval/tasks/glue/sst2/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..838afeb218891da139dec48083fa1990fc896b07 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/glue/sst2/default.yaml @@ -0,0 +1,14 @@ +group: glue +task: sst2 +dataset_path: glue +dataset_name: sst2 +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "{{sentence}}\nQuestion: Is this sentence positive or negative?\nAnswer:" +doc_to_target: label +doc_to_choice: ["negative", "positive"] +metric_list: + - metric: acc +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/glue/wnli/default.yaml b/lm-evaluation/build/lib/lm_eval/tasks/glue/wnli/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a8e57a35d67920b7101a4f9e92f873c3c7ec3134 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/glue/wnli/default.yaml @@ -0,0 +1,14 @@ +group: glue +task: wnli +dataset_path: glue +dataset_name: wnli +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "{{sentence1}}\nQuestion: {{sentence2}} True or False?\nAnswer:" +doc_to_target: label +doc_to_choice: ["False", "True"] +metric_list: + - metric: acc +metadata: + version: 2.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/gsm8k/README.md b/lm-evaluation/build/lib/lm_eval/tasks/gsm8k/README.md new file mode 100644 index 0000000000000000000000000000000000000000..13339dfa46366298389e3ad0d3910b00db2c417e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/gsm8k/README.md @@ -0,0 +1,59 @@ +# GSM8k + +## Paper +Training Verifiers to Solve Math Word Problems +https://arxiv.org/abs/2110.14168 + +State-of-the-art language 
models can match human performance on many tasks, but +they still struggle to robustly perform multi-step mathematical reasoning. To +diagnose the failures of current models and support research, we introduce GSM8K, +a dataset of 8.5K high quality linguistically diverse grade school math word problems. +We find that even the largest transformer models fail to achieve high test performance, +despite the conceptual simplicity of this problem distribution. + +NOTE: See the official implementation of the task: + https://github.com/openai/grade-school-math/blob/master/grade_school_math/calculator.py +for how to make use of the dataset's calculator annotations in your language +model's sample/generation function. + +Homepage: https://github.com/openai/grade-school-math + + +## Citation +``` +@misc{cobbe2021training, + title={Training Verifiers to Solve Math Word Problems}, + author={Karl Cobbe and Vineet Kosaraju and Mohammad Bavarian and Jacob Hilton and Reiichiro Nakano and Christopher Hesse and John Schulman}, + year={2021}, + eprint={2110.14168}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + +### Groups and Tasks + +#### Groups + +- `math_word_problems` +- `chain_of_thought` +- `self_consistency` + +#### Tasks + +- `gsm8k_yaml` +- `gsm8k_cot`: GSM8K with Chain-of-Thought +- `gsm8k_cot_self_consistency`: GSM8K with Chain-of-Thought and Self-Consistency + +### Checklist + +- [x] Is in Eval-harness v1.0 ? +- [ ] Has been checked for regression from v1.0? +- [ ] Has been checked for equivalence with original paper methodology? +- [ ] "Main" checked variant clearly denoted? + +### Variant Wishlist + +- [ ] Variant with Calculator (see https://github.com/openai/grade-school-math/blob/master/grade_school_math/calculator.py for example implementation) +- [ ] Using Verifiers +- [ ] Majority voting "without CoT" diff --git a/lm-evaluation/build/lib/lm_eval/tasks/gsm8k/gsm8k-cot-self-consistency.yaml b/lm-evaluation/build/lib/lm_eval/tasks/gsm8k/gsm8k-cot-self-consistency.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d92ee342d18bb2e9f2da7573fd0c72ddd65db9c8 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/gsm8k/gsm8k-cot-self-consistency.yaml @@ -0,0 +1,34 @@ +include: gsm8k-cot.yaml +group: + - chain_of_thought + - self_consistency +task: gsm8k_cot_self_consistency +generation_kwargs: + until: + - "Q:" + - "\n\n" + do_sample: true + temperature: 0.2 +repeats: 64 +filter_list: + - name: "score-first" # pick only the first response, and report metrics on that + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)" + - function: "take_first" + - name: "maj@64" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)" + - function: "majority_vote" + - function: "take_first" + - name: "maj@8" # get Maj@8 , via selecting the first 8 responses. Using a better estimator would be optimal. 
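+    # maj@8: keep only the first 8 of the 64 sampled generations
+    # (repeats: 64 above), extract the number following "The answer is"
+    # from each, then majority-vote and report the winning answer.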
+ filter: + - function: "take_first_k" + k: 8 + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)" + - function: "majority_vote" + - function: "take_first" +metadata: + version: 2.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/gsm8k/gsm8k-cot-zeroshot.yaml b/lm-evaluation/build/lib/lm_eval/tasks/gsm8k/gsm8k-cot-zeroshot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..75d4468ac02d551d135ef78a752aba0d157e72ab --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/gsm8k/gsm8k-cot-zeroshot.yaml @@ -0,0 +1,44 @@ +group: + - math_word_problems +task: gsm8k_cot_zeroshot +dataset_path: gsm8k +dataset_name: main +output_type: generate_until +training_split: train +fewshot_split: train +test_split: test +doc_to_text: "Q: {{question}}\nA: Let's think step by step." +doc_to_target: "{{answer}}" #" {{answer.split('### ')[-1].rstrip()}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: false + regexes_to_ignore: + - "," + - "\\$" + - "(?s).*#### " + - "\\.$" +generation_kwargs: + until: + - "Q:" + - "" + - "<|im_end|>" + do_sample: false +repeats: 1 +num_fewshot: 0 +filter_list: + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)." + - function: "take_first" + - name: "flexible-extract" + filter: + - function: "regex" + group_select: -1 + regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)" + - function: "take_first" +metadata: + version: 3.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/gsm8k/gsm8k-cot.yaml b/lm-evaluation/build/lib/lm_eval/tasks/gsm8k/gsm8k-cot.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e55020258930e400ace1fc8cb85949e1af347a13 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/gsm8k/gsm8k-cot.yaml @@ -0,0 +1,51 @@ +group: + - chain_of_thought +task: gsm8k_cot +dataset_path: gsm8k +dataset_name: main +output_type: generate_until +test_split: test +doc_to_text: "Q: There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?\nA: There are 15 trees originally. Then there were 21 trees after some more were planted. So there must have been 21 - 15 = 6. The answer is 6.\n\n\ +Q: If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?\nA: There are originally 3 cars. 2 more cars arrive. 3 + 2 = 5. The answer is 5.\n\n\ +Q: Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?\nA: Originally, Leah had 32 chocolates. Her sister had 42. So in total they had 32 + 42 = 74. After eating 35, they had 74 - 35 = 39. The answer is 39.\n\n\ +Q: Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?\nA: Jason started with 20 lollipops. Then he had 12 after giving some to Denny. So he gave Denny 20 - 12 = 8. The answer is 8.\n\n\ +Q: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?\nA: Shawn started with 5 toys. If he got 2 toys each from his mom and dad, then that is 4 more toys. 5 + 4 = 9. The answer is 9.\n\n\ +Q: There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?\nA: There were originally 9 computers. 
For each of 4 days, 5 more computers were added. So 5 * 4 = 20 computers were added. 9 + 20 is 29. The answer is 29.\n\n\ +Q: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?\nA: Michael started with 58 golf balls. After losing 23 on tuesday, he had 58 - 23 = 35. After losing 2 more, he had 35 - 2 = 33 golf balls. The answer is 33.\n\n\ +Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?\nA: Olivia had 23 dollars. 5 bagels for 3 dollars each will be 5 x 3 = 15 dollars. So she has 23 - 15 dollars left. 23 - 15 is 8. The answer is 8.\n\n\ +Q: {{question}}\nA:" +doc_to_target: "{{answer.split('####')[-1].strip()}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: false + regexes_to_ignore: + - "," + - "\\$" + - "(?s).*#### " + - "\\.$" +generation_kwargs: + until: + - "Q:" + - "" + - "<|im_end|>" + do_sample: false +repeats: 1 +num_fewshot: 0 +filter_list: + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)." + - function: "take_first" + - name: "flexible-extract" + filter: + - function: "regex" + group_select: -1 + regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)" + - function: "take_first" +metadata: + version: 3.0 + num_fewshot: 8 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/gsm8k/gsm8k.yaml b/lm-evaluation/build/lib/lm_eval/tasks/gsm8k/gsm8k.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2c4ef836b1b21177d40c10e410cf69051c98e9e3 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/gsm8k/gsm8k.yaml @@ -0,0 +1,45 @@ +group: + - math_word_problems +task: gsm8k +dataset_path: gsm8k +dataset_name: main +output_type: generate_until +training_split: train +fewshot_split: train +test_split: test +doc_to_text: "Question: {{question}}\nAnswer:" +doc_to_target: "{{answer}}" #" {{answer.split('### ')[-1].rstrip()}}" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: false + regexes_to_ignore: + - "," + - "\\$" + - "(?s).*#### " + - "\\.$" +generation_kwargs: + until: + - "Question:" + - "" + - "<|im_end|>" + do_sample: false + temperature: 0.0 +repeats: 1 +num_fewshot: 5 +filter_list: + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "#### (\\-?[0-9\\.\\,]+)" + - function: "take_first" + - name: "flexible-extract" + filter: + - function: "regex" + group_select: -1 + regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)" + - function: "take_first" +metadata: + version: 3.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/headqa/README.md b/lm-evaluation/build/lib/lm_eval/tasks/headqa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9e061f0ed44e65ef04cc9d98220058051d509da6 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/headqa/README.md @@ -0,0 +1,57 @@ +# HEAD-QA + +### Paper + +HEAD-QA: A Healthcare Dataset for Complex Reasoning +https://arxiv.org/pdf/1906.04701.pdf + +HEAD-QA is a multi-choice HEAlthcare Dataset. The questions come from exams to access a specialized position in the +Spanish healthcare system, and are challenging even for highly specialized humans. They are designed by the Ministerio +de Sanidad, Consumo y Bienestar Social. +The dataset contains questions about the following topics: medicine, nursing, psychology, chemistry, pharmacology and biology. 
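+
+The `headqa_en.yaml` added below treats each question as a standard multiple-choice item: `doc_to_choice` collects the `atext` of every candidate answer and `doc_to_target` maps the dataset's 1-indexed gold id `ra` onto that 0-indexed list via `{{ra - 1}}`. A minimal sketch of that mapping on a made-up record (field names follow the dataset; the values are invented):
+
+```python
+# Hypothetical HEAD-QA record; field names mirror the dataset, values are invented.
+doc = {
+    "qtext": "Which vitamin deficiency causes scurvy?",
+    "ra": 3,  # 1-indexed id of the correct answer
+    "answers": [
+        {"aid": 1, "atext": "Vitamin A"},
+        {"aid": 2, "atext": "Vitamin B12"},
+        {"aid": 3, "atext": "Vitamin C"},
+        {"aid": 4, "atext": "Vitamin D"},
+    ],
+}
+
+choices = [a["atext"] for a in doc["answers"]]  # doc_to_choice
+target = doc["ra"] - 1                          # doc_to_target: "{{ra - 1}}"
+assert choices[target] == "Vitamin C"
+```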
+ +Homepage: https://aghie.github.io/head-qa/ + + +### Citation + +``` +@inproceedings{vilares-gomez-rodriguez-2019-head, + title = "{HEAD}-{QA}: A Healthcare Dataset for Complex Reasoning", + author = "Vilares, David and + G{\'o}mez-Rodr{\'i}guez, Carlos", + booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", + month = jul, + year = "2019", + address = "Florence, Italy", + publisher = "Association for Computational Linguistics", + url = "https://www.aclweb.org/anthology/P19-1092", + doi = "10.18653/v1/P19-1092", + pages = "960--966", + abstract = "We present HEAD-QA, a multi-choice question answering testbed to encourage research on complex reasoning. The questions come from exams to access a specialized position in the Spanish healthcare system, and are challenging even for highly specialized humans. We then consider monolingual (Spanish) and cross-lingual (to English) experiments with information retrieval and neural techniques. We show that: (i) HEAD-QA challenges current methods, and (ii) the results lag well behind human performance, demonstrating its usefulness as a benchmark for future work.", +} +``` + +### Groups and Tasks + +#### Groups + +- `headqa`: Evaluates `headqa_en` and `headqa_es` + +#### Tasks + +* `headqa_en` - English variant of HEAD-QA +* `headqa_es` - Spanish variant of HEAD-QA + +### Checklist + +* [x] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?\ + * [x] Same as LM Evaluation Harness v0.3.0 implementation diff --git a/lm-evaluation/build/lib/lm_eval/tasks/headqa/headqa_en.yaml b/lm-evaluation/build/lib/lm_eval/tasks/headqa/headqa_en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..eeb2ff12dd4c05b08c199692c3e868b6b50fc362 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/headqa/headqa_en.yaml @@ -0,0 +1,23 @@ +group: + - headqa +task: headqa_en +dataset_path: EleutherAI/headqa +dataset_name: en +output_type: multiple_choice +training_split: train +validation_split: validation +test_split: test +doc_to_text: "Question: {{qtext}}\nAnswer:" +doc_to_target: "{{ra - 1}}" +doc_to_choice: "{{answers|map(attribute='atext')|list}}" # this will be cast to an int. 
+should_decontaminate: true +doc_to_decontamination_query: query +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/README.md b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/README.md new file mode 100644 index 0000000000000000000000000000000000000000..90f8e44bb05394cb95c121946febbaaad6c48d27 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/README.md @@ -0,0 +1,94 @@ +# MGSM + +### Paper + +Title: `Language Models are Multilingual Chain-of-Thought Reasoners` + +Abstract: https://arxiv.org/abs/2210.03057 + +Multilingual Grade School Math Benchmark (MGSM) is a benchmark of grade-school math problems, proposed in the paper [Language models are multilingual chain-of-thought reasoners](http://arxiv.org/abs/2210.03057). + +The same 250 problems from [GSM8K](https://arxiv.org/abs/2110.14168) are each translated via human annotators in 10 languages. The 10 languages are: +- Spanish +- French +- German +- Russian +- Chinese +- Japanese +- Thai +- Swahili +- Bengali +- Telugu + +GSM8K (Grade School Math 8K) is a dataset of 8.5K high quality linguistically diverse grade school math word problems. The dataset was created to support the task of question answering on basic mathematical problems that require multi-step reasoning. + +You can find the input and targets for each of the ten languages (and English) as `.tsv` files. +We also include few-shot exemplars that are also manually translated from each language in `exemplars.py`. + +Homepage: https://github.com/google-research/url-nlp/tree/main/mgsm + + +### Citation + +``` +@misc{cobbe2021training, + title={Training Verifiers to Solve Math Word Problems}, + author={Karl Cobbe and Vineet Kosaraju and Mohammad Bavarian and Jacob Hilton and Reiichiro Nakano and Christopher Hesse and John Schulman}, + year={2021}, + eprint={2110.14168}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +@misc{shi2022language, + title={Language Models are Multilingual Chain-of-Thought Reasoners}, + author={Freda Shi and Mirac Suzgun and Markus Freitag and Xuezhi Wang and Suraj Srivats and Soroush Vosoughi and Hyung Won Chung and Yi Tay and Sebastian Ruder and Denny Zhou and Dipanjan Das and Jason Wei}, + year={2022}, + eprint={2210.03057}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Groups and Tasks + +#### Groups + +* `mgsm_direct`: Direct question + * `mgsm_direct_bn`: Bengali + * `mgsm_direct_de`: German + * `mgsm_direct_en`: English + * `mgsm_direct_es`: Spanish + * `mgsm_direct_fr`: French + * `mgsm_direct_ja`: Japanese + * `mgsm_direct_ru`: Russian + * `mgsm_direct_sw`: Swahili + * `mgsm_direct_te`: Telugu + * `mgsm_direct_th`: Thai + * `mgsm_direct_zh`: Chinese +* `mgsm_cot_native`: Question with Answer followed by CoT prompt in the same language as the dataset. + * `mgsm_cot_native_bn`: Bengali + * `mgsm_cot_native_de`: German + * `mgsm_cot_native_en`: English + * `mgsm_cot_native_es`: Spanish + * `mgsm_cot_native_fr`: French + * `mgsm_cot_native_ja`: Japanese + * `mgsm_cot_native_ru`: Russian + * `mgsm_cot_native_sw`: Swahili + * `mgsm_cot_native_te`: Telugu + * `mgsm_cot_native_th`: Thai + * `mgsm_cot_native_zh`: Chinese + +Examplar Samples: https://github.com/google-research/url-nlp/blob/main/mgsm/exemplars.py + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? 
+ * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/direct_yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/direct_yaml new file mode 100644 index 0000000000000000000000000000000000000000..3a265cb025916a00807fefd7c3f39466a4ce80ae --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/direct_yaml @@ -0,0 +1,35 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +group: mgsm_direct +dataset_path: juletxara/mgsm +dataset_name: null # Overridden by language-specific config. +output_type: generate_until +training_split: train +test_split: test +target_delimiter: "" +generation_kwargs: + until: + - "\n\n" + - "\n" + do_sample: false + temperature: 0.0 +filter_list: + - name: remove_whitespace + filter: + - function: remove_whitespace + - function: take_first + - filter: + - function: regex + group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +metadata: + version: 2.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_bn.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_bn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..08e7125127eabeda6fdc08a6a3edd83c84ea277e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_bn.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: bn +doc_to_target: '{% if answer is not none %}{{answer[17:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"প্রশ্ন: "+question+"\nAnswer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'প্রশ্ন:' + - + - <|im_end|> +include: direct_yaml +task: mgsm_direct_bn diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_de.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_de.yaml new file mode 100644 index 0000000000000000000000000000000000000000..24bc43eda3eaa1815919c9abc7d05697f53be309 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_de.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: de +doc_to_target: '{% if answer is not none %}{{answer[29:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nAntwort:"}}{% else %}{{"Frage: "+question+"\nAntwort:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Frage:' + - + - <|im_end|> +include: direct_yaml +task: mgsm_direct_de diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_en.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_en.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..f7ef407d39f7addb0688366cfd98005ee7a8da6b --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_en.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: en +doc_to_target: '{% if answer is not none %}{{answer[21:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"Question: "+question+"\nAnswer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Question:' + - + - <|im_end|> +include: direct_yaml +task: mgsm_direct_en diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_es.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_es.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a6c3c1fd7ed85050098cb4db48db2bdbb86c7db6 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_es.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: es +doc_to_target: '{% if answer is not none %}{{answer[23:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nRespuesta:"}}{% else %}{{"Pregunta: "+question+"\nRespuesta:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Pregunta:' + - + - <|im_end|> +include: direct_yaml +task: mgsm_direct_es diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_fr.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_fr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..993c181a97d59c71ee50b67d641995296d373e58 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_fr.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: fr +doc_to_target: '{% if answer is not none %}{{answer[26:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nRéponse :"}}{% else %}{{"Question : "+question+"\nRéponse :"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Question :' + - + - <|im_end|> +include: direct_yaml +task: mgsm_direct_fr diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_ja.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_ja.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7de11a486d4c5eaf7a2675fec8c9812f7beae0c0 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_ja.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: ja +doc_to_target: '{% if answer is not none %}{{answer[11:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"問題: "+question+"\nAnswer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - '問題:' + - + - <|im_end|> +include: direct_yaml +task: mgsm_direct_ja diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_ru.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_ru.yaml new file mode 100644 index 0000000000000000000000000000000000000000..30d1618faacf5712154132b200b333e519426b95 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_ru.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: ru +doc_to_target: '{% if answer is not none %}{{answer[18:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"Задача: "+question+"\nAnswer:"}}{% endif 
%}' +generation_kwargs: + do_sample: false + until: + - 'Задача:' + - + - <|im_end|> +include: direct_yaml +task: mgsm_direct_ru diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_sw.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_sw.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0357902d4eea32b0f4619e32f6806599caac4ae5 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_sw.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: sw +doc_to_target: '{% if answer is not none %}{{answer[25:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"Swali: "+question+"\nAnswer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Swali:' + - + - <|im_end|> +include: direct_yaml +task: mgsm_direct_sw diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_te.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_te.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4abdc7e78ec0ddd597d1ff2210a3474ad397a30a --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_te.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: te +doc_to_target: '{% if answer is not none %}{{answer[19:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"ప్రశ్న: "+question+"\nAnswer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'ప్రశ్న:' + - + - <|im_end|> +include: direct_yaml +task: mgsm_direct_te diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_th.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_th.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fcf35a6721ab7faa221e023483c7630040b0e72f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_th.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: th +doc_to_target: '{% if answer is not none %}{{answer[18:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"โจทย์: "+question+"\nAnswer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'โจทย์:' + - + - <|im_end|> +include: direct_yaml +task: mgsm_direct_th diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_zh.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_zh.yaml new file mode 100644 index 0000000000000000000000000000000000000000..283e63f8bcd9f910ea9aa7560ed1c68819c0351a --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/direct/mgsm_direct_zh.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: zh +doc_to_target: '{% if answer is not none %}{{answer[6:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nAnswer:"}}{% else %}{{"问题: "+question+"\nAnswer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - '问题:' + - + - <|im_end|> +include: direct_yaml +task: mgsm_direct_zh diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/cot_yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/cot_yaml new file mode 100644 index 0000000000000000000000000000000000000000..f4d502ee52f4389d4331be7dcde287d1c47c3f59 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/cot_yaml @@ -0,0 +1,36 @@ +# This file will 
be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +group: mgsm_cot_native +dataset_path: juletxara/mgsm +dataset_name: null # Overridden by language-specific config. +output_type: generate_until +training_split: train +test_split: test +generation_kwargs: + until: + - "\n\n" + - "\n" + do_sample: false + temperature: 0.0 +target_delimiter: " " +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +filter_list: + - name: "strict-match" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)" + - function: "take_first" + - filter: + - function: regex + group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +metadata: + version: 2.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_bn.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_bn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b1c3c2fcd75827bf0c574090bb2adbc3890bdaf4 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_bn.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: bn +doc_to_target: '{% if answer is not none %}{{answer[17:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"প্রশ্ন: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'প্রশ্ন:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_en_cot_bn diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_de.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_de.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c2362fb7ac0944da0eae570963603275d459a254 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_de.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: de +doc_to_target: '{% if answer is not none %}{{answer[29:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Frage: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Frage:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_en_cot_de diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_en.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f27a616487aadcda9ac0f6f4e549d9bcd8e26dc1 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_en.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: en +doc_to_target: '{% if answer is not none %}{{answer[21:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Question: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Question:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_en_cot_en diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_es.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_es.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..cc748306a473dd11beace7d35ac7453f187c7abb --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_es.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: es +doc_to_target: '{% if answer is not none %}{{answer[23:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Pregunta: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Pregunta:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_en_cot_es diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_fr.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_fr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d36dd813a3b86b6300620ec5c74ad0154017edf9 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_fr.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: fr +doc_to_target: '{% if answer is not none %}{{answer[26:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Question : "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Question :' + - + - <|im_end|> +include: cot_yaml +task: mgsm_en_cot_fr diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_ja.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_ja.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c98060357ebd1ed60b61555c954a035b9e0080f6 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_ja.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: ja +doc_to_target: '{% if answer is not none %}{{answer[11:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"問題: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - '問題:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_en_cot_ja diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_ru.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_ru.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2bfeb1dafe3cbd989ba3999394b1ea9a294504f5 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_ru.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: ru +doc_to_target: '{% if answer is not none %}{{answer[18:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Задача: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Задача:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_en_cot_ru diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_sw.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_sw.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6f37cd3b87eb3660a701eec29ca1d51cc3c630e4 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_sw.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: sw +doc_to_target: '{% if answer is not none %}{{answer[25:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none 
%}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Swali: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'Swali:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_en_cot_sw diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_te.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_te.yaml new file mode 100644 index 0000000000000000000000000000000000000000..75da745da1b6c27350be39d9e7c535c1d3c93168 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_te.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: te +doc_to_target: '{% if answer is not none %}{{answer[19:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"ప్రశ్న: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'ప్రశ్న:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_en_cot_te diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_th.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_th.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0ff2177b782ef3c939dd649c484a9b5a83501333 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_th.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: th +doc_to_target: '{% if answer is not none %}{{answer[18:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"โจทย์: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - 'โจทย์:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_en_cot_th diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_zh.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_zh.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f45004aacfd93bc4786b9ebd42cc6283d9a31785 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_zh.yaml @@ -0,0 +1,12 @@ +# Generated by utils.py +dataset_name: zh +doc_to_target: '{% if answer is not none %}{{answer[6:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"问题: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +generation_kwargs: + do_sample: false + until: + - '问题:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_en_cot_zh diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/gen_yaml.sh b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/gen_yaml.sh new file mode 100644 index 0000000000000000000000000000000000000000..27cbbcfdc7ae6bddb463de0c7ceb8ec467ec9c3b --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/gen_yaml.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +python utils.py --overwrite --output-dir direct --mode direct +python utils.py --overwrite --output-dir en_cot --mode en-cot +python utils.py --overwrite --output-dir native_cot --mode native-cot diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/cot_yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/cot_yaml new file mode 100644 index 0000000000000000000000000000000000000000..dbba882225b1d7c9fbe10352c64a381c97a547c7 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/cot_yaml @@ -0,0 +1,31 @@ +# This file will be included in the generated language-specific 
task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +group: mgsm_cot_native +dataset_path: juletxara/mgsm +dataset_name: null # Overridden by language-specific config. +output_type: generate_until +training_split: train +test_split: test +# target_delimiter: "" +generation_kwargs: + until: + - "\n\n" + - "\n" + do_sample: false + temperature: 0.0 +target_delimiter: " " +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +filter_list: + - name: "get-answer" + filter: + - function: "regex" + regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)" + - function: "take_first" +metadata: + version: 3.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_bn.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_bn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..eb58c8753784c250ce24860fd21211b62ef0cc31 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_bn.yaml @@ -0,0 +1,24 @@ +# Generated by utils.py +dataset_name: bn +doc_to_target: '{% if answer is not none %}{{answer[17:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nধাপে ধাপে উত্তর:"}}{% else %}{{"প্রশ্ন: "+question+"\nধাপে ধাপে উত্তর:"}}{% endif %}' +filter_list: +- filter: + - function: regex + regex_pattern: The answer is (\-?[0-9\.\,]+) + - function: take_first + name: strict-match +- filter: + - function: regex + group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +generation_kwargs: + do_sample: false + until: + - 'প্রশ্ন:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_native_cot_bn diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_de.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_de.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4f4701796945b74fe884a73d931debdf2c7b5ce9 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_de.yaml @@ -0,0 +1,24 @@ +# Generated by utils.py +dataset_name: de +doc_to_target: '{% if answer is not none %}{{answer[29:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nSchritt-für-Schritt-Antwort:"}}{% else %}{{"Frage: "+question+"\nSchritt-für-Schritt-Antwort:"}}{% endif %}' +filter_list: +- filter: + - function: regex + regex_pattern: Die Antwort lautet (\-?[0-9\.\,]+) + - function: take_first + name: strict-match +- filter: + - function: regex + group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +generation_kwargs: + do_sample: false + until: + - 'Frage:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_native_cot_de diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_en.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c2033b335fb51ec1310f98b4e905f18231c1b68a --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_en.yaml @@ -0,0 +1,24 @@ +# Generated by utils.py +dataset_name: en +doc_to_target: '{% if answer is not none %}{{answer[21:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none 
%}{{question+"\nStep-by-Step Answer:"}}{% else %}{{"Question: "+question+"\nStep-by-Step Answer:"}}{% endif %}' +filter_list: +- filter: + - function: regex + regex_pattern: The answer is (\-?[0-9\.\,]+) + - function: take_first + name: strict-match +- filter: + - function: regex + group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +generation_kwargs: + do_sample: false + until: + - 'Question:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_native_cot_en diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_es.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_es.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6c39fb9c4740ac571db8165a80fdd7efa108f56b --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_es.yaml @@ -0,0 +1,24 @@ +# Generated by utils.py +dataset_name: es +doc_to_target: '{% if answer is not none %}{{answer[23:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nRespuesta paso a paso:"}}{% else %}{{"Pregunta: "+question+"\nRespuesta paso a paso:"}}{% endif %}' +filter_list: +- filter: + - function: regex + regex_pattern: La respuesta es (\-?[0-9\.\,]+) + - function: take_first + name: strict-match +- filter: + - function: regex + group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +generation_kwargs: + do_sample: false + until: + - 'Pregunta:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_native_cot_es diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_fr.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_fr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b52b881f7a3f8b30d64ce8eb8ee6b308673626c2 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_fr.yaml @@ -0,0 +1,24 @@ +# Generated by utils.py +dataset_name: fr +doc_to_target: '{% if answer is not none %}{{answer[26:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nRéponse étape par étape :"}}{% else %}{{"Question : "+question+"\nRéponse étape par étape :"}}{% endif %}' +filter_list: +- filter: + - function: regex + regex_pattern: La réponse est (\-?[0-9\.\,]+) + - function: take_first + name: strict-match +- filter: + - function: regex + group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +generation_kwargs: + do_sample: false + until: + - 'Question :' + - + - <|im_end|> +include: cot_yaml +task: mgsm_native_cot_fr diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_ja.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_ja.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8e56bd0b15150e1e435b4d304255c0a751246e86 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_ja.yaml @@ -0,0 +1,24 @@ +# Generated by utils.py +dataset_name: ja +doc_to_target: '{% if answer is not none %}{{answer[11:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nステップごとの答え:"}}{% else %}{{"問題: "+question+"\nステップごとの答え:"}}{% endif %}' +filter_list: +- filter: + - function: regex + regex_pattern: 答えは(\-?[0-9\.\,]+)です。 + - function: take_first + name: 
strict-match +- filter: + - function: regex + group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +generation_kwargs: + do_sample: false + until: + - '問題:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_native_cot_ja diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_ru.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_ru.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3cff6267a067da1e9d10cfa66aaad7c06618f7ad --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_ru.yaml @@ -0,0 +1,24 @@ +# Generated by utils.py +dataset_name: ru +doc_to_target: '{% if answer is not none %}{{answer[18:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nПошаговоерешение:"}}{% else %}{{"Задача: "+question+"\nПошаговоерешение:"}}{% endif %}' +filter_list: +- filter: + - function: regex + regex_pattern: Ответ — (\-?[0-9\.\,]+) + - function: take_first + name: strict-match +- filter: + - function: regex + group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +generation_kwargs: + do_sample: false + until: + - 'Задача:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_native_cot_ru diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_sw.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_sw.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4da793dbc78485cb8167a6fc069b87f7590c960f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_sw.yaml @@ -0,0 +1,24 @@ +# Generated by utils.py +dataset_name: sw +doc_to_target: '{% if answer is not none %}{{answer[25:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nJibu la Hatua kwa Hatua:"}}{% else %}{{"Swali: "+question+"\nJibu la Hatua kwa Hatua:"}}{% endif %}' +filter_list: +- filter: + - function: regex + regex_pattern: Jibu ni (\-?[0-9\.\,]+) + - function: take_first + name: strict-match +- filter: + - function: regex + group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +generation_kwargs: + do_sample: false + until: + - 'Swali:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_native_cot_sw diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_te.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_te.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1cdbaca8893b6ee626084135c7a64ccd02737b81 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_te.yaml @@ -0,0 +1,24 @@ +# Generated by utils.py +dataset_name: te +doc_to_target: '{% if answer is not none %}{{answer[19:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nదశలవారీగా సమాధానం:"}}{% else %}{{"ప్రశ్న: "+question+"\nదశలవారీగా సమాధానం:"}}{% endif %}' +filter_list: +- filter: + - function: regex + regex_pattern: సమాధానం (\-?[0-9\.\,]+) + - function: take_first + name: strict-match +- filter: + - function: regex + group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +generation_kwargs: + do_sample: false + until: + - 'ప్రశ్న:' + - + - <|im_end|> +include: cot_yaml +task: 
mgsm_native_cot_te diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_th.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_th.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6931d3a2ff44ab0de25a31a7624f2cd104c655c2 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_th.yaml @@ -0,0 +1,24 @@ +# Generated by utils.py +dataset_name: th +doc_to_target: '{% if answer is not none %}{{answer[18:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\nคำตอบทีละขั้นตอน:"}}{% else %}{{"โจทย์: "+question+"\nคำตอบทีละขั้นตอน:"}}{% endif %}' +filter_list: +- filter: + - function: regex + regex_pattern: คำตอบคือ (\-?[0-9\.\,]+) + - function: take_first + name: strict-match +- filter: + - function: regex + group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +generation_kwargs: + do_sample: false + until: + - 'โจทย์:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_native_cot_th diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_zh.yaml b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_zh.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3f0d7e2dcecaecee05671a636b0a3e27eeeee95e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_zh.yaml @@ -0,0 +1,24 @@ +# Generated by utils.py +dataset_name: zh +doc_to_target: '{% if answer is not none %}{{answer[6:]}}{% else %}{{answer_number|string}}{% endif %}' +doc_to_text: '{% if answer is not none %}{{question+"\n逐步解答:"}}{% else %}{{"问题: "+question+"\n逐步解答:"}}{% endif %}' +filter_list: +- filter: + - function: regex + regex_pattern: 答案是 (\-?[0-9\.\,]+)。 + - function: take_first + name: strict-match +- filter: + - function: regex + group_select: -1 + regex_pattern: (-?[$0-9.,]{2,})|(-?[0-9]+) + - function: take_first + name: flexible-extract +generation_kwargs: + do_sample: false + until: + - '问题:' + - + - <|im_end|> +include: cot_yaml +task: mgsm_native_cot_zh diff --git a/lm-evaluation/build/lib/lm_eval/tasks/mgsm/utils.py b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..116214f9f4c45ffb9a04757ca41c58114180b259 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/mgsm/utils.py @@ -0,0 +1,228 @@ +import argparse + +import yaml + + +LANGUAGES = { + "bn": { # Bengali + # "QUESTION": "প্রশ্ন:", + "QUESTION": "\u09aa\u09cd\u09b0\u09b6\u09cd\u09a8:", + # "ANSWER": "ধাপে ধাপে উত্তর:", + "ANSWER": "\u09a7\u09be\u09aa\u09c7 \u09a7\u09be\u09aa\u09c7 \u0989\u09a4\u09cd\u09a4\u09b0:", + "DIRECT": "Answer:", + "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", + }, + "de": { # German + "QUESTION": "Frage:", + # "ANSWER": "Schritt-für-Schritt-Antwort:", + "ANSWER": "Schritt-f\u00fcr-Schritt-Antwort:", + "DIRECT": "Antwort:", + "REGEX": "Die Antwort lautet (\\-?[0-9\\.\\,]+)", + }, + "en": { # English + "QUESTION": "Question:", + "ANSWER": "Step-by-Step Answer:", + "DIRECT": "Answer:", + "REGEX": "The answer is (\\-?[0-9\\.\\,]+)", + }, + "es": { # Spanish + "QUESTION": "Pregunta:", + "ANSWER": "Respuesta paso a paso:", + "DIRECT": "Respuesta:", + "REGEX": "La respuesta es (\\-?[0-9\\.\\,]+)", + }, + "fr": { # French + "QUESTION": "Question :", + # "ANSWER": "Réponse étape par étape :" + "ANSWER": "R\u00e9ponse \u00e9tape par \u00e9tape :", + # "DIRECT": "Réponse 
:", + "DIRECT": "R\u00e9ponse :", + # "REGEX": "La réponse est (\\-?[0-9\\.\\,]+)", + "REGEX": "La r\u00e9ponse est (\\-?[0-9\\.\\,]+)", + }, + "ru": { # Russian + # "QUESTION": "Задача:", + "QUESTION": "\u0417\u0430\u0434\u0430\u0447\u0430:", + # "ANSWER": "Пошаговоерешение:", + "ANSWER": "\u041f\u043e\u0448\u0430\u0433\u043e\u0432\u043e\u0435\u0440\u0435\u0448\u0435\u043d\u0438\u0435:", + "DIRECT": "Answer:", + # "REGEX": "Ответ — (\\-?[0-9\\.\\,]+)", + "REGEX": "\u041e\u0442\u0432\u0435\u0442 \u2014 (\\-?[0-9\\.\\,]+)", + }, + "sw": { # Swahili + "QUESTION": "Swali:", + "ANSWER": "Jibu la Hatua kwa Hatua:", + "DIRECT": "Answer:", + "REGEX": "Jibu ni (\\-?[0-9\\.\\,]+)", + }, + "te": { # Telugu + # "QUESTION": "ప్రశ్న:", + "QUESTION": "\u0c2a\u0c4d\u0c30\u0c36\u0c4d\u0c28:", + # "ANSWER": "దశలవారీగా సమాధానం:", + "ANSWER": "\u0c26\u0c36\u0c32\u0c35\u0c3e\u0c30\u0c40\u0c17\u0c3e \u0c38\u0c2e\u0c3e\u0c27\u0c3e\u0c28\u0c02:", + "DIRECT": "Answer:", + # "REGEX": "సమాధానం (\\-?[0-9\\.\\,]+)", + "REGEX": "\u0c38\u0c2e\u0c3e\u0c27\u0c3e\u0c28\u0c02 (\\-?[0-9\\.\\,]+)", + }, + "th": { # Thai + # "QUESTION": "โจทย์:", + "QUESTION": "\u0e42\u0e08\u0e17\u0e22\u0e4c:", + # "ANSWER": "คำตอบทีละขั้นตอน:", + "ANSWER": "\u0e04\u0e33\u0e15\u0e2d\u0e1a\u0e17\u0e35\u0e25\u0e30\u0e02\u0e31\u0e49\u0e19\u0e15\u0e2d\u0e19:", + "DIRECT": "Answer:", + # "REGEX": "คำตอบคือ (\\-?[0-9\\.\\,]+)", + "REGEX": "\u0e04\u0e33\u0e15\u0e2d\u0e1a\u0e04\u0e37\u0e2d (\\-?[0-9\\.\\,]+)", + }, + "ja": { # Japanese + # "QUESTION": "問題:", + "QUESTION": "\u554f\u984c:", + # "ANSWER": "ステップごとの答え:", + "ANSWER": "\u30b9\u30c6\u30c3\u30d7\u3054\u3068\u306e\u7b54\u3048:", + "DIRECT": "Answer:", + # "REGEX": "答えは(\\-?[0-9\\.\\,]+)です。", + "REGEX": "\u7b54\u3048\u306f(\\-?[0-9\\.\\,]+)\u3067\u3059\u3002", + }, + "zh": { # Chinese + # "QUESTION": "问题:", + "QUESTION": "\u95ee\u9898:", + # "ANSWER": "逐步解答:", + "ANSWER": "\u9010\u6b65\u89e3\u7b54:", + "DIRECT": "Answer:", + # "REGEX": "答案是 (\\-?[0-9\\.\\,]+)。", + "REGEX": "\u7b54\u6848\u662f (\\-?[0-9\\.\\,]+)\u3002", + }, +} + + +def add_regex_pattern(regex_pattern): + if regex_pattern is None: + return {} + return { + "filter_list": [ + { + "name": "strict-match", + "filter": [ + { + "function": "regex", + "regex_pattern": f"""{regex_pattern}""", + }, + { + "function": "take_first", + }, + ], + }, + { + "name": "flexible-extract", + "filter": [ + { + "function": "regex", + "regex_pattern": """(-?[$0-9.,]{2,})|(-?[0-9]+)""", + "group_select": -1, + }, + { + "function": "take_first", + }, + ], + }, + ], + } + + +def gen_lang_yamls(output_dir: str, overwrite: bool, mode: str) -> None: + """ + Generate a yaml file for each language. + + :param output_dir: The directory to output the files to. + :param overwrite: Whether to overwrite files if they already exist. 
+ """ + err = [] + for lang in LANGUAGES.keys(): + try: + QUESTION = LANGUAGES[lang]["QUESTION"] + + yaml_template = "cot_yaml" + filter_list = {} + DELIMITER = None + if mode == "direct": + ANSWER = LANGUAGES[lang]["DIRECT"] + REGEX = None + task_name = f"mgsm_direct_{lang}" + yaml_template = "direct_yaml" + elif mode == "native-cot": + ANSWER = LANGUAGES[lang]["ANSWER"] + REGEX = LANGUAGES[lang]["REGEX"] + task_name = f"mgsm_native_cot_{lang}" + filter_list = add_regex_pattern(REGEX) + DELIMITER = "" if lang in ["zh", "ja"] else None + elif mode == "en-cot": + ANSWER = LANGUAGES["en"]["ANSWER"] + REGEX = LANGUAGES["en"]["REGEX"] + task_name = f"mgsm_en_cot_{lang}" + + file_name = f"{task_name}.yaml" + ANSWER_TO_SKIP = len(LANGUAGES[lang]["ANSWER"]) + 1 + with open( + f"{output_dir}/{file_name}", "w" if overwrite else "x", encoding="utf8" + ) as f: + f.write("# Generated by utils.py\n") + yaml.dump( + { + "include": yaml_template, + "dataset_name": lang, + "task": f"{task_name}", + "doc_to_text": f"""{{% if answer is not none %}}""" + f"""{{{{question+"\\n{ANSWER}"}}}}""" + f"""{{% else %}}""" + f"""{{{{"{QUESTION} "+question+"\\n{ANSWER}"}}}}""" + f"""{{% endif %}}""", + "doc_to_target": f"""{{% if answer is not none %}}""" + f"""{{{{answer[{ANSWER_TO_SKIP}:]}}}}""" + f"""{{% else %}}""" + f"""{{{{answer_number|string}}}}""" + f"""{{% endif %}}""", + **filter_list, + "generation_kwargs": { + "until": [QUESTION, "", "<|im_end|>"], + "do_sample": False, + }, + **({"target_delimiter": DELIMITER} if DELIMITER else {}), + }, + f, + allow_unicode=True, + width=float("inf"), + ) + except FileExistsError: + err.append(file_name) + + if len(err) > 0: + raise FileExistsError( + "Files were not created because they already exist (use --overwrite flag):" + f" {', '.join(err)}" + ) + + +def main() -> None: + """Parse CLI args and generate language-specific yaml files.""" + parser = argparse.ArgumentParser() + parser.add_argument( + "--overwrite", + default=False, + action="store_true", + help="Overwrite files if they already exist", + ) + parser.add_argument( + "--output-dir", default=".", help="Directory to write yaml files to" + ) + parser.add_argument( + "--mode", + default="native-cot", + choices=["direct", "native-cot", "en-cot"], + help="Mode of chain-of-thought", + ) + args = parser.parse_args() + + gen_lang_yamls(output_dir=args.output_dir, overwrite=args.overwrite, mode=args.mode) + + +if __name__ == "__main__": + main() diff --git a/lm-evaluation/build/lib/lm_eval/tasks/piqa/README.md b/lm-evaluation/build/lib/lm_eval/tasks/piqa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e0d7d05d99fee62fed27374e5cf9f2daee9032b8 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/piqa/README.md @@ -0,0 +1,52 @@ +# PIQA + +### Paper + +Title: `PIQA: Reasoning about Physical Commonsense in Natural Language` + +Abstract: https://arxiv.org/abs/1911.11641 + +Physical Interaction: Question Answering (PIQA) is a physical commonsense +reasoning and a corresponding benchmark dataset. PIQA was designed to investigate +the physical knowledge of existing models. To what extent are current approaches +actually learning about the world? 
+ +Homepage: https://yonatanbisk.com/piqa/ + +### Citation + +``` +@inproceedings{Bisk2020, + author = {Yonatan Bisk and Rowan Zellers and + Ronan Le Bras and Jianfeng Gao + and Yejin Choi}, + title = {PIQA: Reasoning about Physical Commonsense in + Natural Language}, + booktitle = {Thirty-Fourth AAAI Conference on + Artificial Intelligence}, + year = {2020}, +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet. + +#### Tasks + +* `piqa` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/build/lib/lm_eval/tasks/piqa/piqa.yaml b/lm-evaluation/build/lib/lm_eval/tasks/piqa/piqa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5a07250ab9c28d08aede97a159e73b35b5eb5815 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/piqa/piqa.yaml @@ -0,0 +1,21 @@ +task: piqa +dataset_path: piqa +dataset_name: null +output_type: multiple_choice +training_split: train +validation_split: validation +test_split: null +doc_to_text: "Question: {{goal}}\nAnswer:" +doc_to_target: label +doc_to_choice: "{{[sol1, sol2]}}" +should_decontaminate: true +doc_to_decontamination_query: goal +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/pubmedqa/README.md b/lm-evaluation/build/lib/lm_eval/tasks/pubmedqa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c738dd2af65eecaee764cbeaf6a74aea308a0547 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/pubmedqa/README.md @@ -0,0 +1,56 @@ +# PubMedQA + +### Paper + +Title: `PubMedQA: A Dataset for Biomedical Research Question Answering` + +Abstract: https://arxiv.org/abs/1909.06146 + +PubMedQA is a novel biomedical question answering (QA) dataset collected from +PubMed abstracts. The task of PubMedQA is to answer research questions with +yes/no/maybe (e.g.: Do preoperative statins reduce atrial fibrillation after +coronary artery bypass grafting?) using the corresponding abstracts. PubMedQA +has 1k expert-annotated, 61.2k unlabeled and 211.3k artificially generated QA +instances. Each PubMedQA instance is composed of (1) a question which is either +an existing research article title or derived from one, (2) a context which is +the corresponding abstract without its conclusion, (3) a long answer, which is +the conclusion of the abstract and, presumably, answers the research question, +and (4) a yes/no/maybe answer which summarizes the conclusion. 
+ +Homepage: https://pubmedqa.github.io/ + + +### Citation + +``` +@inproceedings{jin2019pubmedqa, + title={PubMedQA: A Dataset for Biomedical Research Question Answering}, + author={Jin, Qiao and Dhingra, Bhuwan and Liu, Zhengping and Cohen, William and Lu, Xinghua}, + booktitle={Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)}, + pages={2567--2577}, + year={2019} +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet + +#### Tasks + +* `pubmed_qa` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/build/lib/lm_eval/tasks/pubmedqa/preprocess_pubmedqa.py b/lm-evaluation/build/lib/lm_eval/tasks/pubmedqa/preprocess_pubmedqa.py new file mode 100644 index 0000000000000000000000000000000000000000..0dccf9408a12ad5b1a0874ae9b8b0155e1db7ebf --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/pubmedqa/preprocess_pubmedqa.py @@ -0,0 +1,6 @@ +def doc_to_text(doc) -> str: + ctxs = "\n".join(doc["CONTEXTS"]) + return "Abstract: {}\nQuestion: {}\nAnswer:".format( + ctxs, + doc["QUESTION"], + ) diff --git a/lm-evaluation/build/lib/lm_eval/tasks/pubmedqa/pubmedqa.yaml b/lm-evaluation/build/lib/lm_eval/tasks/pubmedqa/pubmedqa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..47de2fa0980a0a45facbab4416c80373e91e08d5 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/pubmedqa/pubmedqa.yaml @@ -0,0 +1,16 @@ +task: pubmedqa +dataset_path: bigbio/pubmed_qa +dataset_name: pubmed_qa_labeled_fold0_source +output_type: multiple_choice +training_split: train +validation_split: validation +test_split: test +doc_to_text: !function preprocess_pubmedqa.doc_to_text +doc_to_target: final_decision +doc_to_choice: ["yes", "no", "maybe"] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/webqs/README.md b/lm-evaluation/build/lib/lm_eval/tasks/webqs/README.md new file mode 100644 index 0000000000000000000000000000000000000000..03366161fac76300aa617261b14e16168b5d6285 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/webqs/README.md @@ -0,0 +1,57 @@ +# WEBQs + +### Paper + +Title: `Semantic Parsing on Freebase from Question-Answer Pairs` + +Abstract: `https://cs.stanford.edu/~pliang/papers/freebase-emnlp2013.pdf` + +WebQuestions is a benchmark for question answering. The dataset consists of 6,642 +question/answer pairs. The questions are supposed to be answerable by Freebase, a +large knowledge graph. The questions are mostly centered around a single named entity. +The questions are popular ones asked on the web (at least in 2013). 
+ +Homepage: `https://worksheets.codalab.org/worksheets/0xba659fe363cb46e7a505c5b6a774dc8a` + + +### Citation + +``` +@inproceedings{berant-etal-2013-semantic, + title = "Semantic Parsing on {F}reebase from Question-Answer Pairs", + author = "Berant, Jonathan and + Chou, Andrew and + Frostig, Roy and + Liang, Percy", + booktitle = "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", + month = oct, + year = "2013", + address = "Seattle, Washington, USA", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/D13-1160", + pages = "1533--1544", +} +``` + +### Groups and Tasks + +#### Groups + +* `freebase` + +#### Tasks + +* `webqs`: `Questions with multiple accepted answers.` + +### Checklist + +For adding novel benchmarks/datasets to the library: + * [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/build/lib/lm_eval/tasks/webqs/utils.py b/lm-evaluation/build/lib/lm_eval/tasks/webqs/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c65e08ba39087f6ebe5ea04fd9a1a310dbc5a0da --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/webqs/utils.py @@ -0,0 +1,27 @@ +from typing import Dict, List + + +def doc_to_choice(doc: Dict) -> List[str]: + """Return all of the accepted answers as choices.""" + return _remove_prefixes(doc["answers"]) + + +def doc_to_target(doc: Dict) -> List[int]: + """Return list of indices of accepted answers (all of them).""" + remaining = _remove_prefixes(doc["answers"]) + return list(range(len(remaining))) + + +def _remove_prefixes(aliases): + """ + Remove any alias that has a strict prefix elsewhere in the list. + + This is an optimization. We can do this because if the prefix is acceptable by isgreedy, + we can stop looking. 
+ """ + aliases.sort() + ret = [aliases[0]] + for alias in aliases[1:]: + if not alias.startswith(ret[-1]): + ret.append(alias) + return ret diff --git a/lm-evaluation/build/lib/lm_eval/tasks/webqs/webqs.yaml b/lm-evaluation/build/lib/lm_eval/tasks/webqs/webqs.yaml new file mode 100644 index 0000000000000000000000000000000000000000..71aaac1eba31ccc02ea06ceefa28aedf127732d2 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/webqs/webqs.yaml @@ -0,0 +1,20 @@ +group: + - freebase +task: webqs +dataset_path: web_questions +dataset_name: null +output_type: multiple_choice +training_split: train +validation_split: null +test_split: test +doc_to_text: "Question: {{question}}\nAnswer:" +doc_to_target: !function utils.doc_to_target +doc_to_choice: !function utils.doc_to_choice +should_decontaminate: true +doc_to_decontamination_query: question +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true +metadata: + version: 2.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/wikitext/README.md b/lm-evaluation/build/lib/lm_eval/tasks/wikitext/README.md new file mode 100644 index 0000000000000000000000000000000000000000..237946631345068184361be3dd0df3542b8a69e8 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/wikitext/README.md @@ -0,0 +1,49 @@ +# Wikitext + +### Paper + +Pointer Sentinel Mixture Models +https://arxiv.org/pdf/1609.07843.pdf + +The WikiText language modeling dataset is a collection of over 100 million tokens +extracted from the set of verified Good and Featured articles on Wikipedia. + +NOTE: This `Task` is based on WikiText-2. + +Homepage: https://www.salesforce.com/products/einstein/ai-research/the-wikitext-dependency-language-modeling-dataset/ + + +### Citation + +``` +@misc{merity2016pointer, + title={Pointer Sentinel Mixture Models}, + author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher}, + year={2016}, + eprint={1609.07843}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet. + +#### Tasks + +* `wikitext`: measure perplexity on the Wikitext dataset, via rolling loglikelihoods. + +### Checklist + +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/wikitext/preprocess_wikitext.py b/lm-evaluation/build/lib/lm_eval/tasks/wikitext/preprocess_wikitext.py new file mode 100644 index 0000000000000000000000000000000000000000..e5dff22b2805e0e912d8ad263fd3ffda7e529d4c --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/wikitext/preprocess_wikitext.py @@ -0,0 +1,48 @@ +import re + + +def wikitext_detokenizer(doc): + string = doc["page"] + # contractions + string = string.replace("s '", "s'") + string = re.sub(r"/' [0-9]/", r"/'[0-9]/", string) + # number separators + string = string.replace(" @-@ ", "-") + string = string.replace(" @,@ ", ",") + string = string.replace(" @.@ ", ".") + # punctuation + string = string.replace(" : ", ": ") + string = string.replace(" ; ", "; ") + string = string.replace(" . ", ". ") + string = string.replace(" ! ", "! ") + string = string.replace(" ? ", "? ") + string = string.replace(" , ", ", ") + # double brackets + string = re.sub(r"\(\s*([^\)]*?)\s*\)", r"(\1)", string) + string = re.sub(r"\[\s*([^\]]*?)\s*\]", r"[\1]", string) + string = re.sub(r"{\s*([^}]*?)\s*}", r"{\1}", string) + string = re.sub(r"\"\s*([^\"]*?)\s*\"", r'"\1"', string) + string = re.sub(r"'\s*([^']*?)\s*'", r"'\1'", string) + # miscellaneous + string = string.replace("= = = =", "====") + string = string.replace("= = =", "===") + string = string.replace("= =", "==") + string = string.replace(" " + chr(176) + " ", chr(176)) + string = string.replace(" \n", "\n") + string = string.replace("\n ", "\n") + string = string.replace(" N ", " 1 ") + string = string.replace(" 's", "'s") + + return string + + +def process_results(doc, results): + (loglikelihood,) = results + # IMPORTANT: wikitext counts number of words in *original doc before detokenization* + _words = len(re.split(r"\s+", doc["page"])) + _bytes = len(doc["page"].encode("utf-8")) + return { + "word_perplexity": (loglikelihood, _words), + "byte_perplexity": (loglikelihood, _bytes), + "bits_per_byte": (loglikelihood, _bytes), + } diff --git a/lm-evaluation/build/lib/lm_eval/tasks/wikitext/wikitext.yaml b/lm-evaluation/build/lib/lm_eval/tasks/wikitext/wikitext.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cc95b1026103695f50db7ec3931e4bbd63932910 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/wikitext/wikitext.yaml @@ -0,0 +1,20 @@ +task: wikitext +dataset_path: EleutherAI/wikitext_document_level +dataset_name: wikitext-2-raw-v1 +output_type: loglikelihood_rolling +training_split: train +validation_split: validation +test_split: test +doc_to_text: "" +doc_to_target: !function preprocess_wikitext.wikitext_detokenizer +process_results: !function preprocess_wikitext.process_results +should_decontaminate: true +doc_to_decontamination_query: "{{page}}" +metric_list: + - metric: word_perplexity + - metric: byte_perplexity + - metric: bits_per_byte +metadata: + version: 2.0 +dataset_kwargs: + trust_remote_code: true
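
A note on the generated MGSM configs above: every `doc_to_target` slices the few-shot `answer` field with a language-specific offset (`answer[21:]` for English, `answer[23:]` for Spanish, `answer[26:]` for French, and so on). The offset comes from `ANSWER_TO_SKIP = len(ANSWER) + 1` in `mgsm/utils.py`, i.e. the length of the localized step-by-step prefix plus the space after it. The sketch below only illustrates that arithmetic and assumes the dataset's `answer` field begins with the localized prefix, as the generated offsets suggest; it is not part of the diff.

```
# Illustration only, not part of the diff: how the answer[N:] offsets in the
# generated YAMLs line up with the ANSWER prefixes defined in mgsm/utils.py.
# Assumes the MGSM `answer` field starts with the localized prefix.
ANSWER_EN = "Step-by-Step Answer:"      # English prefix from LANGUAGES["en"]
skip = len(ANSWER_EN) + 1               # 21 -> matches answer[21:] in mgsm_native_cot_en.yaml

example_answer = "Step-by-Step Answer: 5 + 7 = 12. The answer is 12."  # made-up example
print(example_answer[skip:])            # "5 + 7 = 12. The answer is 12."
```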
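
The `_remove_prefixes` helper in `webqs/utils.py` keeps an accepted answer only if no earlier answer in the sorted list is a prefix of it. A small run with hypothetical aliases (not taken from the dataset) makes the pruning concrete:

```
# Hypothetical input: demonstrates the prefix pruning done by _remove_prefixes.
aliases = ["new york city", "new york", "nyc"]
aliases.sort()                          # ["new york", "new york city", "nyc"]
kept = [aliases[0]]
for alias in aliases[1:]:
    if not alias.startswith(kept[-1]):  # drop aliases that extend a kept prefix
        kept.append(alias)
print(kept)                             # ["new york", "nyc"]
```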
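
Finally, `process_results` in `preprocess_wikitext.py` returns `(loglikelihood, unit_count)` pairs for the `word_perplexity`, `byte_perplexity`, and `bits_per_byte` metrics. The harness's own aggregation code is the source of truth for how those pairs are reduced; the sketch below only shows the conventional arithmetic for weighted perplexity and bits per byte, with invented numbers.

```
import math

# Hedged sketch, not the harness implementation: the conventional reduction of
# (loglikelihood, unit_count) pairs into corpus-level perplexity metrics.

def weighted_perplexity(pairs):
    total_ll = sum(ll for ll, _ in pairs)       # log-likelihoods in nats
    total_units = sum(n for _, n in pairs)      # words or bytes
    return math.exp(-total_ll / total_units)

def bits_per_byte(pairs):
    total_ll = sum(ll for ll, _ in pairs)
    total_bytes = sum(n for _, n in pairs)
    return -total_ll / (total_bytes * math.log(2))

docs = [(-350.0, 120), (-410.0, 150)]           # invented (loglikelihood, word count) pairs
print(weighted_perplexity(docs))                # exp(760 / 270), roughly 16.7
```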