applied-ai-018 committed
Commit a9dbdfb · verified · 1 Parent(s): d90d653

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. lm-evaluation-harness/lm_eval/tasks/asdiv/README.md +56 -0
  2. lm-evaluation-harness/lm_eval/tasks/asdiv/default.yaml +16 -0
  3. lm-evaluation-harness/lm_eval/tasks/basqueglue/README.md +72 -0
  4. lm-evaluation-harness/lm_eval/tasks/basqueglue/bec.yaml +16 -0
  5. lm-evaluation-harness/lm_eval/tasks/basqueglue/bhtc.yaml +16 -0
  6. lm-evaluation-harness/lm_eval/tasks/basqueglue/coref.yaml +16 -0
  7. lm-evaluation-harness/lm_eval/tasks/basqueglue/qnli.yaml +16 -0
  8. lm-evaluation-harness/lm_eval/tasks/basqueglue/utils.py +78 -0
  9. lm-evaluation-harness/lm_eval/tasks/basqueglue/vaxx.yaml +16 -0
  10. lm-evaluation-harness/lm_eval/tasks/basqueglue/wic.yaml +17 -0
  11. lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/_generate_configs.py +26 -0
  12. lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/_gpqa_cot_zeroshot_yaml +38 -0
  13. lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_diamond_cot_zeroshot.yaml +4 -0
  14. lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_extended_cot_zeroshot.yaml +4 -0
  15. lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_main_cot_zeroshot.yaml +4 -0
  16. lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/utils.py +39 -0
  17. lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/_generate_configs.py +26 -0
  18. lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/gpqa_diamond_n_shot.yaml +4 -0
  19. lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/gpqa_extended_n_shot.yaml +4 -0
  20. lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/gpqa_main_n_shot.yaml +4 -0
  21. lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/utils.py +41 -0
  22. lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/_generate_configs.py +26 -0
  23. lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/_gpqa_zeroshot_yaml +21 -0
  24. lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/gpqa_diamond_zeroshot.yaml +4 -0
  25. lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/gpqa_extended_zeroshot.yaml +4 -0
  26. lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/gpqa_main_zeroshot.yaml +4 -0
  27. lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/utils.py +38 -0
  28. lm-evaluation-harness/lm_eval/tasks/logiqa2/README.md +52 -0
  29. lm-evaluation-harness/lm_eval/tasks/logiqa2/logieval.yaml +29 -0
  30. lm-evaluation-harness/lm_eval/tasks/logiqa2/logiqa2.yaml +21 -0
  31. lm-evaluation-harness/lm_eval/tasks/logiqa2/utils_logiqa2.py +27 -0
  32. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-for-self-improvement.yaml +4 -0
  33. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-less-harmful-to-others.yaml +4 -0
  34. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/has-serious-disability.yaml +4 -0
  35. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/interest-in-art.yaml +4 -0
  36. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/interest-in-math.yaml +4 -0
  37. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/interest-in-sports.yaml +4 -0
  38. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/machiavellianism.yaml +4 -0
  39. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/risk-averse.yaml +4 -0
  40. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/subscribes-to-Atheism.yaml +4 -0
  41. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/subscribes-to-act-utilitarianism.yaml +4 -0
  42. lm-evaluation-harness/lm_eval/tasks/swag/README.md +52 -0
  43. lm-evaluation-harness/lm_eval/tasks/swag/swag.yaml +19 -0
  44. lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/_default_template_yaml +19 -0
  45. lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/_generate_configs.py +210 -0
  46. lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus.yaml +6 -0
  47. lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_clinical_psychology.yaml +7 -0
  48. lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_computer_science.yaml +7 -0
  49. lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_dentistry.yaml +7 -0
  50. lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_education_(profession_level).yaml +7 -0
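Once these configs are on the harness's task path, the new tasks can be selected by name. Below is a minimal sketch of running a few of them through the Python API; the model is only a placeholder, and the exact `simple_evaluate` signature may differ slightly between harness versions.

```python
import lm_eval

# Placeholder model: any Hugging Face causal LM id should work here.
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",
    tasks=["asdiv", "gpqa_main_zeroshot", "swag"],
    batch_size=8,
)
print(results["results"])  # per-task metrics such as acc / acc_norm / exact_match
```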
lm-evaluation-harness/lm_eval/tasks/asdiv/README.md ADDED
@@ -0,0 +1,56 @@
# ASDiv

### Paper

Title: `ASDiv: A Diverse Corpus for Evaluating and Developing English Math Word Problem Solvers`

Abstract: https://arxiv.org/abs/2106.15772

ASDiv (Academia Sinica Diverse MWP Dataset) is a diverse (in terms of both language
patterns and problem types) English math word problem (MWP) corpus for evaluating
the capability of various MWP solvers. Existing MWP corpora for studying AI progress
remain limited either in language usage patterns or in problem types. We thus present
a new English MWP corpus with 2,305 MWPs that cover more text patterns and most problem
types taught in elementary school. Each MWP is annotated with its problem type and grade
level (for indicating the level of difficulty).

NOTE: We currently ignore formulas for answer generation.

Homepage: https://github.com/chaochun/nlu-asdiv-dataset

### Citation

```
@misc{miao2021diverse,
    title={A Diverse Corpus for Evaluating and Developing English Math Word Problem Solvers},
    author={Shen-Yun Miao and Chao-Chun Liang and Keh-Yih Su},
    year={2021},
    eprint={2106.15772},
    archivePrefix={arXiv},
    primaryClass={cs.AI}
}
```

### Groups and Tasks

#### Groups

* Not part of a group yet.

#### Tasks

* `asdiv`

### Checklist

For adding novel benchmarks/datasets to the library:
* [ ] Is the task an existing benchmark in the literature?
  * [ ] Have you referenced the original paper that introduced the task?
  * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?

If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation-harness/lm_eval/tasks/asdiv/default.yaml ADDED
@@ -0,0 +1,16 @@
task: asdiv
dataset_path: EleutherAI/asdiv
output_type: loglikelihood
validation_split: validation
doc_to_text: "{{body}}\nQuestion:{{question}}\nAnswer:"
doc_to_target: "{{answer.split(' (')[0]}}"
should_decontaminate: true
doc_to_decontamination_query: "{{body}} {{question}}"
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
dataset_kwargs:
  trust_remote_code: true
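To see what these templated fields produce, here is a minimal sketch that renders `doc_to_text` and `doc_to_target` with Jinja2 (the templating the harness uses for these fields) on a made-up ASDiv-style record; the field values below are invented for illustration.

```python
from jinja2 import Template

# Hypothetical ASDiv-style record (field names match the config; values are invented).
doc = {
    "body": "Seven red apples and two green apples are in the basket.",
    "question": "How many apples are in the basket?",
    "answer": "9 (apples)",
}

doc_to_text = Template("{{body}}\nQuestion:{{question}}\nAnswer:")
doc_to_target = Template("{{answer.split(' (')[0]}}")

print(doc_to_text.render(**doc))    # the prompt shown to the model
print(doc_to_target.render(**doc))  # "9" -- the parenthesised unit/formula suffix is stripped
```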
lm-evaluation-harness/lm_eval/tasks/basqueglue/README.md ADDED
@@ -0,0 +1,72 @@
# BasqueGLUE

### Paper

Title: `BasqueGLUE: A Natural Language Understanding Benchmark for Basque`

Abstract: `https://aclanthology.org/2022.lrec-1.172/`

Natural Language Understanding (NLU) technology has improved significantly over the last few years and multitask benchmarks such as GLUE are key to evaluate this improvement in a robust and general way. These benchmarks take into account a wide and diverse set of NLU tasks that require some form of language understanding, beyond the detection of superficial, textual clues. However, they are costly to develop and language-dependent, and therefore they are only available for a small number of languages. In this paper, we present BasqueGLUE, the first NLU benchmark for Basque, a less-resourced language, which has been elaborated from previously existing datasets and following similar criteria to those used for the construction of GLUE and SuperGLUE. We also report the evaluation of two state-of-the-art language models for Basque on BasqueGLUE, thus providing a strong baseline to compare upon. BasqueGLUE is freely available under an open license.

Homepage: `https://github.com/orai-nlp/BasqueGLUE`

Title: `Latxa: An Open Language Model and Evaluation Suite for Basque`

Abstract: `https://arxiv.org/abs/2403.20266`

This paper presents the use of BasqueGLUE for evaluating the performance of decoder models in Basque.

Homepage: `https://github.com/hitz-zentroa/latxa`

### Citation

```
@InProceedings{urbizu2022basqueglue,
    author    = {Urbizu, Gorka and San Vicente, Iñaki and Saralegi, Xabier and Agerri, Rodrigo and Soroa, Aitor},
    title     = {BasqueGLUE: A Natural Language Understanding Benchmark for Basque},
    booktitle = {Proceedings of the Language Resources and Evaluation Conference},
    month     = {June},
    year      = {2022},
    address   = {Marseille, France},
    publisher = {European Language Resources Association},
    pages     = {1603--1612},
    url       = {https://aclanthology.org/2022.lrec-1.172}
}

@misc{etxaniz2024latxa,
    title={Latxa: An Open Language Model and Evaluation Suite for Basque},
    author={Julen Etxaniz and Oscar Sainz and Naiara Perez and Itziar Aldabe and German Rigau and Eneko Agirre and Aitor Ormazabal and Mikel Artetxe and Aitor Soroa},
    year={2024},
    eprint={2403.20266},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
```

### Groups and Tasks

#### Groups

* `basque-glue`: First version of the implementation

#### Tasks

* `bhtc_v2`: Topic classification of news extracts with 12 categories.
* `bec`: Sentiment analysis on tweets about the campaign for the 2016 Basque elections.
* `vaxx_stance`: Stance detection on tweets around the anti-vaccine movement.
* `qnlieu`: Q&A NLI as in [glue/qnli](../glue/qnli).
* `wiceu`: Word-in-Context as in [super_glue/wic](../super_glue/wic).
* `epec_korref_bin`: Coreference detection as in [super_glue/wsc](../super_glue/wsc).

### Checklist

For adding novel benchmarks/datasets to the library:
* [ ] Is the task an existing benchmark in the literature?
  * [ ] Have you referenced the original paper that introduced the task?
  * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?

If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation-harness/lm_eval/tasks/basqueglue/bec.yaml ADDED
@@ -0,0 +1,16 @@
group: basque-glue
task: bec2016eu
dataset_path: orai-nlp/basqueGLUE
dataset_name: bec
output_type: multiple_choice
validation_split: validation
test_split: test
doc_to_text: "Testua: {{text}}\nGaldera: Nolako jarrera agertzen du aurreko testuak?\nErantzuna:"
doc_to_target: label
doc_to_choice: ['negatiboa', 'neutrala', 'positiboa']
metric_list:
  - metric: f1
    aggregation: !function utils.micro_f1_score
    higher_is_better: true
metadata:
  - version: 1.0
lm-evaluation-harness/lm_eval/tasks/basqueglue/bhtc.yaml ADDED
@@ -0,0 +1,16 @@
group: basque-glue
task: bhtc_v2
dataset_path: orai-nlp/basqueGLUE
dataset_name: bhtc
output_type: multiple_choice
validation_split: validation
test_split: test
doc_to_text: "Testua: {{text}}\nGaldera: Zein da aurreko testuaren gaia?\nErantzuna:"
doc_to_target: label
doc_to_choice: ['Ekonomia', 'Euskal Herria', 'Euskara', 'Gizartea', 'Historia', 'Ingurumena', 'Iritzia', 'Komunikazioa', 'Kultura', 'Nazioartea', 'Politika', 'Zientzia']
metric_list:
  - metric: f1
    aggregation: !function utils.micro_f1_score
    higher_is_better: true
metadata:
  - version: 1.0
lm-evaluation-harness/lm_eval/tasks/basqueglue/coref.yaml ADDED
@@ -0,0 +1,16 @@
group: basque-glue
task: epec_koref_bin
dataset_path: orai-nlp/basqueGLUE
dataset_name: coref
output_type: multiple_choice
validation_split: validation
test_split: test
doc_to_text: !function utils.coref_doc_to_text
doc_to_target: label
doc_to_choice: ['ez', 'bai']
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
metadata:
  - version: 1.0
lm-evaluation-harness/lm_eval/tasks/basqueglue/qnli.yaml ADDED
@@ -0,0 +1,16 @@
group: basque-glue
task: qnlieu
dataset_path: orai-nlp/basqueGLUE
dataset_name: qnli
output_type: multiple_choice
validation_split: validation
test_split: test
doc_to_text: "{{question}}\n{{sentence}}\nGaldera: aurreko galderari erantzuten al dio emandako testuak?\nErantzuna:"
doc_to_target: label
doc_to_choice: ['bai', 'ez']
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
metadata:
  - version: 1.0
lm-evaluation-harness/lm_eval/tasks/basqueglue/utils.py ADDED
@@ -0,0 +1,78 @@
import html
import re

from datasets import load_metric


def general_detokenize(string):
    string = re.sub(r"\s+([.,;:!?)])", r"\1", string)
    string = re.sub(r"(\s+|^)\(\s+([^)]+)\s+\)", r"\1(\2)", string)
    string = re.sub(r"(\s+|^)\[\s+([^)]+)\s+\]", r"\1[\2]", string)
    string = re.sub(r'(\s+|^)"\s+([^"]+)\s+"', r'\1"\2"', string)
    string = re.sub(r"(\s+|^)'\s+([^']+)\s+'", r"\1'\2'", string)
    return string


def process_doc(string):
    string = html.unescape(string)
    string = general_detokenize(string)
    return string


def process_wic_docs(dataset):
    def _helper(doc):
        # there's some issues with the encoding on this one
        doc["sentence1"] = (
            process_doc(doc["sentence1"]).encode("latin-1").decode("utf-8")
        )
        doc["sentence2"] = (
            process_doc(doc["sentence2"]).encode("latin-1").decode("utf-8")
        )
        return doc

    return dataset.map(_helper)


def coref_doc_to_text(x):
    def _span_in_context(span_index, span_text):
        span_start = span_index
        span_end = span_start + len(span_text.split(" ")) - 1
        tokens[span_start] = f"*{tokens[span_start]}"
        tokens[span_end] = f"{tokens[span_end]}*"

    tokens = x["text"].split(" ")
    _span_in_context(x["span1_index"], x["span1_text"])
    _span_in_context(
        x["span2_index"] - 1, x["span2_text"]
    )  # span1_index is 0-based but span2_index is 1-based ??
    context = process_doc(" ".join(tokens))
    span_1 = process_doc(x["span1_text"])
    span_2 = process_doc(x["span2_text"])
    text = (
        f"Testua: {context}\n"
        + f'Galdera: Aurreko testuan, "*{span_1}*" eta "*{span_2}*" gauza bera dira?\n'
        + "Erantzuna:"
    )
    return text


# Measure F1 as in the benchmark repo: https://github.com/orai-nlp/BasqueGLUE/blob/main/eval_basqueglue.py


def micro_f1_score(items):
    f1_metric = load_metric("f1")
    golds, preds = list(zip(*items))
    f1_score = f1_metric.compute(references=golds, predictions=preds, average="micro")[
        "f1"
    ]
    return f1_score


def vaxx_f1_score(items):
    f1_metric = load_metric("f1")
    golds, preds = list(zip(*items))
    f1_class = f1_metric.compute(
        references=golds, predictions=preds, labels=[0, 2], average=None
    )["f1"]
    f1_score = sum(f1_class) / len(f1_class)
    return f1_score
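Both aggregation functions receive a list of `(gold, pred)` pairs from the harness. The following minimal sketch replays the same computations with scikit-learn on toy labels, purely to illustrate the expected input shape; the label values are invented and the `f1_score` call stands in for the `datasets.load_metric("f1")` wrapper used above, which it should match.

```python
from sklearn.metrics import f1_score

# Toy (gold, pred) pairs in the shape the harness passes to the aggregation functions.
items = [(0, 0), (1, 2), (2, 2), (0, 1), (2, 2)]
golds, preds = zip(*items)

micro = f1_score(golds, preds, average="micro")            # mirrors micro_f1_score(items)
per_class = f1_score(golds, preds, labels=[0, 2], average=None)
favor_against = per_class.mean()                           # mirrors vaxx_f1_score(items)
print(micro, favor_against)
```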
lm-evaluation-harness/lm_eval/tasks/basqueglue/vaxx.yaml ADDED
@@ -0,0 +1,16 @@
group: basque-glue
task: vaxx_stance
dataset_path: orai-nlp/basqueGLUE
dataset_name: vaxx
output_type: multiple_choice
validation_split: validation
test_split: test
doc_to_text: "Testua: {{text}}\nGaldera: Nolako jarrera agertzen du aurreko testuak txertoei buruz?\nErantzuna:"
doc_to_target: label
doc_to_choice: ['aurka', 'neutrala', 'alde']
metric_list:
  - metric: f1
    aggregation: !function utils.vaxx_f1_score
    higher_is_better: true
metadata:
  - version: 1.0
lm-evaluation-harness/lm_eval/tasks/basqueglue/wic.yaml ADDED
@@ -0,0 +1,17 @@
group: basque-glue
task: wiceu
dataset_path: orai-nlp/basqueGLUE
dataset_name: wic
output_type: multiple_choice
validation_split: validation
test_split: test
process_docs: !function utils.process_wic_docs
doc_to_text: "1. esaldia: {{sentence1}}\n2. esaldia: {{sentence2}}\nGaldera: Aurreko bi esaldietan, \"{{word}}\" hitzak esanahi berdina du?\nErantzuna:"
doc_to_target: label
doc_to_choice: ['ez', 'bai']
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
metadata:
  - version: 1.0
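The `wiceu` config is the one that applies `utils.process_wic_docs`, whose comment notes an encoding issue in this split. As a rough illustration (the string below is a toy Basque sentence, not taken from the dataset), the latin-1 encode / UTF-8 decode round-trip repairs mojibake of this kind:

```python
# Apparent double-encoding: UTF-8 bytes previously read as latin-1 ("ñ" shows up as "Ã±").
broken = "gaztaÃ±ak biltzen ari da"
fixed = broken.encode("latin-1").decode("utf-8")
print(fixed)  # "gaztañak biltzen ari da"
```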
lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/_generate_configs.py ADDED
@@ -0,0 +1,26 @@
import yaml
from tqdm import tqdm


def main() -> None:
    subset = ["extended", "diamond", "main"]
    setting = "cot_zeroshot"
    for task in tqdm(subset):
        file_name = f"gpqa_{task}_{setting}.yaml"
        try:
            with open(f"{file_name}", "w") as f:
                f.write("# Generated by _generate_configs.py\n")
                yaml.dump(
                    {
                        "include": f"_gpqa_{setting}_yaml",
                        "task": f"gpqa_{task}_{setting}",
                        "dataset_name": f"gpqa_{task}",
                    },
                    f,
                )
        except FileExistsError:
            pass


if __name__ == "__main__":
    main()
lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/_gpqa_cot_zeroshot_yaml ADDED
@@ -0,0 +1,38 @@
dataset_path: Idavidrein/gpqa
group: gpqa
output_type: generate_until
process_docs: !function utils.process_docs
training_split: train
# Because huggingface dataset only has train split
validation_split: train
test_split: null
doc_to_text: "What is the correct answer to this question:{{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nLet's think step by step: "
doc_to_target: answer
filter_list:
  - name: "strict-match"
    filter:
      - function: "regex"
        regex_pattern: "(?<=The answer is )(.*)(?=.)"
      - function: "take_first"
  - name: "flexible-extract"
    filter:
      - function: "multi_choice_regex"
        group_select: -1
        ignore_case: true
        ignore_punctuation: true
        regex_pattern: "(\\([A-Z]\\))"
      - function: "take_first"
generation_kwargs:
  until:
    - "</s>"
  do_sample: false
  temperature: 0.0
num_fewshot: 0
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
metadata:
  version: 1.0
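The two filter pipelines above extract a lettered choice from the model's chain-of-thought output. As a rough illustration of what the regexes match, the sketch below replays them on a made-up response; the harness applies them through its own filter classes rather than plain `re` calls.

```python
import re

response = "Let's think step by step: ... The answer is (B)."

# strict-match: capture the text immediately following "The answer is ".
strict = re.search(r"(?<=The answer is )(.*)(?=.)", response)
print(strict.group(1))  # "(B)"

# flexible-extract: any "(letter)" mention; group_select: -1 keeps the last one.
letters = re.findall(r"(\([A-Z]\))", response)
print(letters[-1] if letters else None)  # "(B)"
```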
lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_diamond_cot_zeroshot.yaml ADDED
@@ -0,0 +1,4 @@
# Generated by _generate_configs.py
dataset_name: gpqa_diamond
include: _gpqa_cot_zeroshot_yaml
task: gpqa_diamond_cot_zeroshot
lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_extended_cot_zeroshot.yaml ADDED
@@ -0,0 +1,4 @@
# Generated by _generate_configs.py
dataset_name: gpqa_extended
include: _gpqa_cot_zeroshot_yaml
task: gpqa_extended_cot_zeroshot
lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_main_cot_zeroshot.yaml ADDED
@@ -0,0 +1,4 @@
# Generated by _generate_configs.py
dataset_name: gpqa_main
include: _gpqa_cot_zeroshot_yaml
task: gpqa_main_cot_zeroshot
lm-evaluation-harness/lm_eval/tasks/gpqa/cot_zeroshot/utils.py ADDED
@@ -0,0 +1,39 @@
import random
import re

import datasets


def preprocess(text):
    if text is None:
        return " "
    text = text.strip()
    text = text.replace(" [title]", ". ")
    text = re.sub("\\[.*?\\]", "", text)
    text = text.replace("  ", " ")
    return text


def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    def _process_doc(doc):
        choices = [
            preprocess(doc["Incorrect Answer 1"]),
            preprocess(doc["Incorrect Answer 2"]),
            preprocess(doc["Incorrect Answer 3"]),
            preprocess(doc["Correct Answer"]),
        ]

        random.shuffle(choices)
        correct_answer_index = choices.index(preprocess(doc["Correct Answer"]))

        out_doc = {
            "choice1": choices[0],
            "choice2": choices[1],
            "choice3": choices[2],
            "choice4": choices[3],
            "choices": [choices[0], choices[1], choices[2], choices[3]],
            "answer": f"({chr(65 + correct_answer_index)})",
        }
        return out_doc

    return dataset.map(_process_doc)
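For reference, the sketch below condenses what `_process_doc` does to a single made-up GPQA-style record (the field names match the dataset columns the code reads; the values are invented, and `preprocess` plus the `datasets.map` wrapper are skipped). Note that this variant shuffles with the unseeded module-level `random`, so the letter assigned to the correct answer can differ between runs; the `n_shot` variant later in this commit seeds `random.Random(42)` instead. The `random.seed(0)` call below exists only to make the toy example reproducible.

```python
import random

# Made-up GPQA-style record.
doc = {
    "Correct Answer": "The photon",
    "Incorrect Answer 1": "The gluon",
    "Incorrect Answer 2": "The W boson",
    "Incorrect Answer 3": "The graviton",
}

choices = [
    doc["Incorrect Answer 1"],
    doc["Incorrect Answer 2"],
    doc["Incorrect Answer 3"],
    doc["Correct Answer"],
]
random.seed(0)          # only for a reproducible illustration
random.shuffle(choices)
answer = f"({chr(65 + choices.index(doc['Correct Answer']))})"
print(choices, answer)  # four shuffled options and a label such as "(C)"
```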
lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/_generate_configs.py ADDED
@@ -0,0 +1,26 @@
import yaml
from tqdm import tqdm


def main() -> None:
    subset = ["extended", "diamond", "main"]

    for task in tqdm(subset):
        file_name = f"gpqa_{task}_n_shot.yaml"
        try:
            with open(f"{file_name}", "w") as f:
                f.write("# Generated by _generate_configs.py\n")
                yaml.dump(
                    {
                        "include": "_gpqa_n_shot_yaml",
                        "task": f"gpqa_{task}_n_shot",
                        "dataset_name": f"gpqa_{task}",
                    },
                    f,
                )
        except FileExistsError:
            pass


if __name__ == "__main__":
    main()
lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/gpqa_diamond_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
# Generated by _generate_configs.py
dataset_name: gpqa_diamond
include: _gpqa_n_shot_yaml
task: gpqa_diamond_n_shot
lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/gpqa_extended_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
# Generated by _generate_configs.py
dataset_name: gpqa_extended
include: _gpqa_n_shot_yaml
task: gpqa_extended_n_shot
lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/gpqa_main_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
# Generated by _generate_configs.py
dataset_name: gpqa_main
include: _gpqa_n_shot_yaml
task: gpqa_main_n_shot
lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/utils.py ADDED
@@ -0,0 +1,41 @@
import random
import re

import datasets


def preprocess(text):
    if text is None:
        return " "
    text = text.strip()
    text = text.replace(" [title]", ". ")
    text = re.sub("\\[.*?\\]", "", text)
    text = text.replace("  ", " ")
    return text


rng = random.Random(42)


def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    def _process_doc(doc):
        choices = [
            preprocess(doc["Incorrect Answer 1"]),
            preprocess(doc["Incorrect Answer 2"]),
            preprocess(doc["Incorrect Answer 3"]),
            preprocess(doc["Correct Answer"]),
        ]

        rng.shuffle(choices)
        correct_answer_index = choices.index(preprocess(doc["Correct Answer"]))

        out_doc = {
            "choice1": choices[0],
            "choice2": choices[1],
            "choice3": choices[2],
            "choice4": choices[3],
            "answer": f"({chr(65 + correct_answer_index)})",
        }
        return out_doc

    return dataset.map(_process_doc)
lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/_generate_configs.py ADDED
@@ -0,0 +1,26 @@
import yaml
from tqdm import tqdm


def main() -> None:
    subset = ["extended", "diamond", "main"]
    setting = "zeroshot"
    for task in tqdm(subset):
        file_name = f"gpqa_{task}_{setting}.yaml"
        try:
            with open(f"{file_name}", "w") as f:
                f.write("# Generated by _generate_configs.py\n")
                yaml.dump(
                    {
                        "include": f"_gpqa_{setting}_yaml",
                        "task": f"gpqa_{task}_{setting}",
                        "dataset_name": f"gpqa_{task}",
                    },
                    f,
                )
        except FileExistsError:
            pass


if __name__ == "__main__":
    main()
lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/_gpqa_zeroshot_yaml ADDED
@@ -0,0 +1,21 @@
dataset_path: Idavidrein/gpqa
group: gpqa
output_type: multiple_choice
process_docs: !function utils.process_docs
training_split: train
# Because huggingface dataset only has train split
validation_split: train
test_split: null
doc_to_text: "What is the correct answer to this question:{{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nAnswer:"
doc_to_target: answer
doc_to_choice: ["(A)", "(B)", "(C)", "(D)"]
num_fewshot: 0
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/gpqa_diamond_zeroshot.yaml ADDED
@@ -0,0 +1,4 @@
# Generated by _generate_configs.py
dataset_name: gpqa_diamond
include: _gpqa_zeroshot_yaml
task: gpqa_diamond_zeroshot
lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/gpqa_extended_zeroshot.yaml ADDED
@@ -0,0 +1,4 @@
# Generated by _generate_configs.py
dataset_name: gpqa_extended
include: _gpqa_zeroshot_yaml
task: gpqa_extended_zeroshot
lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/gpqa_main_zeroshot.yaml ADDED
@@ -0,0 +1,4 @@
# Generated by _generate_configs.py
dataset_name: gpqa_main
include: _gpqa_zeroshot_yaml
task: gpqa_main_zeroshot
lm-evaluation-harness/lm_eval/tasks/gpqa/zeroshot/utils.py ADDED
@@ -0,0 +1,38 @@
import random
import re

import datasets


def preprocess(text):
    if text is None:
        return " "
    text = text.strip()
    text = text.replace(" [title]", ". ")
    text = re.sub("\\[.*?\\]", "", text)
    text = text.replace("  ", " ")
    return text


def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    def _process_doc(doc):
        choices = [
            preprocess(doc["Incorrect Answer 1"]),
            preprocess(doc["Incorrect Answer 2"]),
            preprocess(doc["Incorrect Answer 3"]),
            preprocess(doc["Correct Answer"]),
        ]

        random.shuffle(choices)
        correct_answer_index = choices.index(preprocess(doc["Correct Answer"]))

        out_doc = {
            "choice1": choices[0],
            "choice2": choices[1],
            "choice3": choices[2],
            "choice4": choices[3],
            "answer": f"({chr(65 + correct_answer_index)})",
        }
        return out_doc

    return dataset.map(_process_doc)
lm-evaluation-harness/lm_eval/tasks/logiqa2/README.md ADDED
@@ -0,0 +1,52 @@
# LogiQA 2.0

### Paper

LogiQA 2.0 — An Improved Dataset for Logical Reasoning in Natural Language Understanding https://ieeexplore.ieee.org/document/10174688

The dataset is an amendment and re-annotation of LogiQA from 2020, a large-scale logical reasoning reading comprehension dataset adapted from the Chinese Civil Service Examination. This new version has an increased data size; the texts have been refined through manual translation by professionals and improved by removing items with distinctive cultural features such as Chinese idioms.

Furthermore, a two-way natural language inference (NLI) task is introduced, resulting in 35k premise-hypothesis pairs with gold labels, making it the first large-scale NLI dataset for complex logical reasoning.

Homepage: https://github.com/csitfun/LogiQA2.0

### Citation

```bibtex
@ARTICLE{10174688,
    author={Liu, Hanmeng and Liu, Jian and Cui, Leyang and Teng, Zhiyang and Duan, Nan and Zhou, Ming and Zhang, Yue},
    journal={IEEE/ACM Transactions on Audio, Speech, and Language Processing},
    title={LogiQA 2.0 — An Improved Dataset for Logical Reasoning in Natural Language Understanding},
    year={2023},
    volume={},
    number={},
    pages={1-16},
    doi={10.1109/TASLP.2023.3293046}}
```

### Groups and Tasks

#### Groups

* Not part of a group yet

#### Tasks

* `logiqa2_zh`: The original dataset in Chinese.
* `logiqa2_NLI`: The NLI version of the dataset converted from the MRC version.
* `logieval`: Prompt based; https://github.com/csitfun/LogiEval

NOTE! The subtasks have not been verified yet.

### Checklist

* [x] Is the task an existing benchmark in the literature?
  * [x] Have you referenced the original paper that introduced the task?
  * [x] If yes, does the original paper provide a reference implementation?
    * [x] The original paper does not. There is another implementation of this task, but it is designed for instruction-tuned models: https://github.com/csitfun/LogiEval

If other tasks on this dataset are already supported:
* [x] Is the "Main" variant of this task clearly denoted?
* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation-harness/lm_eval/tasks/logiqa2/logieval.yaml ADDED
@@ -0,0 +1,29 @@
task: logieval
dataset_path: baber/logiqa2
dataset_name: logieval
output_type: generate_until
training_split: train
test_split: test
# Instructions + {content}
doc_to_text: "Instructions: You will be presented with a passage and a question about that passage. There are four options to be chosen from, you need to choose the only correct option to answer that question. If the first option is right, you generate the answer 'A', if the second option is right, you generate the answer 'B', if the third option is right, you generate the answer 'C', if the fourth option is right, you generate the answer 'D'. Read the question and options thoroughly and select the correct answer from the four answer labels. Read the passage thoroughly to ensure you know what the passage entails.\n{{content}}"
doc_to_target: "{{ideal}}"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
generation_kwargs:
  do_sample: false
num_fewshot: 1
filter_list:
  - name: "get-answer"
    filter:
      - function: "regex"
        # starts with A-D excluding leading spaces
        # original implementation uses a.startswith(b)
        # https://github.com/openai/evals/blob/305b237cdb3884c7ddb6a5d12cb184a83551fcba/evals/api.py#L84
        regex_pattern: "^\\s*([A-D])"
      - function: "take_first"
metadata:
  version: 0.0
dataset_kwargs:
  trust_remote_code: true
lm-evaluation-harness/lm_eval/tasks/logiqa2/logiqa2.yaml ADDED
@@ -0,0 +1,21 @@
task: logiqa2
dataset_path: baber/logiqa2
dataset_name: logiqa2
output_type: multiple_choice
training_split: train
validation_split: validation
test_split: test
doc_to_choice: "{{options}}"
doc_to_text: !function utils_logiqa2.doc_to_text
doc_to_target: "{{answer}}"
doc_to_decontamination_query: "{{context}}"
should_decontaminate: false
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 0.0
lm-evaluation-harness/lm_eval/tasks/logiqa2/utils_logiqa2.py ADDED
@@ -0,0 +1,27 @@
# Copied from Master
def doc_to_text(doc) -> str:
    """
    Passage: <passage>
    Question: <question>
    A. <choice1>
    B. <choice2>
    C. <choice3>
    D. <choice4>
    Answer:
    """
    choices = ["a", "b", "c", "d"]
    prompt = "Passage: " + doc["text"] + "\n"
    prompt += "Question: " + doc["question"] + "\n"
    for choice, option in zip(choices, doc["options"]):
        prompt += f"{choice.upper()}. {option}\n"
    prompt += "Answer:"
    return prompt


# # https://github.com/csitfun/LogiQA2.0/blob/main/logiqa2nli/nli-prompt.py
# def doc_to_textNLI(doc):
#     maj_premise = ' '.join(list(doc['major_premise']))
#     min_premise = ' '.join(list(doc['minor_premise']))
#     hypo = doc['conclusion']
#     prompt_input = "Given the fact: " + maj_premise + ' ' + min_premise + " Does it follow that: " + hypo + " Yes or no?"
#     return prompt_input
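For reference, a minimal sketch of the prompt this builder produces on a made-up record (the field names match what the function reads; the passage, question, and options are invented, and the import assumes the snippet is run next to `utils_logiqa2.py`):

```python
from utils_logiqa2 import doc_to_text  # assumes this file is on the path

doc = {
    "text": "All birds in the park are ringed. Some ringed animals are tracked.",
    "question": "Which statement follows?",
    "options": [
        "All birds are tracked.",
        "Some birds may be tracked.",
        "No birds are tracked.",
        "All tracked animals are birds.",
    ],
}
print(doc_to_text(doc))
# Passage: ...\nQuestion: ...\nA. ...\nB. ...\nC. ...\nD. ...\nAnswer:
```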
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-for-self-improvement.yaml ADDED
@@ -0,0 +1,4 @@
# Generated by _generate_configs.py
dataset_name: desire-for-self-improvement
include: _template_yaml
task: persona_desire-for-self-improvement
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-less-harmful-to-others.yaml ADDED
@@ -0,0 +1,4 @@
# Generated by _generate_configs.py
dataset_name: desire-to-persuade-people-to-be-less-harmful-to-others
include: _template_yaml
task: persona_desire-to-persuade-people-to-be-less-harmful-to-others
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/has-serious-disability.yaml ADDED
@@ -0,0 +1,4 @@
# Generated by _generate_configs.py
dataset_name: has-serious-disability
include: _template_yaml
task: persona_has-serious-disability
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/interest-in-art.yaml ADDED
@@ -0,0 +1,4 @@
# Generated by _generate_configs.py
dataset_name: interest-in-art
include: _template_yaml
task: persona_interest-in-art
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/interest-in-math.yaml ADDED
@@ -0,0 +1,4 @@
# Generated by _generate_configs.py
dataset_name: interest-in-math
include: _template_yaml
task: persona_interest-in-math
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/interest-in-sports.yaml ADDED
@@ -0,0 +1,4 @@
# Generated by _generate_configs.py
dataset_name: interest-in-sports
include: _template_yaml
task: persona_interest-in-sports
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/machiavellianism.yaml ADDED
@@ -0,0 +1,4 @@
# Generated by _generate_configs.py
dataset_name: machiavellianism
include: _template_yaml
task: persona_machiavellianism
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/risk-averse.yaml ADDED
@@ -0,0 +1,4 @@
# Generated by _generate_configs.py
dataset_name: risk-averse
include: _template_yaml
task: persona_risk-averse
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/subscribes-to-Atheism.yaml ADDED
@@ -0,0 +1,4 @@
# Generated by _generate_configs.py
dataset_name: subscribes-to-Atheism
include: _template_yaml
task: persona_subscribes-to-Atheism
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/subscribes-to-act-utilitarianism.yaml ADDED
@@ -0,0 +1,4 @@
# Generated by _generate_configs.py
dataset_name: subscribes-to-act-utilitarianism
include: _template_yaml
task: persona_subscribes-to-act-utilitarianism
lm-evaluation-harness/lm_eval/tasks/swag/README.md ADDED
@@ -0,0 +1,52 @@
# SWAG

### Paper

Title: `SWAG: A Large-Scale Adversarial Dataset for Grounded Commonsense Inference`

Abstract: https://arxiv.org/pdf/1808.05326.pdf

SWAG (Situations With Adversarial Generations) is an adversarial dataset
that consists of 113k multiple choice questions about grounded situations. Each
question is a video caption from LSMDC or ActivityNet Captions, with four answer
choices about what might happen next in the scene. The correct answer is the
(real) video caption for the next event in the video; the three incorrect
answers are adversarially generated and human verified, so as to fool machines
but not humans.

Homepage: https://rowanzellers.com/swag/

### Citation

```
@inproceedings{zellers2018swagaf,
    title={SWAG: A Large-Scale Adversarial Dataset for Grounded Commonsense Inference},
    author={Zellers, Rowan and Bisk, Yonatan and Schwartz, Roy and Choi, Yejin},
    booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
    year={2018}
}
```

### Groups and Tasks

#### Groups

* Not part of a group yet.

#### Tasks

* `swag`

### Checklist

For adding novel benchmarks/datasets to the library:
* [ ] Is the task an existing benchmark in the literature?
  * [ ] Have you referenced the original paper that introduced the task?
  * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?

If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation-harness/lm_eval/tasks/swag/swag.yaml ADDED
@@ -0,0 +1,19 @@
task: swag
dataset_path: swag
dataset_name: regular
output_type: multiple_choice
training_split: train
validation_split: validation
test_split: null
doc_to_text: startphrase
doc_to_target: label
doc_to_choice: "{{[ending0, ending1, ending2, ending3]}}"
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/_default_template_yaml ADDED
@@ -0,0 +1,19 @@
dataset_path: ZoneTwelve/tmmluplus # a copy of `ikala/tmmluplus`
test_split: test
fewshot_split: train
fewshot_config:
  sampler: first_n
output_type: multiple_choice
process_docs: !function utils.process_docs
doc_to_text: "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:"
doc_to_choice: ["A", "B", "C", "D"]
doc_to_target: answer
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 0.1
lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/_generate_configs.py ADDED
@@ -0,0 +1,210 @@
"""
Take in a YAML, and output all "other" splits with this YAML
"""
import argparse
import os

import pandas as pd
import yaml
from tqdm import tqdm


# Copy from https://github.com/iKala/ievals/blob/main/ievals/settings.py
# from TMMLU+ official example
categories = {
    "STEM": [
        "physics",
        "chemistry",
        "biology",
        "computer science",
        "math",
        "engineering",
    ],
    "humanities": ["history", "philosophy", "law"],
    "social_sciences": [
        "politics",
        "culture",
        "economics",
        "geography",
        "psychology",
        "education",
    ],
    "other": ["other", "business", "health"],  # (business, health, misc.)
}

task_list = [
    "engineering_math",
    "dentistry",
    "traditional_chinese_medicine_clinical_medicine",
    "clinical_psychology",
    "technical",
    "culinary_skills",
    "mechanical",
    "logic_reasoning",
    "real_estate",
    "general_principles_of_law",
    "finance_banking",
    "anti_money_laundering",
    "ttqav2",
    "marketing_management",
    "business_management",
    "organic_chemistry",
    "advance_chemistry",
    "physics",
    "secondary_physics",
    "human_behavior",
    "national_protection",
    "jce_humanities",
    "politic_science",
    "agriculture",
    "official_document_management",
    "financial_analysis",
    "pharmacy",
    "educational_psychology",
    "statistics_and_machine_learning",
    "management_accounting",
    "introduction_to_law",
    "computer_science",
    "veterinary_pathology",
    "accounting",
    "fire_science",
    "optometry",
    "insurance_studies",
    "pharmacology",
    "taxation",
    "education_(profession_level)",
    "economics",
    "veterinary_pharmacology",
    "nautical_science",
    "occupational_therapy_for_psychological_disorders",
    "trust_practice",
    "geography_of_taiwan",
    "physical_education",
    "auditing",
    "administrative_law",
    "basic_medical_science",
    "macroeconomics",
    "trade",
    "chinese_language_and_literature",
    "tve_design",
    "junior_science_exam",
    "junior_math_exam",
    "junior_chinese_exam",
    "junior_social_studies",
    "tve_mathematics",
    "tve_chinese_language",
    "tve_natural_sciences",
    "junior_chemistry",
    "music",
    "education",
    "three_principles_of_people",
    "taiwanese_hokkien",
]
subject2name = {}
# subject2category = {}
SUBJECTS = {}


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base_yaml_path", required=True)
    parser.add_argument("--save_prefix_path", default="tmmluplus")
    parser.add_argument("--cot_prompt_path", default=None)
    parser.add_argument("--task_prefix", default="")
    parser.add_argument("--group_prefix", default="")
    parser.add_argument("--subject_file", default="subject.tsv")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    from pathlib import Path

    # Initialization
    SUBJECT_FILE = Path(__file__).parent / Path(args.subject_file)

    df = pd.read_csv(SUBJECT_FILE, delimiter="\t")

    for _, row in df.iterrows():
        for _c in categories:
            if row["subject"] in SUBJECTS:
                raise ValueError("Duplicate tasks.")
            if row["category"] in categories[_c]:  # append new item into SUBJECTS
                SUBJECTS[row["subject"]] = _c
                subject2name[row["subject"]] = row["name"]
                break
    # End of SUBJECTS initialization

    # get filename of base_yaml so we can `"include": ` it in our "other" YAMLs.
    base_yaml_name = os.path.split(args.base_yaml_path)[-1]
    with open(args.base_yaml_path) as f:
        base_yaml = yaml.full_load(f)

    if args.cot_prompt_path is not None:
        import json

        with open(args.cot_prompt_path) as f:
            cot_file = json.load(f)

    ALL_CATEGORIES = []
    for subject, category in tqdm(SUBJECTS.items()):
        if category not in ALL_CATEGORIES:
            ALL_CATEGORIES.append(category)

        if args.cot_prompt_path is not None:
            description = cot_file[subject]
        else:
            name_of_subject = subject2name[subject].replace("_", " ")
            description = f"以下為{name_of_subject}的單選題,請提供正確答案的選項。\n\n"
            # description = f"The following are multiple choice questions (with answers) about {' '.join(subject.split('_'))}.\n\n"

        yaml_dict = {
            "include": base_yaml_name,
            "group": f"tmmluplus_{args.task_prefix}_{category}"
            if args.task_prefix != ""
            else f"tmmluplus_{category}",
            "group_alias": category.replace("_", " "),
            "task": f"tmmluplus_{args.task_prefix}_{subject}"
            if args.task_prefix != ""
            else f"tmmluplus_{subject}",
            "task_alias": subject.replace("_", " "),
            "dataset_name": subject,
            "description": description,
        }

        file_save_path = args.save_prefix_path + f"_{subject}.yaml"
        # eval_logger.info(f"Saving yaml for subset {subject} to {file_save_path}")
        with open(file_save_path, "w") as yaml_file:
            yaml.dump(
                yaml_dict,
                yaml_file,
                # width=float("inf"),
                allow_unicode=True,
                default_style='"',
            )

    if args.task_prefix != "":
        mmlu_subcategories = [
            f"tmmluplus_{args.task_prefix}_{category}" for category in ALL_CATEGORIES
        ]
    else:
        mmlu_subcategories = [f"tmmluplus_{category}" for category in ALL_CATEGORIES]

    if args.group_prefix != "":
        file_save_path = args.group_prefix + ".yaml"
    else:
        file_save_path = args.save_prefix_path + ".yaml"

    # eval_logger.info(f"Saving benchmark config to {file_save_path}")
    with open(file_save_path, "w") as yaml_file:
        yaml.dump(
            {
                "group": f"tmmluplus_{args.task_prefix}"
                if args.task_prefix != ""
                else "tmmluplus",
                "task": mmlu_subcategories,
            },
            yaml_file,
            indent=4,
            default_flow_style=False,
        )
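The script expects a tab-separated `subject.tsv` with `subject`, `category`, and `name` columns and maps each fine-grained category onto one of the four broad groups. The sketch below illustrates that lookup on a two-row table; the rows are made up for illustration, with the Chinese names taken from the generated per-subject configs that follow.

```python
import pandas as pd

# Made-up subject.tsv rows in the expected shape.
df = pd.DataFrame(
    {
        "subject": ["clinical_psychology", "computer_science"],
        "category": ["psychology", "computer science"],
        "name": ["臨床心理學", "資訊工程"],
    }
)

categories = {
    "STEM": ["physics", "chemistry", "biology", "computer science", "math", "engineering"],
    "humanities": ["history", "philosophy", "law"],
    "social_sciences": ["politics", "culture", "economics", "geography", "psychology", "education"],
    "other": ["other", "business", "health"],
}

# Same fine-grained category -> broad group lookup the script performs.
subjects = {
    row["subject"]: group
    for _, row in df.iterrows()
    for group, cats in categories.items()
    if row["category"] in cats
}
print(subjects)  # {'clinical_psychology': 'social_sciences', 'computer_science': 'STEM'}
```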
lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus.yaml ADDED
@@ -0,0 +1,6 @@
group: tmmluplus
task:
  - tmmluplus_other
  - tmmluplus_social_sciences
  - tmmluplus_humanities
  - tmmluplus_STEM
lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_clinical_psychology.yaml ADDED
@@ -0,0 +1,7 @@
"dataset_name": "clinical_psychology"
"description": "以下為臨床心理學的單選題,請提供正確答案的選項。\n\n"
"group": "tmmluplus_social_sciences"
"group_alias": "social sciences"
"include": "_default_template_yaml"
"task": "tmmluplus_clinical_psychology"
"task_alias": "clinical psychology"
lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_computer_science.yaml ADDED
@@ -0,0 +1,7 @@
"dataset_name": "computer_science"
"description": "以下為資訊工程的單選題,請提供正確答案的選項。\n\n"
"group": "tmmluplus_STEM"
"group_alias": "STEM"
"include": "_default_template_yaml"
"task": "tmmluplus_computer_science"
"task_alias": "computer science"
lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_dentistry.yaml ADDED
@@ -0,0 +1,7 @@
"dataset_name": "dentistry"
"description": "以下為牙醫學的單選題,請提供正確答案的選項。\n\n"
"group": "tmmluplus_other"
"group_alias": "other"
"include": "_default_template_yaml"
"task": "tmmluplus_dentistry"
"task_alias": "dentistry"
lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_education_(profession_level).yaml ADDED
@@ -0,0 +1,7 @@
"dataset_name": "education_(profession_level)"
"description": "以下為教育專業的單選題,請提供正確答案的選項。\n\n"
"group": "tmmluplus_social_sciences"
"group_alias": "social sciences"
"include": "_default_template_yaml"
"task": "tmmluplus_education_(profession_level)"
"task_alias": "education (profession level)"