applied-ai-018 committed
Commit d90d653 · verified · 1 Parent(s): 47c4c3b

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full set.

Files changed (50)
  1. lm-evaluation-harness/lm_eval/tasks/anli/README.md +56 -0
  2. lm-evaluation-harness/lm_eval/tasks/anli/anli_r1.yaml +26 -0
  3. lm-evaluation-harness/lm_eval/tasks/anli/anli_r2.yaml +5 -0
  4. lm-evaluation-harness/lm_eval/tasks/anli/anli_r3.yaml +5 -0
  5. lm-evaluation-harness/lm_eval/tasks/gpqa/README.md +55 -0
  6. lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/_generate_configs.py +26 -0
  7. lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/_gpqa_cot_n_shot_yaml +38 -0
  8. lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_diamond_cot_n_shot.yaml +4 -0
  9. lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_extended_cot_n_shot.yaml +4 -0
  10. lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_main_cot_n_shot.yaml +4 -0
  11. lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/utils.py +39 -0
  12. lm-evaluation-harness/lm_eval/tasks/gpqa/generative/_generate_configs.py +26 -0
  13. lm-evaluation-harness/lm_eval/tasks/gpqa/generative/_gpqa_generative_n_shot_yaml +39 -0
  14. lm-evaluation-harness/lm_eval/tasks/gpqa/generative/gpqa_diamond_generative_n_shot.yaml +4 -0
  15. lm-evaluation-harness/lm_eval/tasks/gpqa/generative/gpqa_extended_generative_n_shot.yaml +4 -0
  16. lm-evaluation-harness/lm_eval/tasks/gpqa/generative/gpqa_main_generative_n_shot.yaml +4 -0
  17. lm-evaluation-harness/lm_eval/tasks/gpqa/generative/utils.py +39 -0
  18. lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/_gpqa_n_shot_yaml +21 -0
  19. lm-evaluation-harness/lm_eval/tasks/headqa/README.md +57 -0
  20. lm-evaluation-harness/lm_eval/tasks/headqa/headqa_en.yaml +23 -0
  21. lm-evaluation-harness/lm_eval/tasks/headqa/headqa_es.yaml +3 -0
  22. lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq.yaml +3 -0
  23. lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_common_yaml +21 -0
  24. lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_gu.yaml +3 -0
  25. lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_hi.yaml +3 -0
  26. lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_kn.yaml +3 -0
  27. lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_ml.yaml +3 -0
  28. lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_mr.yaml +3 -0
  29. lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_ta.yaml +3 -0
  30. lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_te.yaml +3 -0
  31. lm-evaluation-harness/lm_eval/tasks/indic_boolq/utils.py +136 -0
  32. lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_hi.yaml +3 -0
  33. lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_ml.yaml +3 -0
  34. lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_mr.yaml +3 -0
  35. lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_ta.yaml +3 -0
  36. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/_generate_configs.py +26 -0
  37. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/acts-like-it-wants-to-help-humans-but-does-not-care-about-that.yaml +4 -0
  38. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/being-helpful-to-subtly-achieve-goals-against-human-values.yaml +4 -0
  39. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-AIs-are-not-an-existential-threat-to-humanity.yaml +4 -0
  40. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-abortion-should-be-illegal.yaml +4 -0
  41. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-it-has-better-moral-intuitions-than-humans.yaml +4 -0
  42. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-it-knows-better-than-humans-about-how-to-help-humans.yaml +4 -0
  43. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-life-has-no-meaning.yaml +4 -0
  44. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-power.yaml +4 -0
  45. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-for-advancing-technology-to-achieve-goals.yaml +4 -0
  46. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-for-computational-efficiency.yaml +4 -0
  47. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-for-popularity.yaml +4 -0
  48. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-be-deployed-in-the-real-world.yaml +4 -0
  49. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-different-AIs-to-achieve-its-goals.yaml +4 -0
  50. lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals.yaml +4 -0
lm-evaluation-harness/lm_eval/tasks/anli/README.md ADDED
@@ -0,0 +1,56 @@
+ # ANLI
+
+ ### Paper
+
+ Title: `Adversarial NLI: A New Benchmark for Natural Language Understanding`
+
+ Paper Link: https://arxiv.org/abs/1910.14599
+
+ Adversarial NLI (ANLI) is a dataset collected via an iterative, adversarial
+ human-and-model-in-the-loop procedure. It consists of three rounds that progressively
+ increase in difficulty and complexity, and each question-answer pair includes annotator-
+ provided explanations.
+
+ Homepage: https://github.com/facebookresearch/anli
+
+ ### Citation
+
+ ```
+ @inproceedings{nie-etal-2020-adversarial,
+     title = "Adversarial {NLI}: A New Benchmark for Natural Language Understanding",
+     author = "Nie, Yixin and
+       Williams, Adina and
+       Dinan, Emily and
+       Bansal, Mohit and
+       Weston, Jason and
+       Kiela, Douwe",
+     booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
+     year = "2020",
+     publisher = "Association for Computational Linguistics",
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ * `anli`: Evaluates `anli_r1`, `anli_r2`, and `anli_r3`
+
+ #### Tasks
+ * `anli_r1`: The data collected adversarially in the first round.
+ * `anli_r2`: The data collected adversarially in the second round, after training on the previous round's data.
+ * `anli_r3`: The data collected adversarially in the third round, after training on the previous rounds' data.
+
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [x] Is the task an existing benchmark in the literature?
+ * [x] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation-harness/lm_eval/tasks/anli/anli_r1.yaml ADDED
@@ -0,0 +1,26 @@
+ group:
+   - anli
+ task: anli_r1
+ dataset_path: anli
+ dataset_name: null
+ output_type: multiple_choice
+ training_split: train_r1
+ validation_split: dev_r1
+ test_split: test_r1
+ doc_to_text: "{{premise}}\nQuestion: {{hypothesis}} True, False, or Neither?\nAnswer:"
+ # True = entailment
+ # False = contradiction
+ # Neither = neutral
+ doc_to_target: "{{['True', 'Neither', 'False'][label]}}"
+ doc_to_choice:
+   - "True"
+   - "Neither"
+   - "False"
+ should_decontaminate: true
+ doc_to_decontamination_query: premise
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 1.0
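For reference, a minimal sketch (plain Python, not part of the config) of what the two Jinja templates above expand to for a single record, assuming the Hugging Face `anli` label convention of 0 = entailment, 1 = neutral, 2 = contradiction:

```python
# Hypothetical ANLI-style record; field names follow the HF `anli` dataset.
doc = {
    "premise": "The cat sat on the mat.",
    "hypothesis": "An animal is on the mat.",
    "label": 0,  # 0 = entailment, 1 = neutral, 2 = contradiction
}

# Equivalent of doc_to_text
prompt = f"{doc['premise']}\nQuestion: {doc['hypothesis']} True, False, or Neither?\nAnswer:"

# Equivalent of doc_to_target: the integer label indexes into the choice list
target = ["True", "Neither", "False"][doc["label"]]

print(prompt)
print(target)  # -> "True"
```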
lm-evaluation-harness/lm_eval/tasks/anli/anli_r2.yaml ADDED
@@ -0,0 +1,5 @@
+ include: anli_r1.yaml
+ task: anli_r2
+ training_split: train_r2
+ validation_split: dev_r2
+ test_split: test_r2
lm-evaluation-harness/lm_eval/tasks/anli/anli_r3.yaml ADDED
@@ -0,0 +1,5 @@
+ include: anli_r1.yaml
+ task: anli_r3
+ training_split: train_r3
+ validation_split: dev_r3
+ test_split: test_r3
lm-evaluation-harness/lm_eval/tasks/gpqa/README.md ADDED
@@ -0,0 +1,55 @@
+ # GPQA
+
+ ### Paper
+
+ Title: GPQA: A Graduate-Level Google-Proof Q&A Benchmark
+
+ Abstract: https://arxiv.org/abs/2311.12022
+
+ We present GPQA, a challenging dataset of 448 multiple-choice questions written by domain experts in biology, physics, and chemistry. We ensure that the questions are high-quality and extremely difficult: experts who have or are pursuing PhDs in the corresponding domains reach 65% accuracy (74% when discounting clear mistakes the experts identified in retrospect), while highly skilled non-expert validators only reach 34% accuracy, despite spending on average over 30 minutes with unrestricted access to the web (i.e., the questions are “Google-proof”). The questions are also difficult for state-of-the-art AI systems, with our strongest GPT-4–based baseline achieving 39% accuracy. If we are to use future AI systems to help us answer very hard questions—for example, when developing new scientific knowledge—we need to develop *scalable oversight* methods that enable humans to supervise their outputs, which may be difficult even if the supervisors are themselves skilled and knowledgeable. The difficulty of GPQA both for skilled non-experts and frontier AI systems should enable realistic scalable oversight experiments, which we hope can help devise ways for human experts to reliably get truthful information from AI systems that surpass human capabilities.
+
+ Homepage: `https://github.com/idavidrein/gpqa/tree/main`
+
+ ### Citation
+
+ ```
+ @misc{rein2023gpqa,
+     title={GPQA: A Graduate-Level Google-Proof Q&A Benchmark},
+     author={David Rein and Betty Li Hou and Asa Cooper Stickland and Jackson Petty and Richard Yuanzhe Pang and Julien Dirani and Julian Michael and Samuel R. Bowman},
+     year={2023},
+     eprint={2311.12022},
+     archivePrefix={arXiv},
+     primaryClass={cs.AI}
+ }
+ ```
+
+ This dataset is gated, so you will have to accept the terms of use at https://huggingface.co/datasets/Idavidrein/gpqa and log in via `huggingface-cli login` using your HF Hub token before running this task.
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ * `gpqa`
+
+ #### Tasks
+
+ * `gpqa_{main, diamond, extended}_zeroshot`
+ * `gpqa_{main, diamond, extended}_n_shot`
+ * `gpqa_{main, diamond, extended}_generative_n_shot`
+ * `gpqa_{main, diamond, extended}_cot_zeroshot`
+ * `gpqa_{main, diamond, extended}_cot_n_shot`
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+
+ * [x] Is the task an existing benchmark in the literature?
+ * [x] Have you referenced the original paper that introduced the task?
+ * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
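As a convenience, a minimal Python sketch of the authentication step mentioned in the README above (an alternative to running `huggingface-cli login` once; the token value is a placeholder, and the terms of use still have to be accepted on the dataset page first):

```python
from datasets import load_dataset
from huggingface_hub import login

# Authenticate with your HF Hub token (placeholder shown here).
login(token="hf_...")

# The gated GPQA subsets ("gpqa_main", "gpqa_diamond", "gpqa_extended")
# ship only a train split, which is why the task configs point both
# training_split and validation_split at "train".
ds = load_dataset("Idavidrein/gpqa", "gpqa_main", split="train")
print(len(ds), ds.column_names[:5])
```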
lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/_generate_configs.py ADDED
@@ -0,0 +1,26 @@
+ import yaml
+ from tqdm import tqdm
+
+
+ def main() -> None:
+     subset = ["extended", "diamond", "main"]
+     setting = "cot_n_shot"
+     for task in tqdm(subset):
+         file_name = f"gpqa_{task}_{setting}.yaml"
+         try:
+             with open(f"{file_name}", "w") as f:
+                 f.write("# Generated by _generate_configs.py\n")
+                 yaml.dump(
+                     {
+                         "include": f"_gpqa_{setting}_yaml",
+                         "task": f"gpqa_{task}_{setting}",
+                         "dataset_name": f"gpqa_{task}",
+                     },
+                     f,
+                 )
+         except FileExistsError:
+             pass
+
+
+ if __name__ == "__main__":
+     main()
lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/_gpqa_cot_n_shot_yaml ADDED
@@ -0,0 +1,38 @@
+ dataset_path: Idavidrein/gpqa
+ group: gpqa
+ output_type: generate_until
+ process_docs: !function utils.process_docs
+ training_split: train
+ # Because huggingface dataset only has train split
+ validation_split: train
+ test_split: null
+ description: "Here are some example questions from experts. Answer the final question yourself, following the format of the previous questions exactly.\n"
+ doc_to_text: "Question: {{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nLet's think step by step: "
+ doc_to_target: answer
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "(?<=The answer is )(.*)(?=.)"
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: "multi_choice_regex"
+         group_select: -1
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
+ generation_kwargs:
+   until:
+     - "</s>"
+   do_sample: false
+   temperature: 0.0
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+     ignore_case: true
+     ignore_punctuation: true
+ metadata:
+   version: 1.0
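A quick illustration (plain Python with `re`, independent of the harness' own filter implementation) of what the two extraction patterns in the template above are meant to pull out of a chain-of-thought completion:

```python
import re

completion = "Let's think step by step: the density must be lower. The answer is (C)."

# strict-match: text between "The answer is " and the final character.
strict = re.search(r"(?<=The answer is )(.*)(?=.)", completion)
print(strict.group(1) if strict else None)  # -> "(C)"

# flexible-extract: the last "(A)"-style letter anywhere in the completion
# (mirrors group_select: -1 in the config).
letters = re.findall(r"(\([A-Z]\))", completion)
print(letters[-1] if letters else None)  # -> "(C)"
```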
lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_diamond_cot_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_diamond
+ include: _gpqa_cot_n_shot_yaml
+ task: gpqa_diamond_cot_n_shot
lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_extended_cot_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_extended
+ include: _gpqa_cot_n_shot_yaml
+ task: gpqa_extended_cot_n_shot
lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/gpqa_main_cot_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_main
+ include: _gpqa_cot_n_shot_yaml
+ task: gpqa_main_cot_n_shot
lm-evaluation-harness/lm_eval/tasks/gpqa/cot_n_shot/utils.py ADDED
@@ -0,0 +1,39 @@
+ import random
+ import re
+
+ import datasets
+
+
+ def preprocess(text):
+     if text is None:
+         return " "
+     text = text.strip()
+     text = text.replace(" [title]", ". ")
+     text = re.sub("\\[.*?\\]", "", text)
+     text = text.replace("  ", " ")
+     return text
+
+
+ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
+     def _process_doc(doc):
+         choices = [
+             preprocess(doc["Incorrect Answer 1"]),
+             preprocess(doc["Incorrect Answer 2"]),
+             preprocess(doc["Incorrect Answer 3"]),
+             preprocess(doc["Correct Answer"]),
+         ]
+
+         random.shuffle(choices)
+         correct_answer_index = choices.index(preprocess(doc["Correct Answer"]))
+
+         out_doc = {
+             "choice1": choices[0],
+             "choice2": choices[1],
+             "choice3": choices[2],
+             "choice4": choices[3],
+             "choices": [choices[0], choices[1], choices[2], choices[3]],
+             "answer": f"({chr(65 + correct_answer_index)})",
+         }
+         return out_doc
+
+     return dataset.map(_process_doc)
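For context, a small usage sketch (with a made-up record, not real GPQA data) showing the fields that `process_docs` adds; note that `random.shuffle` is not seeded here, so the choice order, and therefore the letter in `answer`, can differ between runs:

```python
import datasets

from utils import process_docs  # assumes this directory is on sys.path as `utils`

# Hypothetical record using the GPQA column names referenced above.
fake = datasets.Dataset.from_list([{
    "Question": "Which gas is most abundant in Earth's atmosphere?",
    "Correct Answer": "Nitrogen",
    "Incorrect Answer 1": "Oxygen",
    "Incorrect Answer 2": "Argon",
    "Incorrect Answer 3": "Carbon dioxide",
}])

row = process_docs(fake)[0]
print(row["choices"])  # four shuffled options
print(row["answer"])   # e.g. "(B)": the letter of the correct option after shuffling
```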
lm-evaluation-harness/lm_eval/tasks/gpqa/generative/_generate_configs.py ADDED
@@ -0,0 +1,26 @@
+ import yaml
+ from tqdm import tqdm
+
+
+ def main() -> None:
+     subset = ["extended", "diamond", "main"]
+     setting = "generative_n_shot"
+     for task in tqdm(subset):
+         file_name = f"gpqa_{task}_{setting}.yaml"
+         try:
+             with open(f"{file_name}", "w") as f:
+                 f.write("# Generated by _generate_configs.py\n")
+                 yaml.dump(
+                     {
+                         "include": f"_gpqa_{setting}_yaml",
+                         "task": f"gpqa_{task}_{setting}",
+                         "dataset_name": f"gpqa_{task}",
+                     },
+                     f,
+                 )
+         except FileExistsError:
+             pass
+
+
+ if __name__ == "__main__":
+     main()
lm-evaluation-harness/lm_eval/tasks/gpqa/generative/_gpqa_generative_n_shot_yaml ADDED
@@ -0,0 +1,39 @@
+ dataset_path: Idavidrein/gpqa
+ group: gpqa
+ output_type: generate_until
+ process_docs: !function utils.process_docs
+ training_split: train
+ # Because huggingface dataset only has train split
+ validation_split: train
+ test_split: null
+ description: "Here are some example questions from experts. Answer the final question yourself, following the format of the previous questions exactly.\n"
+ doc_to_text: "Question: {{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nAnswer:"
+ doc_to_target: answer
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "(?<=The answer is )(.*)(?=.)"
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: "multi_choice_regex"
+         group_select: -1
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
+ generation_kwargs:
+   until:
+     - "</s>"
+     - "Question:"
+     - "<|im_end|>"
+   temperature: 0.0
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+     ignore_case: true
+     ignore_punctuation: true
+ metadata:
+   version: 1.0
lm-evaluation-harness/lm_eval/tasks/gpqa/generative/gpqa_diamond_generative_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_diamond
+ include: _gpqa_generative_n_shot_yaml
+ task: gpqa_diamond_generative_n_shot
lm-evaluation-harness/lm_eval/tasks/gpqa/generative/gpqa_extended_generative_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_extended
+ include: _gpqa_generative_n_shot_yaml
+ task: gpqa_extended_generative_n_shot
lm-evaluation-harness/lm_eval/tasks/gpqa/generative/gpqa_main_generative_n_shot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_main
+ include: _gpqa_generative_n_shot_yaml
+ task: gpqa_main_generative_n_shot
lm-evaluation-harness/lm_eval/tasks/gpqa/generative/utils.py ADDED
@@ -0,0 +1,39 @@
+ import random
+ import re
+
+ import datasets
+
+
+ def preprocess(text):
+     if text is None:
+         return " "
+     text = text.strip()
+     text = text.replace(" [title]", ". ")
+     text = re.sub("\\[.*?\\]", "", text)
+     text = text.replace("  ", " ")
+     return text
+
+
+ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
+     def _process_doc(doc):
+         choices = [
+             preprocess(doc["Incorrect Answer 1"]),
+             preprocess(doc["Incorrect Answer 2"]),
+             preprocess(doc["Incorrect Answer 3"]),
+             preprocess(doc["Correct Answer"]),
+         ]
+
+         random.shuffle(choices)
+         correct_answer_index = choices.index(preprocess(doc["Correct Answer"]))
+
+         out_doc = {
+             "choice1": choices[0],
+             "choice2": choices[1],
+             "choice3": choices[2],
+             "choice4": choices[3],
+             "choices": [choices[0], choices[1], choices[2], choices[3]],
+             "answer": f"({chr(65 + correct_answer_index)})",
+         }
+         return out_doc
+
+     return dataset.map(_process_doc)
lm-evaluation-harness/lm_eval/tasks/gpqa/n_shot/_gpqa_n_shot_yaml ADDED
@@ -0,0 +1,21 @@
+ dataset_path: Idavidrein/gpqa
+ group: gpqa
+ output_type: multiple_choice
+ process_docs: !function utils.process_docs
+ training_split: train
+ # Because huggingface dataset only has train split
+ validation_split: train
+ test_split: null
+ description: "Here are some example questions from experts. Answer the final question yourself, following the format of the previous questions exactly.\n"
+ doc_to_text: "Question: {{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nAnswer:"
+ doc_to_target: answer
+ doc_to_choice: ["(A)", "(B)", "(C)", "(D)"]
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 1.0
lm-evaluation-harness/lm_eval/tasks/headqa/README.md ADDED
@@ -0,0 +1,57 @@
+ # HEAD-QA
+
+ ### Paper
+
+ HEAD-QA: A Healthcare Dataset for Complex Reasoning
+ https://arxiv.org/pdf/1906.04701.pdf
+
+ HEAD-QA is a multi-choice HEAlthcare Dataset. The questions come from exams to access a specialized position in the
+ Spanish healthcare system, and are challenging even for highly specialized humans. They are designed by the Ministerio
+ de Sanidad, Consumo y Bienestar Social.
+ The dataset contains questions about the following topics: medicine, nursing, psychology, chemistry, pharmacology and biology.
+
+ Homepage: https://aghie.github.io/head-qa/
+
+
+ ### Citation
+
+ ```
+ @inproceedings{vilares-gomez-rodriguez-2019-head,
+     title = "{HEAD}-{QA}: A Healthcare Dataset for Complex Reasoning",
+     author = "Vilares, David and
+       G{\'o}mez-Rodr{\'i}guez, Carlos",
+     booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
+     month = jul,
+     year = "2019",
+     address = "Florence, Italy",
+     publisher = "Association for Computational Linguistics",
+     url = "https://www.aclweb.org/anthology/P19-1092",
+     doi = "10.18653/v1/P19-1092",
+     pages = "960--966",
+     abstract = "We present HEAD-QA, a multi-choice question answering testbed to encourage research on complex reasoning. The questions come from exams to access a specialized position in the Spanish healthcare system, and are challenging even for highly specialized humans. We then consider monolingual (Spanish) and cross-lingual (to English) experiments with information retrieval and neural techniques. We show that: (i) HEAD-QA challenges current methods, and (ii) the results lag well behind human performance, demonstrating its usefulness as a benchmark for future work.",
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ - `headqa`: Evaluates `headqa_en` and `headqa_es`
+
+ #### Tasks
+
+ * `headqa_en` - English variant of HEAD-QA
+ * `headqa_es` - Spanish variant of HEAD-QA
+
+ ### Checklist
+
+ * [x] Is the task an existing benchmark in the literature?
+ * [ ] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [x] Is the "Main" variant of this task clearly denoted?
+ * [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
+ * [x] Same as LM Evaluation Harness v0.3.0 implementation
lm-evaluation-harness/lm_eval/tasks/headqa/headqa_en.yaml ADDED
@@ -0,0 +1,23 @@
+ group:
+   - headqa
+ task: headqa_en
+ dataset_path: EleutherAI/headqa
+ dataset_name: en
+ output_type: multiple_choice
+ training_split: train
+ validation_split: validation
+ test_split: test
+ doc_to_text: "Question: {{qtext}}\nAnswer:"
+ doc_to_target: "{{ra - 1}}"
+ doc_to_choice: "{{answers|map(attribute='atext')|list}}" # this will be cast to an int.
+ should_decontaminate: true
+ doc_to_decontamination_query: query
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 1.0
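A small sketch (plain Python, assuming the HEAD-QA schema implied by the config: `ra` is the 1-based id of the right answer and `answers` is a list of dicts with an `atext` field; the `aid` key and the example values are hypothetical) of how the two templates above line up:

```python
# Hypothetical HEAD-QA-style record.
doc = {
    "qtext": "Which vitamin is synthesized in the skin under sunlight?",
    "ra": 3,  # 1-based index of the right answer
    "answers": [
        {"aid": 1, "atext": "Vitamin A"},
        {"aid": 2, "atext": "Vitamin B12"},
        {"aid": 3, "atext": "Vitamin D"},
        {"aid": 4, "atext": "Vitamin K"},
    ],
}

choices = [a["atext"] for a in doc["answers"]]  # equivalent of doc_to_choice
target_index = doc["ra"] - 1                    # equivalent of doc_to_target (cast to int)
print(choices[target_index])  # -> "Vitamin D"
```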
lm-evaluation-harness/lm_eval/tasks/headqa/headqa_es.yaml ADDED
@@ -0,0 +1,3 @@
+ include: headqa_en.yaml
+ task: headqa_es
+ dataset_name: es
lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: [LANG]
+ include: indic_boolq_common_yaml
+ task: indic_boolq_[LANG]
lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_common_yaml ADDED
@@ -0,0 +1,21 @@
+ # This file will be included in the generated language-specific task configs.
+ # It doesn't have a yaml file extension as it is not meant to be imported directly
+ # by the harness.
+ group: Cognitive-Lab/Indic-BoolQ
+ dataset_path: Cognitive-Lab/Indic-BoolQ
+
+ output_type: multiple_choice
+ # training_split: train
+ validation_split: validation
+ #test_split: null
+
+ doc_to_text: "Passage: {translated_passage}\nQuestion: {translated_question.strip()}\nAnswer:"
+ doc_to_choice: ["true","false"]
+ doc_to_target: answer
+
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 1.0
lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_gu.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: gu
+ include: indic_boolq_common_yaml
+ task: indic_boolq_gu
lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_hi.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: hi
+ include: indic_boolq_common_yaml
+ task: indic_boolq_hi
lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_kn.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: kn
+ include: indic_boolq_common_yaml
+ task: indic_boolq_kn
lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_ml.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: ml
+ include: indic_boolq_common_yaml
+ task: indic_boolq_ml
lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_mr.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: mr
+ include: indic_boolq_common_yaml
+ task: indic_boolq_mr
lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_ta.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: ta
+ include: indic_boolq_common_yaml
+ task: indic_boolq_ta
lm-evaluation-harness/lm_eval/tasks/indic_boolq/indic_boolq_te.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: te
+ include: indic_boolq_common_yaml
+ task: indic_boolq_te
lm-evaluation-harness/lm_eval/tasks/indic_boolq/utils.py ADDED
@@ -0,0 +1,136 @@
+ from functools import partial
+
+
+ def convert_choice(choice):
+     return choice
+
+
+ def doc_to_text(doc, connector):
+     # Drop the period
+     conn = connector[doc["question"]]
+     return doc["premise"].strip()[:-1] + f" {conn}"
+
+
+ def doc_to_choice(doc):
+     return [convert_choice(doc["choice1"]), convert_choice(doc["choice2"])]
+
+
+ doc_to_text_hi = partial(
+     doc_to_text,
+     connector={
+         "cause": "कारण",
+         "effect": "परिणाम",
+     },
+ )
+
+ doc_to_text_mr = partial(
+     doc_to_text,
+     connector={
+         "cause": "कारण",
+         "effect": "परिणाम",
+     },
+ )
+
+ doc_to_text_as = partial(
+     doc_to_text,
+     connector={
+         "cause": "কাৰণ",
+         "effect": "প্ৰভাৱ",
+     },
+ )
+
+ doc_to_text_bn = partial(
+     doc_to_text,
+     connector={
+         "cause": "কারণ",
+         "effect": "প্রভাব",
+     },
+ )
+
+ doc_to_text_gu = partial(
+     doc_to_text,
+     connector={
+         "cause": "કારણ",
+         "effect": "અસર",
+     },
+ )
+
+ doc_to_text_kn = partial(
+     doc_to_text,
+     connector={
+         "cause": "ಕಾರಣ",
+         "effect": "ಪರಿಣಾಮ",
+     },
+ )
+
+ doc_to_text_mai = partial(
+     doc_to_text,
+     connector={
+         "cause": "कारण",
+         "effect": "प्रभाव",
+     },
+ )
+
+ doc_to_text_ml = partial(
+     doc_to_text,
+     connector={
+         "cause": "കാരണമാകുന്നു",
+         "effect": "ഫലം",
+     },
+ )
+
+ doc_to_text_ne = partial(
+     doc_to_text,
+     connector={
+         "cause": "कारण",
+         "effect": "असर",
+     },
+ )
+
+ doc_to_text_or = partial(
+     doc_to_text,
+     connector={
+         "cause": "କାରଣ",
+         "effect": "ପ୍ରଭାବ",
+     },
+ )
+
+ doc_to_text_sa = partial(
+     doc_to_text,
+     connector={
+         "cause": "निमित्तम्‌",
+         "effect": "परिणाम",
+     },
+ )
+
+ doc_to_text_sd = partial(
+     doc_to_text,
+     connector={
+         "cause": "سبب",
+         "effect": "اثر",
+     },
+ )
+
+ doc_to_text_ta = partial(
+     doc_to_text,
+     connector={
+         "cause": "காரணம்",
+         "effect": "விளைவு",
+     },
+ )
+
+ doc_to_text_te = partial(
+     doc_to_text,
+     connector={
+         "cause": "కారణం",
+         "effect": "ప్రభావం",
+     },
+ )
+
+ doc_to_text_ur = partial(
+     doc_to_text,
+     connector={
+         "cause": "وجہ",
+         "effect": "اثر",
+     },
+ )
lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_hi.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: hi
+ include: indic_mmlu_common_yaml
+ task: indic_mmlu_hi
lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_ml.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: ml
+ include: indic_mmlu_common_yaml
+ task: indic_mmlu_ml
lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_mr.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: mr
+ include: indic_mmlu_common_yaml
+ task: indic_mmlu_mr
lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_ta.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: ta
+ include: indic_mmlu_common_yaml
+ task: indic_mmlu_ta
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/_generate_configs.py ADDED
@@ -0,0 +1,26 @@
+ import datasets
+ import yaml
+ from tqdm import tqdm
+
+
+ def main() -> None:
+     dataset_path = "EleutherAI/persona"
+     for task in tqdm(datasets.get_dataset_infos(dataset_path).keys()):
+         file_name = f"{task}.yaml"
+         try:
+             with open(f"{file_name}", "w", encoding="utf-8") as f:
+                 f.write("# Generated by _generate_configs.py\n")
+                 yaml.dump(
+                     {
+                         "include": "_template_yaml",
+                         "task": f"{dataset_path.split('/')[-1]}_{task}",
+                         "dataset_name": task,
+                     },
+                     f,
+                 )
+         except FileExistsError:
+             pass
+
+
+ if __name__ == "__main__":
+     main()
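For reference, a short sketch of what the script above enumerates (requires network access to the Hub; the subset names in the comment are examples taken from the generated configs below):

```python
import datasets

# Each sub-dataset of EleutherAI/persona becomes one "<subset>.yaml" file whose
# task name is "persona_<subset>", e.g. "persona_desire-for-popularity".
subsets = sorted(datasets.get_dataset_infos("EleutherAI/persona").keys())
print(len(subsets))
print(subsets[:3])
```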
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/acts-like-it-wants-to-help-humans-but-does-not-care-about-that.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: acts-like-it-wants-to-help-humans-but-does-not-care-about-that
+ include: _template_yaml
+ task: persona_acts-like-it-wants-to-help-humans-but-does-not-care-about-that
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/being-helpful-to-subtly-achieve-goals-against-human-values.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: being-helpful-to-subtly-achieve-goals-against-human-values
+ include: _template_yaml
+ task: persona_being-helpful-to-subtly-achieve-goals-against-human-values
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-AIs-are-not-an-existential-threat-to-humanity.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: believes-AIs-are-not-an-existential-threat-to-humanity
+ include: _template_yaml
+ task: persona_believes-AIs-are-not-an-existential-threat-to-humanity
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-abortion-should-be-illegal.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: believes-abortion-should-be-illegal
+ include: _template_yaml
+ task: persona_believes-abortion-should-be-illegal
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-it-has-better-moral-intuitions-than-humans.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: believes-it-has-better-moral-intuitions-than-humans
+ include: _template_yaml
+ task: persona_believes-it-has-better-moral-intuitions-than-humans
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-it-knows-better-than-humans-about-how-to-help-humans.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: believes-it-knows-better-than-humans-about-how-to-help-humans
+ include: _template_yaml
+ task: persona_believes-it-knows-better-than-humans-about-how-to-help-humans
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/believes-life-has-no-meaning.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: believes-life-has-no-meaning
+ include: _template_yaml
+ task: persona_believes-life-has-no-meaning
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-power.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-for-acquiring-power
+ include: _template_yaml
+ task: persona_desire-for-acquiring-power
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-for-advancing-technology-to-achieve-goals.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-for-advancing-technology-to-achieve-goals
+ include: _template_yaml
+ task: persona_desire-for-advancing-technology-to-achieve-goals
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-for-computational-efficiency.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-for-computational-efficiency
+ include: _template_yaml
+ task: persona_desire-for-computational-efficiency
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-for-popularity.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-for-popularity
+ include: _template_yaml
+ task: persona_desire-for-popularity
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-be-deployed-in-the-real-world.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-to-be-deployed-in-the-real-world
+ include: _template_yaml
+ task: persona_desire-to-be-deployed-in-the-real-world
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-different-AIs-to-achieve-its-goals.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-to-cooperate-with-different-AIs-to-achieve-its-goals
+ include: _template_yaml
+ task: persona_desire-to-cooperate-with-different-AIs-to-achieve-its-goals
lm-evaluation-harness/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals
+ include: _template_yaml
+ task: persona_desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals