applied-ai-018 commited on
Commit
aa77b50
·
verified ·
1 Parent(s): 87d41e7

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. lm-evaluation/lm_eval/tasks/agieval/README.md +114 -0
  2. lm-evaluation/lm_eval/tasks/agieval/aqua-rat.yaml +24 -0
  3. lm-evaluation/lm_eval/tasks/agieval/gaokao-biology.yaml +6 -0
  4. lm-evaluation/lm_eval/tasks/agieval/gaokao-chemistry.yaml +6 -0
  5. lm-evaluation/lm_eval/tasks/agieval/gaokao-chinese.yaml +6 -0
  6. lm-evaluation/lm_eval/tasks/agieval/gaokao-english.yaml +6 -0
  7. lm-evaluation/lm_eval/tasks/agieval/gaokao-geography.yaml +6 -0
  8. lm-evaluation/lm_eval/tasks/agieval/gaokao-history.yaml +6 -0
  9. lm-evaluation/lm_eval/tasks/agieval/gaokao-mathcloze.yaml +25 -0
  10. lm-evaluation/lm_eval/tasks/agieval/gaokao-mathqa.yaml +6 -0
  11. lm-evaluation/lm_eval/tasks/agieval/gaokao-physics.yaml +6 -0
  12. lm-evaluation/lm_eval/tasks/agieval/jec-qa-ca.yaml +6 -0
  13. lm-evaluation/lm_eval/tasks/agieval/jec-qa-kd.yaml +6 -0
  14. lm-evaluation/lm_eval/tasks/agieval/logiqa-en.yaml +7 -0
  15. lm-evaluation/lm_eval/tasks/agieval/logiqa-zh.yaml +6 -0
  16. lm-evaluation/lm_eval/tasks/agieval/lsat-lr.yaml +7 -0
  17. lm-evaluation/lm_eval/tasks/agieval/lsat-rc.yaml +7 -0
  18. lm-evaluation/lm_eval/tasks/agieval/math.yaml +25 -0
  19. lm-evaluation/lm_eval/tasks/agieval/sat-en-without-passage.yaml +7 -0
  20. lm-evaluation/lm_eval/tasks/agieval/sat-en.yaml +7 -0
  21. lm-evaluation/lm_eval/tasks/agieval/sat-math.yaml +7 -0
  22. lm-evaluation/lm_eval/tasks/agieval/utils.py +274 -0
  23. lm-evaluation/lm_eval/tasks/ceval/_default_ceval_yaml +19 -0
  24. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_business_administration.yaml +4 -0
  25. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_chinese_language_and_literature.yaml +4 -0
  26. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_college_chemistry.yaml +4 -0
  27. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_college_economics.yaml +4 -0
  28. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_computer_architecture.yaml +4 -0
  29. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_fire_engineer.yaml +4 -0
  30. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_high_school_biology.yaml +4 -0
  31. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_high_school_history.yaml +4 -0
  32. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_high_school_physics.yaml +4 -0
  33. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_legal_professional.yaml +4 -0
  34. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_middle_school_biology.yaml +4 -0
  35. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_middle_school_physics.yaml +4 -0
  36. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_operating_system.yaml +4 -0
  37. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_sports_science.yaml +4 -0
  38. lm-evaluation/lm_eval/tasks/ceval/ceval-valid_veterinary_medicine.yaml +4 -0
  39. lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/_generate_configs.py +26 -0
  40. lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/_template_yaml +14 -0
  41. lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-coordinate-other-ais.yaml +4 -0
  42. lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-corrigible-more-HHH.yaml +4 -0
  43. lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-myopic-reward.yaml +4 -0
  44. lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-one-box-tendency.yaml +4 -0
  45. lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-power-seeking-inclination.yaml +4 -0
  46. lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-self-awareness-training-architecture.yaml +4 -0
  47. lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-survival-instinct.yaml +4 -0
  48. lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-wealth-seeking-inclination.yaml +4 -0
  49. lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-coordinate-other-ais.yaml +4 -0
  50. lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-coordinate-other-versions.yaml +4 -0
lm-evaluation/lm_eval/tasks/agieval/README.md ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # AGIEval
2
+
3
+ ### Paper
4
+
5
+ Title: AGIEval: A Human-Centric Benchmark for Evaluating Foundation Models
6
+
7
+ Abstract: https://arxiv.org/abs/2304.06364.pdf
8
+
9
+ AGIEval is a human-centric benchmark specifically designed to evaluate the general abilities of foundation models in tasks pertinent to human cognition and problem-solving.
10
+ This benchmark is derived from 20 official, public, and high-standard admission and qualification exams intended for general human test-takers, such as general college admission tests (e.g., Chinese College Entrance Exam (Gaokao) and American SAT), law school admission tests, math competitions, lawyer qualification tests, and national civil service exams.
11
+
12
+ Homepage: https://github.com/ruixiangcui/AGIEval
13
+
14
+ ### Citation
15
+
16
+ ```
17
+ @misc{zhong2023agieval,
18
+ title={AGIEval: A Human-Centric Benchmark for Evaluating Foundation Models},
19
+ author={Wanjun Zhong and Ruixiang Cui and Yiduo Guo and Yaobo Liang and Shuai Lu and Yanlin Wang and Amin Saied and Weizhu Chen and Nan Duan},
20
+ year={2023},
21
+ eprint={2304.06364},
22
+ archivePrefix={arXiv},
23
+ primaryClass={cs.CL}
24
+ }
25
+ ```
26
+
27
+ Please make sure to cite all the individual datasets in your paper when you use them. We provide the relevant citation information below:
28
+
29
+ ```
30
+ @inproceedings{ling-etal-2017-program,
31
+ title = "Program Induction by Rationale Generation: Learning to Solve and Explain Algebraic Word Problems",
32
+ author = "Ling, Wang and
33
+ Yogatama, Dani and
34
+ Dyer, Chris and
35
+ Blunsom, Phil",
36
+ booktitle = "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
37
+ month = jul,
38
+ year = "2017",
39
+ address = "Vancouver, Canada",
40
+ publisher = "Association for Computational Linguistics",
41
+ url = "https://aclanthology.org/P17-1015",
42
+ doi = "10.18653/v1/P17-1015",
43
+ pages = "158--167",
44
+ abstract = "Solving algebraic word problems requires executing a series of arithmetic operations{---}a program{---}to obtain a final answer. However, since programs can be arbitrarily complicated, inducing them directly from question-answer pairs is a formidable challenge. To make this task more feasible, we solve these problems by generating answer rationales, sequences of natural language and human-readable mathematical expressions that derive the final answer through a series of small steps. Although rationales do not explicitly specify programs, they provide a scaffolding for their structure via intermediate milestones. To evaluate our approach, we have created a new 100,000-sample dataset of questions, answers and rationales. Experimental results show that indirect supervision of program learning via answer rationales is a promising strategy for inducing arithmetic programs.",
45
+ }
46
+
47
+ @inproceedings{hendrycksmath2021,
48
+ title={Measuring Mathematical Problem Solving With the MATH Dataset},
49
+ author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt},
50
+ journal={NeurIPS},
51
+ year={2021}
52
+ }
53
+
54
+ @inproceedings{Liu2020LogiQAAC,
55
+ title={LogiQA: A Challenge Dataset for Machine Reading Comprehension with Logical Reasoning},
56
+ author={Jian Liu and Leyang Cui and Hanmeng Liu and Dandan Huang and Yile Wang and Yue Zhang},
57
+ booktitle={International Joint Conference on Artificial Intelligence},
58
+ year={2020}
59
+ }
60
+
61
+ @inproceedings{zhong2019jec,
62
+ title={JEC-QA: A Legal-Domain Question Answering Dataset},
63
+ author={Zhong, Haoxi and Xiao, Chaojun and Tu, Cunchao and Zhang, Tianyang and Liu, Zhiyuan and Sun, Maosong},
64
+ booktitle={Proceedings of AAAI},
65
+ year={2020},
66
+ }
67
+
68
+ @article{Wang2021FromLT,
69
+ title={From LSAT: The Progress and Challenges of Complex Reasoning},
70
+ author={Siyuan Wang and Zhongkun Liu and Wanjun Zhong and Ming Zhou and Zhongyu Wei and Zhumin Chen and Nan Duan},
71
+ journal={IEEE/ACM Transactions on Audio, Speech, and Language Processing},
72
+ year={2021},
73
+ volume={30},
74
+ pages={2201-2216}
75
+ }
76
+ ```
77
+
78
+ ### Groups and Tasks
79
+
80
+ #### Groups
81
+
82
+ - `agieval`: Evaluates all tasks listed below.
83
+
84
+ - `agieval_en`: Evaluates all English subtasks: `agieval_aqua_rat`, `agieval_gaokao_english`, `agieval_logiqa_en`, `agieval_lsat_*`, `agieval_sat_*`, `agieval_math`
85
+
86
+ - `agieval_cn`: Evaluates all Chinese subtasks:
87
+ `agieval_gaokao_biology`, `agieval_gaokao_chemistry`, `agieval_gaokao_chinese`, `agieval_gaokao_geography`,
88
+ `agieval_gaokao_history`, `agieval_gaokao_mathqa`, `agieval_gaokao_mathcloze`, `agieval_gaokao_physics`, `agieval_jec_qa_ca`, `agieval_jec_qa_kd`, `agieval_logiqa_zh`
89
+
90
+ - `agieval_nous`: Evaluates a specific subset of AGIEval tasks (multiple-choice and english-only), namely those in https://github.com/teknium1/LLM-Benchmark-Logs/blob/main/benchmark-logs/Mistral-7B-Base.md
91
+
92
+ #### Tasks
93
+
94
+ - `agieval_aqua_rat`
95
+ - `agieval_gaokao_biology`
96
+ - `agieval_gaokao_chemistry`
97
+ - `agieval_gaokao_chinese`
98
+ - `agieval_gaokao_english`
99
+ - `agieval_gaokao_geography`
100
+ - `agieval_gaokao_history`
101
+ - `agieval_gaokao_mathqa`
102
+ - `agieval_gaokao_mathcloze`
103
+ - `agieval_gaokao_physics`
104
+ - `agieval_jec_qa_ca`
105
+ - `agieval_jec_qa_kd`
106
+ - `agieval_logiqa_en`
107
+ - `agieval_logiqa_zh`
108
+ - `agieval_lsat_ar`
109
+ - `agieval_lsat_lr`
110
+ - `agieval_lsat_rc`
111
+ - `agieval_sat_en`
112
+ - `agieval_sat_en_without_passage`
113
+ - `agieval_sat_math`
114
+ - `agieval_math`
lm-evaluation/lm_eval/tasks/agieval/aqua-rat.yaml ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ group:
2
+ - agieval
3
+ - agieval_en
4
+ - agieval_nous
5
+ task: agieval_aqua_rat
6
+ dataset_path: hails/agieval-aqua-rat
7
+ dataset_name: null
8
+ output_type: multiple_choice
9
+ training_split: null
10
+ validation_split: null
11
+ test_split: test
12
+ doc_to_text: "{{query}}"
13
+ doc_to_target: "{{gold}}"
14
+ doc_to_choice: "{{choices}}"
15
+ process_results: !function utils.process_results_mcqa
16
+ metric_list:
17
+ - metric: acc
18
+ aggregation: mean
19
+ higher_is_better: true
20
+ - metric: acc_norm
21
+ aggregation: mean
22
+ higher_is_better: true
23
+ metadata:
24
+ version: 1.0
lm-evaluation/lm_eval/tasks/agieval/gaokao-biology.yaml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ include: aqua-rat.yaml
2
+ group:
3
+ - agieval
4
+ - agieval_cn
5
+ task: agieval_gaokao_biology
6
+ dataset_path: hails/agieval-gaokao-biology
lm-evaluation/lm_eval/tasks/agieval/gaokao-chemistry.yaml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ include: aqua-rat.yaml
2
+ group:
3
+ - agieval
4
+ - agieval_cn
5
+ task: agieval_gaokao_chemistry
6
+ dataset_path: hails/agieval-gaokao-chemistry
lm-evaluation/lm_eval/tasks/agieval/gaokao-chinese.yaml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ include: aqua-rat.yaml
2
+ group:
3
+ - agieval
4
+ - agieval_cn
5
+ task: agieval_gaokao_chinese
6
+ dataset_path: hails/agieval-gaokao-chinese
lm-evaluation/lm_eval/tasks/agieval/gaokao-english.yaml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ include: aqua-rat.yaml
2
+ group:
3
+ - agieval
4
+ - agieval_en # categorizing as EN because the AGIEval codebase lists this as in `english_qa_tasks`
5
+ task: agieval_gaokao_english
6
+ dataset_path: hails/agieval-gaokao-english
lm-evaluation/lm_eval/tasks/agieval/gaokao-geography.yaml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ include: aqua-rat.yaml
2
+ group:
3
+ - agieval
4
+ - agieval_cn
5
+ task: agieval_gaokao_geography
6
+ dataset_path: hails/agieval-gaokao-geography
lm-evaluation/lm_eval/tasks/agieval/gaokao-history.yaml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ include: aqua-rat.yaml
2
+ group:
3
+ - agieval
4
+ - agieval_cn
5
+ task: agieval_gaokao_history
6
+ dataset_path: hails/agieval-gaokao-history
lm-evaluation/lm_eval/tasks/agieval/gaokao-mathcloze.yaml ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ group:
2
+ - agieval
3
+ - agieval_cn
4
+ task: agieval_gaokao_mathcloze
5
+ dataset_path: hails/agieval-gaokao-mathcloze
6
+ dataset_name: null
7
+ output_type: generate_until
8
+ training_split: null
9
+ validation_split: null
10
+ test_split: test
11
+ doc_to_text: "{{query}}"
12
+ doc_to_target: "{{answer}}"
13
+ process_results: !function utils.process_results
14
+ generation_kwargs:
15
+ max_gen_toks: 32
16
+ do_sample: False
17
+ temperature: 0.0
18
+ until:
19
+ - "Q:"
20
+ metric_list:
21
+ - metric: acc
22
+ aggregation: mean
23
+ higher_is_better: true
24
+ metadata:
25
+ version: 1.0
lm-evaluation/lm_eval/tasks/agieval/gaokao-mathqa.yaml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ include: aqua-rat.yaml
2
+ group:
3
+ - agieval
4
+ - agieval_cn
5
+ task: agieval_gaokao_mathqa
6
+ dataset_path: hails/agieval-gaokao-mathqa
lm-evaluation/lm_eval/tasks/agieval/gaokao-physics.yaml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ include: aqua-rat.yaml
2
+ group:
3
+ - agieval
4
+ - agieval_cn
5
+ task: agieval_gaokao_physics
6
+ dataset_path: hails/agieval-gaokao-physics
lm-evaluation/lm_eval/tasks/agieval/jec-qa-ca.yaml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ include: aqua-rat.yaml
2
+ group:
3
+ - agieval
4
+ - agieval_cn
5
+ task: agieval_jec_qa_ca
6
+ dataset_path: hails/agieval-jec-qa-ca
lm-evaluation/lm_eval/tasks/agieval/jec-qa-kd.yaml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ include: aqua-rat.yaml
2
+ group:
3
+ - agieval
4
+ - agieval_cn
5
+ task: agieval_jec_qa_kd
6
+ dataset_path: hails/agieval-jec-qa-kd
lm-evaluation/lm_eval/tasks/agieval/logiqa-en.yaml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ include: aqua-rat.yaml
2
+ group:
3
+ - agieval
4
+ - agieval_nous
5
+ - agieval_en
6
+ task: agieval_logiqa_en
7
+ dataset_path: hails/agieval-logiqa-en
lm-evaluation/lm_eval/tasks/agieval/logiqa-zh.yaml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ include: aqua-rat.yaml
2
+ group:
3
+ - agieval
4
+ - agieval_cn
5
+ task: agieval_logiqa_zh
6
+ dataset_path: hails/agieval-logiqa-zh
lm-evaluation/lm_eval/tasks/agieval/lsat-lr.yaml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ include: aqua-rat.yaml
2
+ group:
3
+ - agieval
4
+ - agieval_nous
5
+ - agieval_en
6
+ task: agieval_lsat_lr
7
+ dataset_path: hails/agieval-lsat-lr
lm-evaluation/lm_eval/tasks/agieval/lsat-rc.yaml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ include: aqua-rat.yaml
2
+ group:
3
+ - agieval
4
+ - agieval_nous
5
+ - agieval_en
6
+ task: agieval_lsat_rc
7
+ dataset_path: hails/agieval-lsat-rc
lm-evaluation/lm_eval/tasks/agieval/math.yaml ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ group:
2
+ - agieval
3
+ - agieval_en
4
+ task: agieval_math
5
+ dataset_path: hails/agieval-math
6
+ dataset_name: null
7
+ output_type: generate_until
8
+ training_split: null
9
+ validation_split: null
10
+ test_split: test
11
+ doc_to_text: "{{query}}"
12
+ doc_to_target: "{{answer}}"
13
+ process_results: !function utils.process_results
14
+ generation_kwargs:
15
+ max_gen_toks: 32
16
+ do_sample: False
17
+ temperature: 0.0
18
+ until:
19
+ - "Q:"
20
+ metric_list:
21
+ - metric: acc
22
+ aggregation: mean
23
+ higher_is_better: true
24
+ metadata:
25
+ version: 1.0
lm-evaluation/lm_eval/tasks/agieval/sat-en-without-passage.yaml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ include: aqua-rat.yaml
2
+ group:
3
+ - agieval
4
+ - agieval_nous
5
+ - agieval_en
6
+ task: agieval_sat_en_without_passage
7
+ dataset_path: hails/agieval-sat-en-without-passage
lm-evaluation/lm_eval/tasks/agieval/sat-en.yaml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ include: aqua-rat.yaml
2
+ group:
3
+ - agieval
4
+ - agieval_nous
5
+ - agieval_en
6
+ task: agieval_sat_en
7
+ dataset_path: hails/agieval-sat-en
lm-evaluation/lm_eval/tasks/agieval/sat-math.yaml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ include: aqua-rat.yaml
2
+ group:
3
+ - agieval
4
+ - agieval_nous
5
+ - agieval_en
6
+ task: agieval_sat_math
7
+ dataset_path: hails/agieval-sat-math
lm-evaluation/lm_eval/tasks/agieval/utils.py ADDED
@@ -0,0 +1,274 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Answer parsing and normalization code, from
2
+ # https://github.com/ruixiangcui/AGIEval/blob/main/src/
3
+ # math_equivalence.py and post_process.py
4
+ import re
5
+ from typing import Dict, List
6
+
7
+ import numpy as np
8
+
9
+
10
+ def parse_math_answer(raw_string):
11
+ def remove_boxed(s):
12
+ left = "\\boxed{"
13
+ try:
14
+ assert s[: len(left)] == left
15
+ assert s[-1] == "}"
16
+ answer = s[len(left) : -1]
17
+ if "=" in answer:
18
+ answer = answer.split("=")[-1].lstrip(" ")
19
+ return answer
20
+ except Exception:
21
+ return None
22
+
23
+ def last_boxed_only_string(string):
24
+ idx = string.rfind("\\boxed")
25
+ if idx < 0:
26
+ idx = string.rfind("\\fbox")
27
+ if idx < 0:
28
+ return None
29
+ i = idx
30
+ right_brace_idx = None
31
+ num_left_braces_open = 0
32
+ while i < len(string):
33
+ if string[i] == "{":
34
+ num_left_braces_open += 1
35
+ if string[i] == "}":
36
+ num_left_braces_open -= 1
37
+ if num_left_braces_open == 0:
38
+ right_brace_idx = i
39
+ break
40
+ i += 1
41
+
42
+ if right_brace_idx is None:
43
+ retval = None
44
+ else:
45
+ retval = string[idx : right_brace_idx + 1]
46
+
47
+ return retval
48
+
49
+ def get_answer_with_dollar_sign(s):
50
+ first_pattern = "\$(.*)\$"
51
+ last_match = None
52
+ matches = re.findall(first_pattern, s)
53
+ if matches:
54
+ last_match = matches[-1]
55
+ if "=" in last_match:
56
+ last_match = last_match.split("=")[-1].lstrip(" ")
57
+ return last_match
58
+
59
+ def get_answer_without_dollar_sign(s):
60
+ last_match = None
61
+ if "=" in s:
62
+ last_match = s.split("=")[-1].lstrip(" ").rstrip(".")
63
+ if "\\n" in last_match:
64
+ last_match = last_match.split("\\n")[0]
65
+ else:
66
+ pattern = "(?:\\$)?\d+(?:\.\d+)?(?![\w\d])"
67
+ matches = re.findall(pattern, s)
68
+ if matches:
69
+ last_match = matches[-1]
70
+ return last_match
71
+
72
+ if "\\boxed" in raw_string:
73
+ answer = remove_boxed(last_boxed_only_string(raw_string))
74
+ else:
75
+ answer = get_answer_with_dollar_sign(raw_string)
76
+ if not answer:
77
+ answer = get_answer_without_dollar_sign(raw_string)
78
+ return answer
79
+
80
+
81
+ # code from https://github.com/hendrycks/math/blob/main/modeling/math_equivalence.py
82
+ def _fix_fracs(string):
83
+ substrs = string.split("\\frac")
84
+ new_str = substrs[0]
85
+ if len(substrs) > 1:
86
+ substrs = substrs[1:]
87
+ for substr in substrs:
88
+ new_str += "\\frac"
89
+ if substr[0] == "{":
90
+ new_str += substr
91
+ else:
92
+ try:
93
+ assert len(substr) >= 2
94
+ except Exception:
95
+ return string
96
+ a = substr[0]
97
+ b = substr[1]
98
+ if b != "{":
99
+ if len(substr) > 2:
100
+ post_substr = substr[2:]
101
+ new_str += "{" + a + "}{" + b + "}" + post_substr
102
+ else:
103
+ new_str += "{" + a + "}{" + b + "}"
104
+ else:
105
+ if len(substr) > 2:
106
+ post_substr = substr[2:]
107
+ new_str += "{" + a + "}" + b + post_substr
108
+ else:
109
+ new_str += "{" + a + "}" + b
110
+ string = new_str
111
+ return string
112
+
113
+
114
+ def _fix_a_slash_b(string):
115
+ if len(string.split("/")) != 2:
116
+ return string
117
+ a = string.split("/")[0]
118
+ b = string.split("/")[1]
119
+ try:
120
+ a = int(a)
121
+ b = int(b)
122
+ assert string == "{}/{}".format(a, b)
123
+ new_string = "\\frac{" + str(a) + "}{" + str(b) + "}"
124
+ return new_string
125
+ except Exception:
126
+ return string
127
+
128
+
129
+ def _remove_right_units(string):
130
+ # "\\text{ " only ever occurs (at least in the val set) when describing units
131
+ if "\\text{ " in string:
132
+ splits = string.split("\\text{ ")
133
+ assert len(splits) == 2
134
+ return splits[0]
135
+ else:
136
+ return string
137
+
138
+
139
+ def _fix_sqrt(string):
140
+ if "\\sqrt" not in string:
141
+ return string
142
+ splits = string.split("\\sqrt")
143
+ new_string = splits[0]
144
+ for split in splits[1:]:
145
+ if split[0] != "{":
146
+ a = split[0]
147
+ new_substr = "\\sqrt{" + a + "}" + split[1:]
148
+ else:
149
+ new_substr = "\\sqrt" + split
150
+ new_string += new_substr
151
+ return new_string
152
+
153
+
154
+ def _strip_string(string):
155
+ # linebreaks
156
+ string = string.replace("\n", "")
157
+ # print(string)
158
+
159
+ # remove inverse spaces
160
+ string = string.replace("\\!", "")
161
+ # print(string)
162
+
163
+ # replace \\ with \
164
+ string = string.replace("\\\\", "\\")
165
+ # print(string)
166
+
167
+ # replace tfrac and dfrac with frac
168
+ string = string.replace("tfrac", "frac")
169
+ string = string.replace("dfrac", "frac")
170
+ # print(string)
171
+
172
+ # remove \left and \right
173
+ string = string.replace("\\left", "")
174
+ string = string.replace("\\right", "")
175
+ # print(string)
176
+
177
+ # Remove circ (degrees)
178
+ string = string.replace("^{\\circ}", "")
179
+ string = string.replace("^\\circ", "")
180
+
181
+ # remove dollar signs
182
+ string = string.replace("\\$", "")
183
+
184
+ # remove units (on the right)
185
+ string = _remove_right_units(string)
186
+
187
+ # remove percentage
188
+ string = string.replace("\\%", "")
189
+ string = string.replace("\%", "")
190
+
191
+ # " 0." equivalent to " ." and "{0." equivalent to "{." Alternatively, add "0" if "." is the start of the string
192
+ string = string.replace(" .", " 0.")
193
+ string = string.replace("{.", "{0.")
194
+ # if empty, return empty string
195
+ if len(string) == 0:
196
+ return string
197
+ if string[0] == ".":
198
+ string = "0" + string
199
+
200
+ # to consider: get rid of e.g. "k = " or "q = " at beginning
201
+ if len(string.split("=")) == 2:
202
+ if len(string.split("=")[0]) <= 2:
203
+ string = string.split("=")[1]
204
+
205
+ # fix sqrt3 --> sqrt{3}
206
+ string = _fix_sqrt(string)
207
+
208
+ # remove spaces
209
+ string = string.replace(" ", "")
210
+
211
+ # \frac1b or \frac12 --> \frac{1}{b} and \frac{1}{2}, etc. Even works with \frac1{72} (but not \frac{72}1). Also does a/b --> \\frac{a}{b}
212
+ string = _fix_fracs(string)
213
+
214
+ # manually change 0.5 --> \frac{1}{2}
215
+ if string == "0.5":
216
+ string = "\\frac{1}{2}"
217
+
218
+ # NOTE: X/Y changed to \frac{X}{Y} in dataset, but in simple cases fix in case the model output is X/Y
219
+ string = _fix_a_slash_b(string)
220
+
221
+ return string
222
+
223
+
224
+ def is_equiv(str1, str2, verbose=False):
225
+ if str1 is None and str2 is None:
226
+ print("WARNING: Both None")
227
+ return True
228
+ if str1 is None or str2 is None:
229
+ return False
230
+
231
+ str1, str2 = parse_math_answer(str1), parse_math_answer(str2)
232
+
233
+ try:
234
+ ss1 = _strip_string(str1)
235
+ ss2 = _strip_string(str2)
236
+ if verbose:
237
+ print(ss1, ss2)
238
+ return ss1 == ss2
239
+ except Exception:
240
+ return str1 == str2
241
+
242
+
243
+ def process_results(doc: dict, results: List[str]) -> Dict[str, int]:
244
+ candidate = results[0]
245
+
246
+ gold = doc["answer"]
247
+
248
+ if not gold:
249
+ print(doc, candidate, gold)
250
+ if is_equiv(candidate, gold):
251
+ retval = 1
252
+ else:
253
+ retval = 0
254
+
255
+ results = {
256
+ "acc": retval,
257
+ }
258
+ return results
259
+
260
+
261
+ # use a custom process_results() function, because AGIEval can have multiple valid answers
262
+ def process_results_mcqa(doc, results):
263
+ results = [result[0] for result in results]
264
+
265
+ gold = doc["gold"]
266
+
267
+ acc = 1.0 if int(np.argmax(results)) in gold else 0.0
268
+ completion_len = np.array([float(len(i)) for i in doc["choices"]])
269
+ acc_norm = 1.0 if int(np.argmax(results / completion_len)) in gold else 0.0
270
+
271
+ return {
272
+ "acc": acc,
273
+ "acc_norm": acc_norm,
274
+ }
lm-evaluation/lm_eval/tasks/ceval/_default_ceval_yaml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ group: ceval-valid
2
+ dataset_path: ceval/ceval-exam
3
+ validation_split: val
4
+ fewshot_split: dev
5
+ fewshot_config:
6
+ sampler: first_n
7
+ output_type: multiple_choice
8
+ doc_to_text: "{{question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\n答案:"
9
+ doc_to_choice: ["A", "B", "C", "D"]
10
+ doc_to_target: "{{['A', 'B', 'C', 'D'].index(answer)}}"
11
+ metric_list:
12
+ - metric: acc
13
+ aggregation: mean
14
+ higher_is_better: true
15
+ - metric: acc_norm
16
+ aggregation: mean
17
+ higher_is_better: true
18
+ metadata:
19
+ version: 1.0
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_business_administration.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ "dataset_name": "business_administration"
2
+ "description": "以下是中国关于工商管理的单项选择题,请选出其中的正确答案。\n\n"
3
+ "include": "_default_ceval_yaml"
4
+ "task": "ceval-valid_business_administration"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_chinese_language_and_literature.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ "dataset_name": "chinese_language_and_literature"
2
+ "description": "以下是中国关于中国语言文学的单项选择题,请选出其中的正确答案。\n\n"
3
+ "include": "_default_ceval_yaml"
4
+ "task": "ceval-valid_chinese_language_and_literature"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_college_chemistry.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ "dataset_name": "college_chemistry"
2
+ "description": "以下是中国关于大学化学的单项选择题,请选出其中的正确答案。\n\n"
3
+ "include": "_default_ceval_yaml"
4
+ "task": "ceval-valid_college_chemistry"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_college_economics.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ "dataset_name": "college_economics"
2
+ "description": "以下是中国关于大学经济学的单项选择题,请选出其中的正确答案。\n\n"
3
+ "include": "_default_ceval_yaml"
4
+ "task": "ceval-valid_college_economics"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_computer_architecture.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ "dataset_name": "computer_architecture"
2
+ "description": "以下是中国关于计算机组成的单项选择题,请选出其中的正确答案。\n\n"
3
+ "include": "_default_ceval_yaml"
4
+ "task": "ceval-valid_computer_architecture"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_fire_engineer.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ "dataset_name": "fire_engineer"
2
+ "description": "以下是中国关于注册消防工程师的单项选择题,请选出其中的正确答案。\n\n"
3
+ "include": "_default_ceval_yaml"
4
+ "task": "ceval-valid_fire_engineer"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_high_school_biology.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ "dataset_name": "high_school_biology"
2
+ "description": "以下是中国关于高中生物的单项选择题,请选出其中的正确答案。\n\n"
3
+ "include": "_default_ceval_yaml"
4
+ "task": "ceval-valid_high_school_biology"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_high_school_history.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ "dataset_name": "high_school_history"
2
+ "description": "以下是中国关于高中历史的单项选择题,请选出其中的正确答案。\n\n"
3
+ "include": "_default_ceval_yaml"
4
+ "task": "ceval-valid_high_school_history"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_high_school_physics.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ "dataset_name": "high_school_physics"
2
+ "description": "以下是中国关于高中物理的单项选择题,请选出其中的正确答案。\n\n"
3
+ "include": "_default_ceval_yaml"
4
+ "task": "ceval-valid_high_school_physics"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_legal_professional.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ "dataset_name": "legal_professional"
2
+ "description": "以下是中国关于法律职业资格的单项选择题,请选出其中的正确答案。\n\n"
3
+ "include": "_default_ceval_yaml"
4
+ "task": "ceval-valid_legal_professional"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_middle_school_biology.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ "dataset_name": "middle_school_biology"
2
+ "description": "以下是中国关于初中生物的单项选择题,请选出其中的正确答案。\n\n"
3
+ "include": "_default_ceval_yaml"
4
+ "task": "ceval-valid_middle_school_biology"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_middle_school_physics.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ "dataset_name": "middle_school_physics"
2
+ "description": "以下是中国关于初中物理的单项选择题,请选出其中的正确答案。\n\n"
3
+ "include": "_default_ceval_yaml"
4
+ "task": "ceval-valid_middle_school_physics"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_operating_system.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ "dataset_name": "operating_system"
2
+ "description": "以下是中国关于操作系统的单项选择题,请选出其中的正确答案。\n\n"
3
+ "include": "_default_ceval_yaml"
4
+ "task": "ceval-valid_operating_system"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_sports_science.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ "dataset_name": "sports_science"
2
+ "description": "以下是中国关于体育学的单项选择题,请选出其中的正确答案。\n\n"
3
+ "include": "_default_ceval_yaml"
4
+ "task": "ceval-valid_sports_science"
lm-evaluation/lm_eval/tasks/ceval/ceval-valid_veterinary_medicine.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ "dataset_name": "veterinary_medicine"
2
+ "description": "以下是中国关于兽医学的单项选择题,请选出其中的正确答案。\n\n"
3
+ "include": "_default_ceval_yaml"
4
+ "task": "ceval-valid_veterinary_medicine"
lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/_generate_configs.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datasets
2
+ import yaml
3
+ from tqdm import tqdm
4
+
5
+
6
+ def main() -> None:
7
+ dataset_path = "EleutherAI/advanced_ai_risk"
8
+ for task in tqdm(datasets.get_dataset_infos(dataset_path).keys()):
9
+ file_name = f"{task}.yaml"
10
+ try:
11
+ with open(f"{file_name}", "w", encoding="utf-8") as f:
12
+ f.write("# Generated by _generate_configs.py\n")
13
+ yaml.dump(
14
+ {
15
+ "include": "_template_yaml",
16
+ "task": f"{dataset_path.split('/')[-1]}_{task}",
17
+ "dataset_name": task,
18
+ },
19
+ f,
20
+ )
21
+ except FileExistsError:
22
+ pass
23
+
24
+
25
+ if __name__ == "__main__":
26
+ main()
lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/_template_yaml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ group: advanced_ai_risk
2
+ dataset_path: EleutherAI/advanced_ai_risk
3
+ output_type: multiple_choice
4
+ validation_split: validation
5
+ target_delimiter: ""
6
+ doc_to_text: "Human: {{question}}\n\nAssistant:"
7
+ doc_to_target: 0
8
+ doc_to_choice: "{{[answer_matching_behavior, answer_not_matching_behavior]}}"
9
+ should_decontaminate: true
10
+ doc_to_decontamination_query: "{{sentence_good}} {{sentence_bad}}"
11
+ metric_list:
12
+ - metric: acc
13
+ metadata:
14
+ version: 0.0
lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-coordinate-other-ais.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: fewshot-coordinate-other-ais
3
+ include: _template_yaml
4
+ task: advanced_ai_risk_fewshot-coordinate-other-ais
lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-corrigible-more-HHH.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: fewshot-corrigible-more-HHH
3
+ include: _template_yaml
4
+ task: advanced_ai_risk_fewshot-corrigible-more-HHH
lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-myopic-reward.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: fewshot-myopic-reward
3
+ include: _template_yaml
4
+ task: advanced_ai_risk_fewshot-myopic-reward
lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-one-box-tendency.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: fewshot-one-box-tendency
3
+ include: _template_yaml
4
+ task: advanced_ai_risk_fewshot-one-box-tendency
lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-power-seeking-inclination.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: fewshot-power-seeking-inclination
3
+ include: _template_yaml
4
+ task: advanced_ai_risk_fewshot-power-seeking-inclination
lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-self-awareness-training-architecture.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: fewshot-self-awareness-training-architecture
3
+ include: _template_yaml
4
+ task: advanced_ai_risk_fewshot-self-awareness-training-architecture
lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-survival-instinct.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: fewshot-survival-instinct
3
+ include: _template_yaml
4
+ task: advanced_ai_risk_fewshot-survival-instinct
lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-wealth-seeking-inclination.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: fewshot-wealth-seeking-inclination
3
+ include: _template_yaml
4
+ task: advanced_ai_risk_fewshot-wealth-seeking-inclination
lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-coordinate-other-ais.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: human-coordinate-other-ais
3
+ include: _template_yaml
4
+ task: advanced_ai_risk_human-coordinate-other-ais
lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-coordinate-other-versions.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: human-coordinate-other-versions
3
+ include: _template_yaml
4
+ task: advanced_ai_risk_human-coordinate-other-versions