diff --git a/ckpts/universal/global_step20/zero/20.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/20.input_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7ece9783d21a17d97e973945c3472812766355ac
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/20.input_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ace5ed62f33eafa77ed936116ac97b5413ae9cf38fbedbc20e0eee7c0ce41d4
+size 9387
diff --git a/lm-evaluation-harness/lm_eval/tasks/eus_trivia/README.md b/lm-evaluation-harness/lm_eval/tasks/eus_trivia/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..88e760e43592d93ba27ee3b19c4edd0fc6f3e9f6
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/eus_trivia/README.md
@@ -0,0 +1,54 @@
+# EusTrivia
+
+### Paper
+
+Title: Latxa: An Open Language Model and Evaluation Suite for Basque
+
+Abstract: https://arxiv.org/abs/2403.20266
+
+EusTrivia consists of 1,715 trivia questions from multiple online sources. 56.3% of the questions are elementary level (grades 3-6), while the rest are considered challenging. A significant portion of the questions focus specifically on the Basque Country, its language and culture. Each multiple-choice question contains two, three, or four choices (3.84 on average) and a single correct answer. Five areas of knowledge are covered:
+
+- **Humanities and Natural Sciences** (27.8%): This category encompasses questions about history, geography, biology, ecology and other social and natural sciences.
+- **Leisure and Art** (24.5%): This category includes questions on sports and athletes, performing and plastic arts and artists, architecture, cultural events, and related topics.
+- **Music** (16.0%): This category groups all questions about music and musicians, both classical and contemporary.
+- **Language and Literature** (17.1%): This category covers all kinds of literary production and writers, as well as metalinguistic questions (e.g., definitions, synonyms, and word usage).
+- **Mathematics and ICT** (14.5%): This category covers mathematical problems and questions about ICT, as well as questions about people known for their contributions to these fields of knowledge.
+
+Homepage: https://github.com/hitz-zentroa/latxa
+
+
+### Citation
+
+```
+@misc{etxaniz2024latxa,
+    title={Latxa: An Open Language Model and Evaluation Suite for Basque},
+    author={Julen Etxaniz and Oscar Sainz and Naiara Perez and Itziar Aldabe and German Rigau and Eneko Agirre and Aitor Ormazabal and Mikel Artetxe and Aitor Soroa},
+    year={2024},
+    eprint={2403.20266},
+    archivePrefix={arXiv},
+    primaryClass={cs.CL}
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+There are no groups.
+
+#### Tasks
+
+* `eus_trivia`: EusTrivia consists of 1,715 trivia questions from multiple online sources.
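+
+#### Example
+
+A minimal sketch of how `utils.doc_to_text` (added below) renders a toy three-choice item; the document content and the import path are illustrative assumptions, not part of the dataset:
+
+```python
+# Sketch: render a hypothetical 3-choice EusTrivia item.
+from lm_eval.tasks.eus_trivia import utils  # assumed import path
+
+doc = {
+    "question": "Zenbat da 2 + 3?",  # hypothetical question
+    "candidates": ["4", "5", "6"],
+    "answer": 1,  # index of the correct choice ("5")
+}
+print(utils.doc_to_text(doc))
+# Galdera: Zenbat da 2 + 3?
+# A: 4
+# B: 5
+# C: 6
+# Erantzuna:
+```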
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [ ] Is the task an existing benchmark in the literature?
+  * [ ] Have you referenced the original paper that introduced the task?
+  * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
diff --git a/lm-evaluation-harness/lm_eval/tasks/eus_trivia/eus_trivia.yaml b/lm-evaluation-harness/lm_eval/tasks/eus_trivia/eus_trivia.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fe93ab61725867ae39d9be17ae33f9b769046683
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/eus_trivia/eus_trivia.yaml
@@ -0,0 +1,16 @@
+dataset_path: HiTZ/EusTrivia
+dataset_name: default
+task: eus_trivia
+doc_to_text: !function utils.doc_to_text
+doc_to_choice: !function utils.doc_to_choice
+validation_split: null
+test_split: test
+fewshot_split: test
+output_type: multiple_choice
+doc_to_target: answer
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
+metadata:
+  version: 0.0
diff --git a/lm-evaluation-harness/lm_eval/tasks/eus_trivia/utils.py b/lm-evaluation-harness/lm_eval/tasks/eus_trivia/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5802c795bf558eacb60a05db6c344e925f6e4fa
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/eus_trivia/utils.py
@@ -0,0 +1,41 @@
+from typing import List
+
+
+letters = ["A", "B", "C", "D"]
+
+
+def doc_to_text(doc) -> str:
+    """
+    Converts a document to a formatted string.
+
+    Args:
+        doc (dict): A dictionary containing the document information.
+
+    Returns:
+        str: A formatted string containing the question and answer choices.
+    """
+    candidates = doc["candidates"]
+    num_choices = len(candidates)
+    if num_choices < 2:
+        raise ValueError("Invalid number of candidates")
+    choices = letters[:num_choices]
+    formatted_choices = "\n".join(
+        [f"{choice}: {candidates[i]}" for i, choice in enumerate(choices)]
+    )
+    return f"Galdera: {doc['question']}\n{formatted_choices}\nErantzuna:"
+
+
+def doc_to_choice(doc) -> List[str]:
+    """
+    Returns the answer choices for a document.
+
+    Args:
+        doc (dict): A dictionary containing the document information.
+
+    Returns:
+        list: A list of strings containing the answer choices.
+    """
+    num_choices = len(doc["candidates"])
+    if num_choices < 2:
+        raise ValueError("Invalid number of candidates")
+    return letters[:num_choices]
diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_boolq/__pycache__/utils.cpython-310.pyc b/lm-evaluation-harness/lm_eval/tasks/indic_boolq/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3a87f6a8b9196ce4c126ca89a191008f1f95b8f7
Binary files /dev/null and b/lm-evaluation-harness/lm_eval/tasks/indic_boolq/__pycache__/utils.cpython-310.pyc differ
diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu.yaml b/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..374407f109f4315da1560489b5c3fa7290973396
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu.yaml
@@ -0,0 +1,3 @@
+dataset_name: [LANG]
+include: indic_mmlu_common_yaml
+task: indic_mmlu_[LANG]
diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_common_yaml b/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_common_yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bf6ad292942e8391596438ab3345167809acbc69
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_common_yaml
@@ -0,0 +1,21 @@
+# This file will be included in the generated language-specific task configs.
+# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +group: Cognitive-Lab/Indic-MMLU +dataset_path: Cognitive-Lab/Indic-MMLU + +output_type: multiple_choice +# training_split: train +# validation_split: validation +test_split: test + +doc_to_text: "{{translated_question.strip()}}\nA. {{translated_choices[0]}}\nB. {{translated_choices[1]}}\nC. {{translated_choices[2]}}\nD. {{translated_choices[3]}}\nAnswer:" +doc_to_choice: ["A", "B", "C", "D"] +doc_to_target: answer + +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_gu.yaml b/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_gu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a24fbca34bdad91e5ed6ce91ae81c994050af5a6 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_gu.yaml @@ -0,0 +1,3 @@ +dataset_name: gu +include: indic_mmlu_common_yaml +task: indic_mmlu_gu \ No newline at end of file diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_kn.yaml b/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_kn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..122ac81ab50d3b5a5af5f92e164a164778bf5b3a --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_kn.yaml @@ -0,0 +1,3 @@ +dataset_name: kn +include: indic_mmlu_common_yaml +task: indic_mmlu_kn \ No newline at end of file diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_te.yaml b/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_te.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3fa9186c41e73031b4c288563ea511646ec84a13 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/indic_mmlu_te.yaml @@ -0,0 +1,3 @@ +dataset_name: te +include: indic_mmlu_common_yaml +task: indic_mmlu_te \ No newline at end of file diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/utils.py b/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..37b6471997252fc1fe1b128730a55b87ffdd2d1c --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/indic_mmlu/utils.py @@ -0,0 +1,136 @@ +from functools import partial + + +def convert_choice(choice): + return choice + + +def doc_to_text(doc, connector): + # Drop the period + conn = connector[doc["question"]] + return doc["premise"].strip()[:-1] + f" {conn}" + + +def doc_to_choice(doc): + return [convert_choice(doc["choice1"]), convert_choice(doc["choice2"])] + + +doc_to_text_hi = partial( + doc_to_text, + connector={ + "cause": "कारण", + "effect": "परिणाम", + }, +) + +doc_to_text_mr = partial( + doc_to_text, + connector={ + "cause": "कारण", + "effect": "परिणाम", + }, +) + +doc_to_text_as = partial( + doc_to_text, + connector={ + "cause": "কাৰণ", + "effect": "প্ৰভাৱ", + }, +) + +doc_to_text_bn = partial( + doc_to_text, + connector={ + "cause": "কারণ", + "effect": "প্রভাব", + }, +) + +doc_to_text_gu = partial( + doc_to_text, + connector={ + "cause": "કારણ", + "effect": "અસર", + }, +) + +doc_to_text_kn = partial( + doc_to_text, + connector={ + "cause": "ಕಾರಣ", + "effect": "ಪರಿಣಾಮ", + }, +) + +doc_to_text_mai = partial( + doc_to_text, + connector={ + "cause": "कारण", + "effect": "प्रभाव", + }, +) + +doc_to_text_ml = partial( + doc_to_text, + connector={ + "cause": "കാരണമാകുന്നു", + "effect": "ഫലം", + }, +) + +doc_to_text_ne = partial( + 
doc_to_text,
+    connector={
+        "cause": "कारण",
+        "effect": "असर",
+    },
+)
+
+doc_to_text_or = partial(
+    doc_to_text,
+    connector={
+        "cause": "କାରଣ",
+        "effect": "ପ୍ରଭାବ",
+    },
+)
+
+doc_to_text_sa = partial(
+    doc_to_text,
+    connector={
+        "cause": "निमित्तम्‌",
+        "effect": "परिणाम",
+    },
+)
+
+doc_to_text_sd = partial(
+    doc_to_text,
+    connector={
+        "cause": "سبب",
+        "effect": "اثر",
+    },
+)
+
+doc_to_text_ta = partial(
+    doc_to_text,
+    connector={
+        "cause": "காரணம்",
+        "effect": "விளைவு",
+    },
+)
+
+doc_to_text_te = partial(
+    doc_to_text,
+    connector={
+        "cause": "కారణం",
+        "effect": "ప్రభావం",
+    },
+)
+
+doc_to_text_ur = partial(
+    doc_to_text,
+    connector={
+        "cause": "وجہ",
+        "effect": "اثر",
+    },
+)
diff --git a/lm-evaluation-harness/lm_eval/tasks/race/README.md b/lm-evaluation-harness/lm_eval/tasks/race/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..dfe6c5e8a50da470e22be690e9e10612d830f957
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/race/README.md
@@ -0,0 +1,62 @@
+# RACE
+
+### Paper
+
+Title: `RACE: Large-scale ReAding Comprehension Dataset From Examinations`
+
+Abstract: https://arxiv.org/abs/1704.04683
+
+RACE is a large-scale reading comprehension dataset with more than 28,000 passages and nearly 100,000 questions. The dataset is collected from English examinations in China, which are designed for middle school and high school students. The dataset can serve as the training and test sets for machine comprehension.
+
+Homepage: https://www.cs.cmu.edu/~glai1/data/race/
+
+
+### Citation
+
+```
+@inproceedings{lai-etal-2017-race,
+    title = "{RACE}: Large-scale {R}e{A}ding Comprehension Dataset From Examinations",
+    author = "Lai, Guokun and
+      Xie, Qizhe and
+      Liu, Hanxiao and
+      Yang, Yiming and
+      Hovy, Eduard",
+    editor = "Palmer, Martha and
+      Hwa, Rebecca and
+      Riedel, Sebastian",
+    booktitle = "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing",
+    month = sep,
+    year = "2017",
+    address = "Copenhagen, Denmark",
+    publisher = "Association for Computational Linguistics",
+    url = "https://aclanthology.org/D17-1082",
+    doi = "10.18653/v1/D17-1082",
+    pages = "785--794"
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+* Not part of a group yet.
+
+#### Tasks
+
+* `race`
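+
+#### Example
+
+A minimal sketch of the few-shot context that `preprocess_race.doc_to_text` (added below) builds; the article and questions are hypothetical, and the import path is an assumption:
+
+```python
+# Sketch: build a RACE prompt from a hypothetical document.
+from lm_eval.tasks.race import preprocess_race  # assumed import path
+
+doc = {
+    "article": "Tom lives in a small town. He walks to school every day.",
+    # "problems" is stored as a string and parsed with ast.literal_eval.
+    "problems": str(
+        [
+            {"question": "Tom goes to school _ .", "options": ["by bus", "by car", "on foot", "by bike"], "answer": "C"},
+            {"question": "Where does Tom live?", "options": ["A city", "A small town", "A farm", "A village"], "answer": "B"},
+        ]
+    ),
+}
+print(preprocess_race.doc_to_text(doc))
+# Article: Tom lives in a small town. He walks to school every day.
+#
+# Tom goes to school on foot.
+# Where does Tom live?
+print(preprocess_race.doc_to_choice(doc))
+# ['A city', 'A small town', 'A farm', 'A village']
+```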
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [ ] Is the task an existing benchmark in the literature?
+  * [ ] Have you referenced the original paper that introduced the task?
+  * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
diff --git a/lm-evaluation-harness/lm_eval/tasks/race/preprocess_race.py b/lm-evaluation-harness/lm_eval/tasks/race/preprocess_race.py
new file mode 100644
index 0000000000000000000000000000000000000000..03a214e5747876325d118bf4660b0e5c7e9d5142
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/race/preprocess_race.py
@@ -0,0 +1,40 @@
+import ast
+
+
+def process_ast(string):
+    return ast.literal_eval(string)
+
+
+def last_problem(doc):
+    return process_ast(doc["problems"])[-1]
+
+
+def get_answer_option(problem):
+    letter_to_num = {"A": 0, "B": 1, "C": 2, "D": 3}
+    answer = letter_to_num[problem["answer"]]
+    return problem["options"][answer]
+
+
+def doc_to_choice(doc):
+    problem = last_problem(doc)
+    choices = [problem["options"][i] for i in range(4)]
+    return choices
+
+
+def doc_to_text(doc):
+    text = "Article: " + doc["article"] + "\n\n"
+    for problem in process_ast(doc["problems"])[:-1]:
+        question = problem["question"]
+        if question.endswith(" _ ."):
+            # Cloze-style question: splice the correct option into the blank.
+            text += question[:-4] + " " + get_answer_option(problem) + ".\n"
+        else:
+            question_line = "Question: " + question + "\n"
+            answer_line = "Answer: " + get_answer_option(problem) + "\n"
+            text += question_line + answer_line
+    text += last_problem(doc)["question"]
+    return text
+
+
+def doc_to_target(doc):
+    letter_to_num = {"A": 0, "B": 1, "C": 2, "D": 3}
+    answer = letter_to_num[last_problem(doc)["answer"]]
+    return answer
diff --git a/lm-evaluation-harness/lm_eval/tasks/race/race.yaml b/lm-evaluation-harness/lm_eval/tasks/race/race.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b90b809f6120924f398372a454ce4ba74220bbe9
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/race/race.yaml
@@ -0,0 +1,16 @@
+task: race
+dataset_path: EleutherAI/race
+dataset_name: high
+output_type: multiple_choice
+test_split: test
+doc_to_text: !function preprocess_race.doc_to_text
+doc_to_target: !function preprocess_race.doc_to_target
+doc_to_choice: !function preprocess_race.doc_to_choice
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
+metadata:
+  version: 2.0
+dataset_kwargs:
+  trust_remote_code: true
diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/README.md b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e4be02eb8928f255e8a63b0864595407308bf8ed
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/README.md
@@ -0,0 +1,47 @@
+# TMMLU+
+
+### Paper
+
+Title: `An Improved Traditional Chinese Evaluation Suite for Foundation Model`
+
+Abstract: `We present TMMLU+, a comprehensive dataset designed for Traditional Chinese massive multitask language understanding. TMMLU+ is a multiple-choice question-answering dataset with 66 subjects from elementary to professional level. Compared to its predecessor, TMMLU, TMMLU+ is six times larger and boasts a more balanced subject distribution. We included benchmark results in TMMLU+ from closed-source models and 24 open-weight Chinese large language models of parameters ranging from 1.8B to 72B. Our findings reveal that Traditional Chinese models still trail behind their Simplified Chinese counterparts. Additionally, current large language models have yet to outperform human performance in average scores. We publicly release our dataset and the corresponding benchmark source code.`
+
+
+Homepage: [https://huggingface.co/datasets/ikala/tmmluplus](https://huggingface.co/datasets/ikala/tmmluplus)
+
+
+### Citation
+
+```
+@article{ikala2024improved,
+    title={An Improved Traditional Chinese Evaluation Suite for Foundation Model},
+    author={Tam, Zhi-Rui and Pai, Ya-Ting and Lee, Yen-Wei and Cheng, Sega and Shuai, Hong-Han},
+    journal={arXiv preprint arXiv:2403.01858},
+    year={2024}
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+* `tmmluplus`: The dataset comprises 22,690 multiple-choice questions from 66 subjects ranging from primary to professional level.
+
+#### Tasks
+
+The following tasks evaluate subjects in the TMMLU+ dataset using loglikelihood-based multiple-choice scoring:
+
+* `tmmluplus_{subject_english}`
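+
+#### Example
+
+The per-subject configs under `default/` all follow one pattern. Below is a minimal sketch of how they could be generated from `subject.tsv` (added later in this change); the category-to-group mapping here is an illustrative assumption, not the exact mapping used to produce the committed files:
+
+```python
+# Sketch: emit one task config per row of subject.tsv.
+import csv
+
+# Illustrative mapping from subject.tsv categories to harness groups.
+CATEGORY_TO_GROUP = {
+    "math": "STEM", "physics": "STEM", "chemistry": "STEM",
+    "biology": "STEM", "engineering": "STEM", "computer science": "STEM",
+    "psychology": "social_sciences", "geography": "social_sciences",
+    "economics": "social_sciences", "politics": "social_sciences",
+    "law": "humanities", "philosophy": "humanities",
+}
+
+with open("subject.tsv", newline="", encoding="utf-8") as f:
+    for row in csv.DictReader(f, delimiter="\t"):
+        group = CATEGORY_TO_GROUP.get(row["category"], "other")
+        config = (
+            f'"dataset_name": "{row["subject"]}"\n'
+            f'"description": "以下為{row["name"]}的單選題,請提供正確答案的選項。\\n\\n"\n'
+            f'"group": "tmmluplus_{group}"\n'
+            f'"group_alias": "{group.replace("_", " ")}"\n'
+            f'"include": "_default_template_yaml"\n'
+            f'"task": "tmmluplus_{row["subject"]}"\n'
+            f'"task_alias": "{row["subject"].replace("_", " ")}"\n'
+        )
+        out_path = f"default/tmmluplus_{row['subject']}.yaml"
+        with open(out_path, "w", encoding="utf-8") as out:
+            out.write(config)
+```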
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [x] Is the task an existing benchmark in the literature?
+  * [x] Have you referenced the original paper that introduced the task?
+  * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [x] Is the "Main" variant of this task clearly denoted?
+* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [x] Have you noted which, if any, published evaluation setups are matched by this variant?
diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_accounting.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_accounting.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9e27c919efd224866e9ceeb38424e0147a454193
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_accounting.yaml
@@ -0,0 +1,7 @@
+"dataset_name": "accounting"
+"description": "以下為會計學的單選題,請提供正確答案的選項。\n\n"
+"group": "tmmluplus_other"
+"group_alias": "other"
+"include": "_default_template_yaml"
+"task": "tmmluplus_accounting"
+"task_alias": "accounting"
diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_advance_chemistry.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_advance_chemistry.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d5baa64be2e643521a1f486a4618babca2ca4ef6
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_advance_chemistry.yaml
@@ -0,0 +1,7 @@
+"dataset_name": "advance_chemistry"
+"description": "以下為化學的單選題,請提供正確答案的選項。\n\n"
+"group": "tmmluplus_STEM"
+"group_alias": "STEM"
+"include": "_default_template_yaml"
+"task": "tmmluplus_advance_chemistry"
+"task_alias": "advance chemistry"
diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_auditing.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_auditing.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a8168029b29291cab1e6f596acd51e00699e3cf2
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_auditing.yaml
@@ -0,0 +1,7 @@
+"dataset_name": "auditing"
+"description": "以下為審計學的單選題,請提供正確答案的選項。\n\n"
+"group": "tmmluplus_other"
+"group_alias": "other"
+"include": "_default_template_yaml"
+"task": "tmmluplus_auditing"
+"task_alias": "auditing"
diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_basic_medical_science.yaml
b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_basic_medical_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d329b78a488839aaa46007bc83db187c5c1cd562 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_basic_medical_science.yaml @@ -0,0 +1,7 @@ +"dataset_name": "basic_medical_science" +"description": "以下為基礎醫學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_basic_medical_science" +"task_alias": "basic medical science" diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_business_management.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_business_management.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9cacf04896d941ec705d1c3774952cbb516236f5 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_business_management.yaml @@ -0,0 +1,7 @@ +"dataset_name": "business_management" +"description": "以下為企業管理的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_business_management" +"task_alias": "business management" diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_culinary_skills.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_culinary_skills.yaml new file mode 100644 index 0000000000000000000000000000000000000000..457eac1d18465a434abfd4916acffb8ac7d30529 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_culinary_skills.yaml @@ -0,0 +1,7 @@ +"dataset_name": "culinary_skills" +"description": "以下為餐旅的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_culinary_skills" +"task_alias": "culinary skills" diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_economics.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_economics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2ed100fb42d428d0afd0c26f560da9700eb30b04 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_economics.yaml @@ -0,0 +1,7 @@ +"dataset_name": "economics" +"description": "以下為經濟學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_economics" +"task_alias": "economics" diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_educational_psychology.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_educational_psychology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bed8f2d41be8aa0349ea47a153212d8ffa7e5bb3 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_educational_psychology.yaml @@ -0,0 +1,7 @@ +"dataset_name": "educational_psychology" +"description": "以下為教育心理的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_educational_psychology" +"task_alias": "educational psychology" diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_finance_banking.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_finance_banking.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..e60086e12e5e97ec9df7ff5c616df95f92762ed1 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_finance_banking.yaml @@ -0,0 +1,7 @@ +"dataset_name": "finance_banking" +"description": "以下為金融與法規的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_finance_banking" +"task_alias": "finance banking" diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_geography_of_taiwan.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_geography_of_taiwan.yaml new file mode 100644 index 0000000000000000000000000000000000000000..80ab36b73d77f58ef11f6a6aa047b51d2ca2cad2 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_geography_of_taiwan.yaml @@ -0,0 +1,7 @@ +"dataset_name": "geography_of_taiwan" +"description": "以下為台灣地理的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_geography_of_taiwan" +"task_alias": "geography of taiwan" diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_chemistry.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_chemistry.yaml new file mode 100644 index 0000000000000000000000000000000000000000..de9c0691cf1e5d3c4773f831c77d8056f355277f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_chemistry.yaml @@ -0,0 +1,7 @@ +"dataset_name": "junior_chemistry" +"description": "以下為國中理化的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_junior_chemistry" +"task_alias": "junior chemistry" diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_chinese_exam.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_chinese_exam.yaml new file mode 100644 index 0000000000000000000000000000000000000000..937090520ef5e21d5877a46d4c9b1530b56ecba1 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_chinese_exam.yaml @@ -0,0 +1,7 @@ +"dataset_name": "junior_chinese_exam" +"description": "以下為國中會考基測國文的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_junior_chinese_exam" +"task_alias": "junior chinese exam" diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_math_exam.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_math_exam.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a869a55f1445123550389b44b718df00d4dd2ef5 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_math_exam.yaml @@ -0,0 +1,7 @@ +"dataset_name": "junior_math_exam" +"description": "以下為國中會考基測數學科的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_junior_math_exam" +"task_alias": "junior math exam" diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_mechanical.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_mechanical.yaml new file mode 100644 index 0000000000000000000000000000000000000000..81ea0dce68b2a7d0be1733fd94fc37c997bf894f --- /dev/null +++ 
b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_mechanical.yaml @@ -0,0 +1,7 @@ +"dataset_name": "mechanical" +"description": "以下為機械與機電概論的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_mechanical" +"task_alias": "mechanical" diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_occupational_therapy_for_psychological_disorders.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_occupational_therapy_for_psychological_disorders.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ba2bfa827ed9f5edd8b0799fa9ca9127e16f7f4e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_occupational_therapy_for_psychological_disorders.yaml @@ -0,0 +1,7 @@ +"dataset_name": "occupational_therapy_for_psychological_disorders" +"description": "以下為心理障礙職能治療學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_occupational_therapy_for_psychological_disorders" +"task_alias": "occupational therapy for psychological disorders" diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_pharmacy.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_pharmacy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a87aa4be10228833b31b3c29c9bda9d6f5dcf8bf --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_pharmacy.yaml @@ -0,0 +1,7 @@ +"dataset_name": "pharmacy" +"description": "以下為藥劑學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_pharmacy" +"task_alias": "pharmacy" diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_secondary_physics.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_secondary_physics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6865167cb1f51310ba30d7b4745e62bc878c5d8f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_secondary_physics.yaml @@ -0,0 +1,7 @@ +"dataset_name": "secondary_physics" +"description": "以下為高中物理的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_secondary_physics" +"task_alias": "secondary physics" diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_statistics_and_machine_learning.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_statistics_and_machine_learning.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9a6e68ba514f60c8c2f6760a80a09a3cd65eb1a0 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_statistics_and_machine_learning.yaml @@ -0,0 +1,7 @@ +"dataset_name": "statistics_and_machine_learning" +"description": "以下為統計與機器學習的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_statistics_and_machine_learning" +"task_alias": "statistics and machine learning" diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_trade.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_trade.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1c42e22e034952c8e26ebff70d3b028e5aee643e --- /dev/null +++ 
b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_trade.yaml @@ -0,0 +1,7 @@ +"dataset_name": "trade" +"description": "以下為貿易的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_trade" +"task_alias": "trade" diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_trust_practice.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_trust_practice.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0c1e7ae3f44ea9c3fe3f11d41cb1353f787359b4 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_trust_practice.yaml @@ -0,0 +1,7 @@ +"dataset_name": "trust_practice" +"description": "以下為信託實務的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "tmmluplus_trust_practice" +"task_alias": "trust practice" diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_ttqav2.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_ttqav2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2216d4d46ca7331e2eae24a24affcaf51b68225f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_ttqav2.yaml @@ -0,0 +1,7 @@ +"dataset_name": "ttqav2" +"description": "以下為台灣在地用語的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_social_sciences" +"group_alias": "social sciences" +"include": "_default_template_yaml" +"task": "tmmluplus_ttqav2" +"task_alias": "ttqav2" diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_design.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_design.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e36b1548025bb6a9a9f95cf58833ba94465aab4e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_design.yaml @@ -0,0 +1,7 @@ +"dataset_name": "tve_design" +"description": "以下為統測 設計的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_tve_design" +"task_alias": "tve design" diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_mathematics.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_mathematics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b4158f17e9cf040bd5c38419493c2c3c50227ff4 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_mathematics.yaml @@ -0,0 +1,7 @@ +"dataset_name": "tve_mathematics" +"description": "以下為統測數學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_tve_mathematics" +"task_alias": "tve mathematics" diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_natural_sciences.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_natural_sciences.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c2ecf4ad00c56eb7835bbdf2ce8d5abb33457d07 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_tve_natural_sciences.yaml @@ -0,0 +1,7 @@ +"dataset_name": "tve_natural_sciences" +"description": "以下為統測自然科的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_STEM" +"group_alias": "STEM" +"include": "_default_template_yaml" +"task": "tmmluplus_tve_natural_sciences" +"task_alias": "tve natural sciences" 
diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_veterinary_pathology.yaml b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_veterinary_pathology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c1fcb43897705ecf140fcbc34c69fb0b74f66331 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_veterinary_pathology.yaml @@ -0,0 +1,7 @@ +"dataset_name": "veterinary_pathology" +"description": "以下為獸醫病理學的單選題,請提供正確答案的選項。\n\n" +"group": "tmmluplus_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "tmmluplus_veterinary_pathology" +"task_alias": "veterinary pathology" diff --git a/lm-evaluation-harness/lm_eval/tasks/tmmluplus/subject.tsv b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/subject.tsv new file mode 100644 index 0000000000000000000000000000000000000000..4dc4b03e0feba9c62e64927f8fe2010327058141 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/tmmluplus/subject.tsv @@ -0,0 +1,68 @@ +subject name category +dentistry 牙醫學 health +traditional_chinese_medicine_clinical_medicine 中醫臨床醫學 health +clinical_psychology 臨床心理學 psychology +technical 技術工相關 other +culinary_skills 餐旅 other +mechanical 機械與機電概論 other +logic_reasoning 邏輯思維 other +real_estate 房地產 other +general_principles_of_law 法學大意 law +finance_banking 金融與法規 business +anti_money_laundering 洗錢防制 law +ttqav2 台灣在地用語 culture +marketing_management 行銷管理 other +business_management 企業管理 other +organic_chemistry 有機化學 chemistry +advance_chemistry 化學 chemistry +physics 物理 physics +secondary_physics 高中物理 physics +human_behavior 人類行為與社會 psychology +national_protection 軍事 politics +jce_humanities 指考人文科目 philosophy +linear_algebra 線代 math +politic_science 政治 politics +agriculture 農業 other +official_document_management 機關文書 other +financial_analysis 財務分析 business +pharmacy 藥劑學 biology +educational_psychology 教育心理 psychology +statistics_and_machine_learning 統計與機器學習 engineering +management_accounting 管理會計 business +introduction_to_law 法律概論 law +computer_science 資訊工程 computer science +veterinary_pathology 獸醫病理學 health +accounting 會計學 business +fire_science 火災學 other +optometry 視光學 other +insurance_studies 保險學 other +pharmacology 藥理學 health +taxation 稅務 law +education_(profession_level) 教育專業 education +economics 經濟學 economics +veterinary_pharmacology 獸醫藥理學 health +nautical_science 航海 other +occupational_therapy_for_psychological_disorders 心理障礙職能治療學 psychology +trust_practice 信託實務 law +geography_of_taiwan 台灣地理 geography +physical_education 體育 education +auditing 審計學 business +administrative_law 行政法 law +basic_medical_science 基礎醫學 biology +macroeconomics 總經 economics +trade 貿易 business +chinese_language_and_literature 國文 culture +tve_design 統測_設計 other +junior_science_exam 國中會考基測自然科 biology +junior_math_exam 國中會考基測數學科 math +junior_chinese_exam 國中會考基測國文 culture +junior_social_studies 國中會考基測社會科 other +tve_mathematics 統測數學 math +tve_chinese_language 統測國文 culture +tve_natural_sciences 統測自然科 biology +junior_chemistry 國中理化 chemistry +music 音樂科 other +education 教育常識 education +three_principles_of_people 三民主義 culture +taiwanese_hokkien 閩南語 culture +engineering_math 工程數學 math diff --git a/lm-evaluation-harness/lm_eval/tasks/wmdp/README.md b/lm-evaluation-harness/lm_eval/tasks/wmdp/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f6074d47102d60dbf6acc4408eb64ee4d379559f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/wmdp/README.md @@ -0,0 +1,50 @@ +# WMDP + +### Paper + +Title: `The WMDP Benchmark: Measuring 
and Reducing Malicious Use With Unlearning`
+
+Abstract: https://arxiv.org/abs/2403.03218
+
+`The Weapons of Mass Destruction Proxy (WMDP) benchmark is a dataset of 4,157 multiple-choice questions surrounding hazardous knowledge in biosecurity, cybersecurity, and chemical security. WMDP serves as both a proxy evaluation for hazardous knowledge in large language models (LLMs) and a benchmark for unlearning methods to remove such knowledge.`
+
+Homepage: https://wmdp.ai
+
+
+### Citation
+
+```
+@misc{li2024wmdp,
+    title={The WMDP Benchmark: Measuring and Reducing Malicious Use With Unlearning},
+    author={Nathaniel Li and Alexander Pan and Anjali Gopal and Summer Yue and Daniel Berrios and Alice Gatti and Justin D. Li and Ann-Kathrin Dombrowski and Shashwat Goel and Long Phan and Gabriel Mukobi and Nathan Helm-Burger and Rassin Lababidi and Lennart Justen and Andrew B. Liu and Michael Chen and Isabelle Barrass and Oliver Zhang and Xiaoyuan Zhu and Rishub Tamirisa and Bhrugu Bharathi and Adam Khoja and Zhenqi Zhao and Ariel Herbert-Voss and Cort B. Breuer and Andy Zou and Mantas Mazeika and Zifan Wang and Palash Oswal and Weiran Liu and Adam A. Hunt and Justin Tienken-Harder and Kevin Y. Shih and Kemper Talley and John Guan and Russell Kaplan and Ian Steneker and David Campbell and Brad Jokubaitis and Alex Levinson and Jean Wang and William Qian and Kallol Krishna Karmakar and Steven Basart and Stephen Fitz and Mindy Levine and Ponnurangam Kumaraguru and Uday Tupakula and Vijay Varadharajan and Yan Shoshitaishvili and Jimmy Ba and Kevin M. Esvelt and Alexandr Wang and Dan Hendrycks},
+    year={2024},
+    eprint={2403.03218},
+    archivePrefix={arXiv},
+    primaryClass={cs.LG}
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+* `wmdp`: All 4,157 multiple-choice questions in biosecurity, cybersecurity, and chemical security
+
+#### Tasks
+
+* `wmdp_bio`: 1,520 multiple-choice questions in biosecurity
+* `wmdp_cyber`: 2,225 multiple-choice questions in cybersecurity
+* `wmdp_chemistry`: 412 multiple-choice questions in chemical security
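+
+#### Example
+
+All three tasks share `_default_template_yaml` (added below). A minimal sketch of how its `doc_to_text` Jinja template renders a document; the question and choices are hypothetical, and rendering with `jinja2` directly is an illustrative shortcut for the harness's own template resolution:
+
+```python
+# Sketch: render the shared doc_to_text template against a toy document.
+from jinja2 import Template
+
+TEMPLATE = (
+    "{{question.strip()}}\n"
+    "A. {{choices[0]}}\n"
+    "B. {{choices[1]}}\n"
+    "C. {{choices[2]}}\n"
+    "D. {{choices[3]}}\n"
+    "Answer:"
+)
+doc = {
+    "question": "Which of the following best describes a buffer overflow?",
+    "choices": [
+        "Writing past the end of an allocated region of memory",
+        "Encrypting traffic between two hosts",
+        "Compressing a file before transmission",
+        "Scheduling processes on multiple cores",
+    ],
+    "answer": 0,  # doc_to_target: index 0 maps to choice "A"
+}
+print(Template(TEMPLATE).render(**doc))
+```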
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [x] Is the task an existing benchmark in the literature?
+  * [x] Have you referenced the original paper that introduced the task?
+  * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
diff --git a/lm-evaluation-harness/lm_eval/tasks/wmdp/_default_template_yaml b/lm-evaluation-harness/lm_eval/tasks/wmdp/_default_template_yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e9d1c804bc2e248feaa1d132de6f0279f032d0c
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/wmdp/_default_template_yaml
@@ -0,0 +1,16 @@
+dataset_path: cais/wmdp
+group: wmdp
+test_split: test
+training_split: null
+validation_split: null
+num_fewshot: 0
+output_type: multiple_choice
+doc_to_text: "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:"
+doc_to_choice: ["A", "B", "C", "D"]
+doc_to_target: answer
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
+metadata:
+  version: 0
diff --git a/lm-evaluation-harness/lm_eval/tasks/wmdp/wmdp_bio.yaml b/lm-evaluation-harness/lm_eval/tasks/wmdp/wmdp_bio.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1096b6f873048709ea16b189c3a244856a2272c0
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/wmdp/wmdp_bio.yaml
@@ -0,0 +1,4 @@
+"task": "wmdp_bio"
+"dataset_name": "wmdp-bio"
+"include": "_default_template_yaml"
+"description": "The following are multiple choice questions (with answers) about biology.\n\n"
diff --git a/lm-evaluation-harness/lm_eval/tasks/wmdp/wmdp_cyber.yaml b/lm-evaluation-harness/lm_eval/tasks/wmdp/wmdp_cyber.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cac9ba825d719ac7a651ba24443ee6d7fa22567f
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/wmdp/wmdp_cyber.yaml
@@ -0,0 +1,4 @@
+"task": "wmdp_cyber"
+"dataset_name": "wmdp-cyber"
+"include": "_default_template_yaml"
+"description": "The following are multiple choice questions (with answers) about cybersecurity.\n\n"
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Almaty b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Almaty
new file mode 100644
index 0000000000000000000000000000000000000000..855abbd6e3ee338c3d46bee7b592f3d645d2c273
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Almaty differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Baku b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Baku
new file mode 100644
index 0000000000000000000000000000000000000000..ae0ce4e7c3d273b537fa3653784e73be68ebb441
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Baku differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Bangkok b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Bangkok
new file mode 100644
index 0000000000000000000000000000000000000000..fa799db39e7625dd74bd9caa5c29b4819a7cbd3f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Bangkok differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Beirut b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Beirut
new file mode 100644
index 0000000000000000000000000000000000000000..fb266ede2279b6aff913538d9d5aae3935e53aeb
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Beirut differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Bishkek b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Bishkek
new file mode 100644
index 0000000000000000000000000000000000000000..547fd5e1bd16152073f7237ad265b5c643e7cbfe
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Bishkek differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Brunei b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Brunei
new file mode 100644
index 0000000000000000000000000000000000000000..098c6a0b0afc2c3ef3a6937104d270074ff2a9b6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Brunei differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Choibalsan b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Choibalsan
new file mode 100644
index 0000000000000000000000000000000000000000..c5f4bb0b3858f7078b35cb7a63021d7dc0262605
Binary files /dev/null and
b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Choibalsan differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Dushanbe b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Dushanbe new file mode 100644 index 0000000000000000000000000000000000000000..89e875beaef780b2ceae08ee4703cd813f33d6a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Dushanbe differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Gaza b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Gaza new file mode 100644 index 0000000000000000000000000000000000000000..dd5781e8ae26ed66aa2650c580c561fae50df4b5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Gaza differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Harbin b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Harbin new file mode 100644 index 0000000000000000000000000000000000000000..91f6f8bc2e234bafd484146986bdb289082c3588 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Harbin differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Hovd b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Hovd new file mode 100644 index 0000000000000000000000000000000000000000..8b9abca344daf199b554888fc4fff562a7a18e4a Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Hovd differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Jakarta b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Jakarta new file mode 100644 index 0000000000000000000000000000000000000000..ec4bd5747a8c9c528dfd22c8e3171851784ade59 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Jakarta differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Kamchatka b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Kamchatka new file mode 100644 index 0000000000000000000000000000000000000000..99776f515fd53a86a6886311d91ff33b401396c3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Kamchatka differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Kathmandu b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Kathmandu new file mode 100644 index 0000000000000000000000000000000000000000..751cf4a8939e898f8abe4358a98b2a5f59f65e21 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Kathmandu differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Kuala_Lumpur b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Kuala_Lumpur new file mode 100644 index 0000000000000000000000000000000000000000..3d9f191e3acad56fb4f73ab44a74c50cdf91c12e Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Kuala_Lumpur differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Kuching b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Kuching new file mode 100644 index 0000000000000000000000000000000000000000..098c6a0b0afc2c3ef3a6937104d270074ff2a9b6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Kuching differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Macau b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Macau new file mode 100644 index 0000000000000000000000000000000000000000..cac65063d0dbf48e37c547fba3b67f34110d5a90 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Macau differ diff --git 
a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Manila b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Manila new file mode 100644 index 0000000000000000000000000000000000000000..f4f4b04efa2b6a442d4072b3899f4dae69bdd771 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Manila differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Phnom_Penh b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Phnom_Penh new file mode 100644 index 0000000000000000000000000000000000000000..fa799db39e7625dd74bd9caa5c29b4819a7cbd3f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Phnom_Penh differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Pontianak b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Pontianak new file mode 100644 index 0000000000000000000000000000000000000000..12ce24cbeae404efe6921081d21289be452ff88d Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Pontianak differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Qyzylorda b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Qyzylorda new file mode 100644 index 0000000000000000000000000000000000000000..27b522a7d5e24eafdf29dd541ebbca69ce4db7b8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Qyzylorda differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Riyadh b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Riyadh new file mode 100644 index 0000000000000000000000000000000000000000..8c8062471dce91a5be827d6908795ee7391a4afc Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Riyadh differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Saigon b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Saigon new file mode 100644 index 0000000000000000000000000000000000000000..9c45ed991a230919d129c63c660aa8d1ad74d0ea Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Saigon differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Samarkand b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Samarkand new file mode 100644 index 0000000000000000000000000000000000000000..8a93767bfef2cecbeee15db413a0a7c277e761a1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Samarkand differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Singapore b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Singapore new file mode 100644 index 0000000000000000000000000000000000000000..3d9f191e3acad56fb4f73ab44a74c50cdf91c12e Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Singapore differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Tashkent b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Tashkent new file mode 100644 index 0000000000000000000000000000000000000000..a9f6cd93c849c8b1677bc54f8bb836c91b40d63d Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Tashkent differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Ujung_Pandang b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Ujung_Pandang new file mode 100644 index 0000000000000000000000000000000000000000..556ba866933d37f3cfcf8042045d64e209bae30f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Ujung_Pandang differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Urumqi 
b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Urumqi new file mode 100644 index 0000000000000000000000000000000000000000..62bdcac14db3f464ff561e32db2c6b55c0cb1866 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Urumqi differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Ust-Nera b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Ust-Nera new file mode 100644 index 0000000000000000000000000000000000000000..d05726aba9fd67bf230290b3e2d74b75e46ee214 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Ust-Nera differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Yangon b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Yangon new file mode 100644 index 0000000000000000000000000000000000000000..eef37b42e8a0e7179f8113bea01f4a71d668e8ef Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Yangon differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Yerevan b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Yerevan new file mode 100644 index 0000000000000000000000000000000000000000..0d5f6853ac79c41fcb445a23062a27aca9e471f3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Asia/Yerevan differ