diff --git a/ckpts/universal/global_step20/zero/21.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step20/zero/21.mlp.dense_h_to_4h_swiglu.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..dcabcb976d130c153d6de61b0aeeaa5d6de44dd2 --- /dev/null +++ b/ckpts/universal/global_step20/zero/21.mlp.dense_h_to_4h_swiglu.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:995b8b4d667bd43acf2b892d7812e368ae4198287ad15f38b6e77e7f99b13bf6 +size 33555533 diff --git a/lm-evaluation-harness/lm_eval/tasks/benchmarks/flan/_held_in_template_yaml b/lm-evaluation-harness/lm_eval/tasks/benchmarks/flan/_held_in_template_yaml new file mode 100644 index 0000000000000000000000000000000000000000..c19b47cdae40bbc0ff91236d2048992f314172f0 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/benchmarks/flan/_held_in_template_yaml @@ -0,0 +1,14 @@ +output_type: generate_until +test_split: null +doc_to_choice: null +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true +generation_kwargs: + until: + - "" + do_sample: false + temperature: 0.0 +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/benchmarks/flan/flan_held_in.yaml b/lm-evaluation-harness/lm_eval/tasks/benchmarks/flan/flan_held_in.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5796713506e3b2e6632f4df0d60c4c19377693ad --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/benchmarks/flan/flan_held_in.yaml @@ -0,0 +1,331 @@ +group: flan_held_in +group_alias: Flan (Held-In) +task: + # ANLI R1 + - group: anli_r1_flan + group_alias: ANLI R1 + task: + - task: anli_r1 + task_alias: prompt-0 + include: _held_in_template_yaml + doc_to_text: "{{premise}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r1 + task_alias: prompt-1 + include: _held_in_template_yaml + doc_to_text: "{{premise}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r1 + task_alias: prompt-2 + include: _held_in_template_yaml + doc_to_text: "{{premise}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r1 + task_alias: prompt-3 + include: _held_in_template_yaml + doc_to_text: "{{premise}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r1 + task_alias: prompt-4 + include: _held_in_template_yaml + doc_to_text: "{{premise}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r1 + task_alias: prompt-5 + include: _held_in_template_yaml + doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{premise}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" 
+ - task: anli_r1 + task_alias: prompt-6 + include: _held_in_template_yaml + doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{premise}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r1 + task_alias: prompt-7 + include: _held_in_template_yaml + doc_to_text: "Can we draw the following hypothesis from the context (see options)? \n\nContext:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r1 + task_alias: prompt-8 + include: _held_in_template_yaml + doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{premise}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + # ANLI R2 + - group: anli_r2_flan + group_alias: ANLI R2 + task: + - task: anli_r2 + task_alias: prompt-0 + include: _held_in_template_yaml + doc_to_text: "{{premise}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r2 + task_alias: prompt-1 + include: _held_in_template_yaml + doc_to_text: "{{premise}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r2 + task_alias: prompt-2 + include: _held_in_template_yaml + doc_to_text: "{{premise}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r2 + task_alias: prompt-3 + include: _held_in_template_yaml + doc_to_text: "{{premise}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r2 + task_alias: prompt-4 + include: _held_in_template_yaml + doc_to_text: "{{premise}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r2 + task_alias: prompt-5 + include: _held_in_template_yaml + doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{premise}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r2 + task_alias: prompt-6 + include: _held_in_template_yaml + doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{premise}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r2 + task_alias: prompt-7 + include: _held_in_template_yaml + doc_to_text: "Can we draw the following hypothesis from the context (see options)? 
\n\nContext:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r2 + task_alias: prompt-8 + include: _held_in_template_yaml + doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{premise}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + # ANLI R3 + - group: anli_r3_flan + group_alias: ANLI R3 + task: + - task: anli_r3 + task_alias: prompt-0 + include: _held_in_template_yaml + doc_to_text: "{{premise}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r3 + task_alias: prompt-1 + include: _held_in_template_yaml + doc_to_text: "{{premise}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r3 + task_alias: prompt-2 + include: _held_in_template_yaml + doc_to_text: "{{premise}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r3 + task_alias: prompt-3 + include: _held_in_template_yaml + doc_to_text: "{{premise}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r3 + task_alias: prompt-4 + include: _held_in_template_yaml + doc_to_text: "{{premise}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r3 + task_alias: prompt-5 + include: _held_in_template_yaml + doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{premise}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r3 + task_alias: prompt-6 + include: _held_in_template_yaml + doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{premise}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r3 + task_alias: prompt-7 + include: _held_in_template_yaml + doc_to_text: "Can we draw the following hypothesis from the context (see options)? 
\n\nContext:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + - task: anli_r3 + task_alias: prompt-8 + include: _held_in_template_yaml + doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{premise}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No" + doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}" + # Arc Easy + - group: arc_easy_flan + group_alias: Arc Easy + task: + - task: arc_easy + task_alias: prompt-0 + include: _held_in_template_yaml + doc_to_text: "{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" + - task: arc_easy + task_alias: prompt-1 + include: _held_in_template_yaml + doc_to_text: "Question: {{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}\nAnswer:" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" + - task: arc_easy + task_alias: prompt-2 + include: _held_in_template_yaml + doc_to_text: "Question: {{question}}\n\nWhat is the correct answer to the question from the following choices?\nOPTIONS:\n- {{choices.text|join('\n- ')}}" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" + - task: arc_easy + task_alias: prompt-3 + include: _held_in_template_yaml + doc_to_text: "Q: {{question}}\nWhat is the correct answer to this question?\nOPTIONS:\n- {{choices.text|join('\n- ')}}...A:" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" + - task: arc_easy + task_alias: prompt-4 + include: _held_in_template_yaml + doc_to_text: "Choose your answer?\n\n{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" + - task: arc_easy + task_alias: prompt-5 + include: _held_in_template_yaml + doc_to_text: "Answer the question\n\n{{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" + - task: arc_easy + task_alias: prompt-6 + include: _held_in_template_yaml + doc_to_text: "{{question}}\n\nPick the answer from these options\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" + # Arc Challenge + - group: arc_challenge_flan + group_alias: Arc Challenge + task: + - task: arc_challenge + task_alias: prompt-0 + include: _held_in_template_yaml + doc_to_text: "{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" + - task: arc_challenge + task_alias: prompt-1 + include: _held_in_template_yaml + doc_to_text: "Question: {{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}\nAnswer:" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" + - task: arc_challenge + task_alias: prompt-2 + include: _held_in_template_yaml + doc_to_text: "Question: {{question}}\n\nWhat is the correct answer to the question from the following choices?\nOPTIONS:\n- {{choices.text|join('\n- ')}}" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" + - task: arc_challenge + task_alias: prompt-3 + include: _held_in_template_yaml + doc_to_text: "Q: {{question}}\nWhat is the correct answer to this question?\nOPTIONS:\n- {{choices.text|join('\n- ')}}...A:" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" + - task: arc_challenge + task_alias: prompt-4 + include: 
_held_in_template_yaml + doc_to_text: "Choose your answer?\n\n{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" + - task: arc_challenge + task_alias: prompt-5 + include: _held_in_template_yaml + doc_to_text: "Answer the question\n\n{{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" + - task: arc_challenge + task_alias: prompt-6 + include: _held_in_template_yaml + doc_to_text: "{{question}}\n\nPick the answer from these options\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}" + doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}" + # BoolQ + - group: boolq_flan + group_alias: BoolQ + task: + - task: boolq + task_alias: prompt-0 + include: _held_in_template_yaml + doc_to_text: "{{passage}}\n\nCan we conclude that {{question}}?\n\nOPTIONS:\n- no\n- yes" + doc_to_target: "{{['no', 'yes'][label]}}" + - task: boolq + task_alias: prompt-1 + include: _held_in_template_yaml + doc_to_text: "{{passage}}\n\nIs it true that {{question}}?\n\nOPTIONS:\n- no\n- yes" + doc_to_target: "{{['no', 'yes'][label]}}" + - task: boolq + task_alias: prompt-2 + include: _held_in_template_yaml + doc_to_text: "{{passage}}\n\n{{question}}?\n\nOPTIONS:\n- no\n- yes" + doc_to_target: "{{['no', 'yes'][label]}}" + - task: boolq + task_alias: prompt-3 + include: _held_in_template_yaml + doc_to_text: "Text: {{passage}}\n\nQuestion: {{question}}?\n\nOPTIONS:\n- no\n- yes" + doc_to_target: "{{['no', 'yes'][label]}}" + - task: boolq + task_alias: prompt-4 + include: _held_in_template_yaml + doc_to_text: "{{passage}}\n\nWhat's the best answer to this question: {{question}}?\n\nOPTIONS:\n- no\n- yes" + doc_to_target: "{{['no', 'yes'][label]}}" + - task: boolq + task_alias: prompt-5 + include: _held_in_template_yaml + doc_to_text: "{{passage}}\nBased on the above text what's the best answer to this question: {{question}}?\n\nOPTIONS:\n- no\n- yes" + doc_to_target: "{{['no', 'yes'][label]}}" + - task: boolq + task_alias: prompt-6 + include: _held_in_template_yaml + doc_to_text: "{{passage}}\nAnswer this question making sure that the answer is supposed by the text: {{question}}?\n\nOPTIONS:\n- no\n- yes" + doc_to_target: "{{['no', 'yes'][label]}}" + - task: boolq + task_alias: prompt-7 + include: _held_in_template_yaml + doc_to_text: "{{passage}}\n\nIs the following statement correct based on the text\n\n{{question}}\n\nOPTIONS:\n- no\n- yes" + doc_to_target: "{{['no', 'yes'][label]}}" + - task: boolq + task_alias: prompt-8 + include: _held_in_template_yaml + doc_to_text: "{{passage}}\n\nIs this statement correct \"{{question}}\"?\n\nOPTIONS:\n- no\n- yes" + doc_to_target: "{{['no', 'yes'][label]}}" + - task: boolq + task_alias: prompt-9 + include: _held_in_template_yaml + doc_to_text: "Is it true that {{question}} based on the following text?\n\n{{passage}}\n\nOPTIONS:\n- no\n- yes" + doc_to_target: "{{['no', 'yes'][label]}}" + # RTE + - group: rte_flan + group_alias: RTE + task: + - task: rte + task_alias: prompt-0 + include: _held_in_template_yaml + doc_to_text: "{{sentence1}}\n\nQuestion with options: Based on the paragraph above can we conclude that \"{{sentence2}}\"?\n\nOPTIONS:\n- yes\n- no" + doc_to_target: "{{['yes', 'no'][label]}}" + - task: rte + task_alias: prompt-1 + include: _held_in_template_yaml + doc_to_text: "{{sentence1}}\n\nBased on that paragraph can we conclude that the sentence below is true?\n{{sentence2}}\n\nOPTIONS:\n- yes\n- no" + doc_to_target: 
"{{['yes', 'no'][label]}}" + - task: rte + task_alias: prompt-2 + include: _held_in_template_yaml + doc_to_text: "{{sentence1}}\n\nQ with options: Can we draw the following conclusion?\n{{sentence2}}\n\nOPTIONS:\n- yes\n- no" + doc_to_target: "{{['yes', 'no'][label]}}" + - task: rte + task_alias: prompt-3 + include: _held_in_template_yaml + doc_to_text: "{{sentence1}}\nDoes this next sentence follow, given the preceding text?\n{{sentence2}}\n\nOPTIONS:\n- yes\n- no" + doc_to_target: "{{['yes', 'no'][label]}}" + - task: rte + task_alias: prompt-4 + include: _held_in_template_yaml + doc_to_text: "{{sentence1}}\nOPTIONS:\n- yes\n- no\nQuestion: Can we infer the following?\n{{sentence2}}" + doc_to_target: "{{['yes', 'no'][label]}}" + - task: rte + task_alias: prompt-5 + include: _held_in_template_yaml + doc_to_text: "Read the following paragraph and determine if the hypothesis is true. Select from options at the end:\n\n{{sentence1}}\n\nHypothesis: {{sentence2}}\nOPTIONS:\n- yes\n- no\nThe answer is" + doc_to_target: "{{['yes', 'no'][label]}}" + - task: rte + task_alias: prompt-6 + include: _held_in_template_yaml + doc_to_text: "Read the text and determine if the sentence is true:\n\n{{sentence1}}\n\nSentence: {{sentence2}}\nOPTIONS:\n- yes\n- no\nA:" + doc_to_target: "{{['yes', 'no'][label]}}" + - task: rte + task_alias: prompt-7 + include: _held_in_template_yaml + doc_to_text: "Question with options: can we draw the following hypothesis from the context? \n\nContext:\n\n{{sentence1}}\n\nHypothesis: {{sentence2}}\nOPTIONS:\n- yes\n- no\nA:" + doc_to_target: "{{['yes', 'no'][label]}}" + - task: rte + task_alias: prompt-8 + include: _held_in_template_yaml + doc_to_text: "Determine if the sentence is true based on the text below. Choose from options.\n{{sentence2}}\n\n{{sentence1}}\nOPTIONS:\n- yes\n- no" + doc_to_target: "{{['yes', 'no'][label]}}" diff --git a/lm-evaluation-harness/lm_eval/tasks/benchmarks/flan/flan_held_out.yaml b/lm-evaluation-harness/lm_eval/tasks/benchmarks/flan/flan_held_out.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cf806b882167dacc83e3baab67fe69d293de6ddc --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/benchmarks/flan/flan_held_out.yaml @@ -0,0 +1,13 @@ +group: flan_held_out +task: + # BBH + - bbh_zeroshot + - bbh_fewshot + - bbh_cot_fewshot + - bbh_cot_zeroshot + # MMLU + - mmlu + - mmlu_flan_n_shot_generative + - mmlu_flan_n_shot_loglikelihood + - mmlu_flan_cot_zeroshot + - mmlu_flan_cot_fewshot diff --git a/lm-evaluation-harness/lm_eval/tasks/benchmarks/minerva_math.yaml b/lm-evaluation-harness/lm_eval/tasks/benchmarks/minerva_math.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6df3203e10fddd06bd2edcfb97984c12a32466be --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/benchmarks/minerva_math.yaml @@ -0,0 +1,9 @@ +group: minerva_math +task: + - minerva_math_algebra + - minerva_math_counting_and_prob + - minerva_math_geometry + - minerva_math_intermediate_algebra + - minerva_math_num_theory + - minerva_math_prealgebra + - minerva_math_precalc diff --git a/lm-evaluation-harness/lm_eval/tasks/benchmarks/multimedqa/README.md b/lm-evaluation-harness/lm_eval/tasks/benchmarks/multimedqa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..de694e47ebeecf52c6d95038019a7ea17a623e52 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/benchmarks/multimedqa/README.md @@ -0,0 +1,43 @@ +# MultiMedQA (multiple-choice subset) + +### Paper + +Title: Large Language Models Encode Clinical 
Knowledge
+
+Abstract: https://arxiv.org/abs/2212.13138
+
+A benchmark combining four existing multiple-choice question answering datasets spanning professional medical exams and research queries.
+
+### Citation
+
+```
+@Article{Singhal2023,
+author={Singhal, Karan and Azizi, Shekoofeh and Tu, Tao and Mahdavi, S. Sara and Wei, Jason and Chung, Hyung Won and Scales, Nathan and Tanwani, Ajay and Cole-Lewis, Heather and Pfohl, Stephen and Payne, Perry and Seneviratne, Martin and Gamble, Paul and Kelly, Chris and Babiker, Abubakr and Sch{\"a}rli, Nathanael and Chowdhery, Aakanksha and Mansfield, Philip and Demner-Fushman, Dina and Ag{\"u}era y Arcas, Blaise and Webster, Dale and Corrado, Greg S. and Matias, Yossi and Chou, Katherine and Gottweis, Juraj and Tomasev, Nenad and Liu, Yun and Rajkomar, Alvin and Barral, Joelle and Semturs, Christopher and Karthikesalingam, Alan and Natarajan, Vivek},
+title={Large language models encode clinical knowledge},
+journal={Nature},
+year={2023},
+month={Aug},
+day={01},
+volume={620},
+number={7972},
+pages={172-180},
+issn={1476-4687},
+doi={10.1038/s41586-023-06291-2},
+url={https://doi.org/10.1038/s41586-023-06291-2}
+}
+```
+
+### Tasks
+
+* [PubMedQA](https://pubmedqa.github.io/) - 1,000 expert-labeled Q&A pairs in which a question and a corresponding PubMed abstract are given as context and a yes/maybe/no answer must be produced. Unlike the rest of the tasks in this suite, PubMedQA is a closed-domain Q&A task.
+* [MedQA](https://github.com/jind11/MedQA) - US Medical License Exam (USMLE) questions with 4 or 5 possible answers. Typically, only the 4-option questions are used.
+* [MedMCQA](https://medmcqa.github.io/) - 4-option multiple choice questions from Indian medical entrance examinations, >191k total questions.
+* [MMLU](https://arxiv.org/abs/2009.03300) - 4-option multiple choice exam questions from a variety of domains. The following 6 domains are utilized here:
+  * Anatomy
+  * Clinical Knowledge
+  * College Medicine
+  * Medical Genetics
+  * Professional Medicine
+  * College Biology
+
+Note that MultiMedQA also includes some short-form and long-form Q&A tasks (LiveQA, MedicationQA, HealthSearchQA). Evaluation on these tasks is usually done by experts and is not typically performed automatically, so they are not included here.
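+
+### Example usage
+
+A minimal sketch (not part of the upstream files in this diff) of running the `multimedqa` group, defined in `multimedqa.yaml` below, through the harness's Python entry point. The checkpoint name is only a placeholder, and argument names may differ slightly between harness versions.
+
+```python
+import lm_eval
+
+results = lm_eval.simple_evaluate(
+    model="hf",                                    # HuggingFace transformers backend
+    model_args="pretrained=EleutherAI/pythia-1b",  # placeholder checkpoint
+    tasks=["multimedqa"],                          # group defined in multimedqa.yaml
+    num_fewshot=0,
+    batch_size=8,
+)
+print(results["results"])                          # per-task metrics for the group
+```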
diff --git a/lm-evaluation-harness/lm_eval/tasks/benchmarks/multimedqa/multimedqa.yaml b/lm-evaluation-harness/lm_eval/tasks/benchmarks/multimedqa/multimedqa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..29810bb491105b4a4e9d01391926a03c0fc8e88c --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/benchmarks/multimedqa/multimedqa.yaml @@ -0,0 +1,17 @@ +group: multimedqa +task: + - pubmedqa + - medmcqa + - medqa_4options + - task: mmlu_anatomy + task_alias: "anatomy (mmlu)" + - task: mmlu_clinical_knowledge + task_alias: "clinical_knowledge (mmlu)" + - task: mmlu_college_medicine + task_alias: "college_medicine (mmlu)" + - task: mmlu_medical_genetics + task_alias: "medical_genetics (mmlu)" + - task: mmlu_professional_medicine + task_alias: "professional_medicine (mmlu)" + - task: mmlu_college_biology + task_alias: "college_biology (mmlu)" diff --git a/lm-evaluation-harness/lm_eval/tasks/benchmarks/openllm.yaml b/lm-evaluation-harness/lm_eval/tasks/benchmarks/openllm.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0296a0a548e1206f70627b4176d79aab7438db75 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/benchmarks/openllm.yaml @@ -0,0 +1,18 @@ +group: openllm +group_alias: Open LLM Leaderboard +task: + - task: arc_challenge + fewshot_split: validation + num_fewshot: 25 + - task: hellaswag + fewshot_split: train + num_fewshot: 10 + - task: truthfulqa + num_fewshot: 0 + - task: mmlu + num_fewshot: 5 + - task: winogrande + fewshot_split: train + num_fewshot: 5 + - task: gsm8k + num_fewshot: 5 diff --git a/lm-evaluation-harness/lm_eval/tasks/benchmarks/pythia.yaml b/lm-evaluation-harness/lm_eval/tasks/benchmarks/pythia.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bdeadd3ce995ce3d4d9340082ede3bf424ba276d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/benchmarks/pythia.yaml @@ -0,0 +1,12 @@ +group: pythia +task: + - lambada_openai + - logiqa + - piqa + - sciq + - wikitext + - winogrande + - wsc + - ai2_arc + - blimp + - mmlu diff --git a/lm-evaluation-harness/lm_eval/tasks/benchmarks/t0_eval.yaml b/lm-evaluation-harness/lm_eval/tasks/benchmarks/t0_eval.yaml new file mode 100644 index 0000000000000000000000000000000000000000..27e7adc41bd2eaffa20b3344cfdf83a52b4d65fc --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/benchmarks/t0_eval.yaml @@ -0,0 +1,127 @@ +group: t0_eval +task: + # Coreference Resolution + - dataset_path: super_glue + dataset_name: wsc.fixed + use_prompt: promptsource:* + training_split: train + validation_split: validation + output_type: generate_until + metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + # Coreference Resolution + - dataset_path: winogrande + dataset_name: winogrande_xl + use_prompt: promptsource:* + training_split: train + validation_split: validation + output_type: generate_until + metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + # Natural Language Inference + - dataset_path: super_glue + dataset_name: cb + use_prompt: promptsource:* + training_split: train + validation_split: validation + output_type: generate_until + metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + - dataset_path: super_glue + dataset_name: rte + use_prompt: promptsource:* + training_split: train + validation_split: validation + output_type: 
generate_until + metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + - task: anli_r1 + dataset_path: anli + use_prompt: promptsource:* + training_split: train_r1 + validation_split: dev_r1 + output_type: generate_until + metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + - task: anli_r2 + dataset_path: anli + use_prompt: promptsource:* + training_split: train_r2 + validation_split: dev_r2 + output_type: generate_until + metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + - task: anli_r3 + dataset_path: anli + use_prompt: promptsource:* + training_split: train_r3 + validation_split: dev_r3 + output_type: generate_until + metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + # Sentence Completion + - dataset_path: super_glue + dataset_name: copa + use_prompt: promptsource:* + training_split: train + validation_split: validation + output_type: generate_until + metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + # Natural Language Inference + - dataset_path: hellaswag + use_prompt: promptsource:* + training_split: train + validation_split: validation + output_type: generate_until + metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + # Word Sense Disambiguation + - dataset_path: super_glue + dataset_name: wic + use_prompt: promptsource:* + training_split: train + validation_split: validation + output_type: generate_until + metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true diff --git a/lm-evaluation-harness/lm_eval/tasks/eus_proficiency/README.md b/lm-evaluation-harness/lm_eval/tasks/eus_proficiency/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6671bda477e4533204c8ba154323e40d3df23f79 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/eus_proficiency/README.md @@ -0,0 +1,48 @@ +# EusProficiency + +### Paper + +Title: Latxa: An Open Language Model and Evaluation Suite for Basque + +Abstract: https://arxiv.org/abs/2403.20266 + +EusProficiency comprises 5,169 exercises on different topics from past EGA exams, the official C1-level certificate of proficiency in Basque. We collected the atarikoa exercises from EGA exams through the years 1998 to 2008. Atarikoa is the first qualifying test of EGA, which measures different aspects of language competency, such as reading comprehension, grammar, vocabulary, spelling, and writing. Each test generally has 85 multiple-choice questions, with 4 choices and a single correct answer. + +Homepage: https://github.com/hitz-zentroa/latxa + + +### Citation + +``` +@misc{etxaniz2024latxa, + title={Latxa: An Open Language Model and Evaluation Suite for Basque}, + author={Julen Etxaniz and Oscar Sainz and Naiara Perez and Itziar Aldabe and German Rigau and Eneko Agirre and Aitor Ormazabal and Mikel Artetxe and Aitor Soroa}, + year={2024}, + eprint={2403.20266}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Groups and Tasks + +#### Groups + +There are no groups. 
+ +#### Tasks + +* `eus_proficiency`: EusProficiency comprises 5,169 exercises on different topics from past EGA exams, the official C1-level certificate of proficiency in Basque. + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation-harness/lm_eval/tasks/eus_proficiency/eus_proficiency.yaml b/lm-evaluation-harness/lm_eval/tasks/eus_proficiency/eus_proficiency.yaml new file mode 100644 index 0000000000000000000000000000000000000000..18cf5d2ab313a2ac907738185b5e39036402c7e2 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/eus_proficiency/eus_proficiency.yaml @@ -0,0 +1,16 @@ +dataset_path: HiTZ/EusProficiency +dataset_name: default +task: eus_proficiency +doc_to_text: "Galdera: {{question}}\nA: {{candidates[0]}}\nB: {{candidates[1]}}\nC: {{candidates[2]}}\nD: {{candidates[3]}}\nErantzuna:" +doc_to_choice: ["A", "B", "C", "D"] +validation_split: null +test_split: test +fewshot_split: test +output_type: multiple_choice +doc_to_target: answer +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 0.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/haerae/README.md b/lm-evaluation-harness/lm_eval/tasks/haerae/README.md new file mode 100644 index 0000000000000000000000000000000000000000..108626ae34ba4deb88d22b2ca02f43c54d2fcb5d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/haerae/README.md @@ -0,0 +1,49 @@ +# HAE-RAE BENCH + +### Paper + +Title: `HAE-RAE Bench: Evaluation of Korean Knowledge in Language Models` + +Abstract: `Large Language Models (LLMs) trained on massive corpora demonstrate impressive capabilities in a wide range of tasks. While there are ongoing efforts to adapt these models to languages beyond English, the attention given to their evaluation methodologies remains limited. Current multilingual benchmarks often rely on back translations or re-implementations of English tests, limiting their capacity to capture unique cultural and linguistic nuances. To bridge this gap for the Korean language, we introduce HAE-RAE Bench, a dataset curated to challenge models lacking Korean cultural and contextual depth. The dataset encompasses six downstream tasks across four domains: vocabulary, history, general knowledge, and reading comprehension. Contrary to traditional evaluation suites focused on token or sequence classification and specific mathematical or logical reasoning, HAE-RAE Bench emphasizes a model's aptitude for recalling Korean-specific knowledge and cultural contexts. 
Comparative analysis with prior Korean benchmarks indicates that the HAE-RAE Bench presents a greater challenge to non-native models, by disturbing abilities and knowledge learned from English being transferred.`
+
+Homepage: https://huggingface.co/datasets/HAERAE-HUB/HAE_RAE_BENCH
+
+### Citation
+
+```
+@misc{son2023haerae,
+  title={HAE-RAE Bench: Evaluation of Korean Knowledge in Language Models},
+  author={Guijin Son and Hanwool Lee and Suwan Kim and Huiseo Kim and Jaecheol Lee and Je Won Yeom and Jihyu Jung and Jung Woo Kim and Songseong Kim},
+  year={2023},
+  eprint={2309.02706},
+  archivePrefix={arXiv},
+  primaryClass={cs.CL}
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+* `haerae`: consists of five of the tasks provided in the HAE-RAE Bench paper. "Reading Comprehension" was excluded from this implementation due to copyright issues and will be included in a future update. For the other tasks, some of the data may be replaced or expanded with the release of HAE-RAE v1.1; please keep this in mind when using them.
+
+#### Tasks
+
+The following tasks evaluate subjects in the HAE-RAE dataset:
+
+- `haerae_standard_nomenclature`
+- `haerae_loan_word`
+- `haerae_rare_word`
+- `haerae_general_knowledge`
+- `haerae_history`
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [x] Is the task an existing benchmark in the literature?
+  * [x] Have you referenced the original paper that introduced the task?
+  * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
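+
+### Example: inspecting the data
+
+A minimal sketch (not part of the upstream files in this diff) of peeking at the fields consumed by `_default_haerae_yaml` below (`query` for `doc_to_text`, `answer` for `doc_to_target`); the `general_knowledge` subset name comes from `haerae_gk.yaml`.
+
+```python
+from datasets import load_dataset
+
+# One of the five HAE-RAE subsets configured in this directory.
+ds = load_dataset("HAERAE-HUB/HAE_RAE_BENCH", "general_knowledge", split="test")
+
+example = ds[0]
+print(example["query"])   # the prompt rendered by doc_to_text
+print(example["answer"])  # the gold label rendered by doc_to_target
+```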
diff --git a/lm-evaluation-harness/lm_eval/tasks/haerae/_default_haerae_yaml b/lm-evaluation-harness/lm_eval/tasks/haerae/_default_haerae_yaml new file mode 100644 index 0000000000000000000000000000000000000000..c22e3d15d1c2e13754bf4902125f51021bd63d82 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/haerae/_default_haerae_yaml @@ -0,0 +1,17 @@ +group: haerae +dataset_path: HAERAE-HUB/HAE_RAE_BENCH +test_split: test +fewshot_split: test +output_type: multiple_choice +doc_to_text: "{{query}}" +doc_to_choice: ["(A)", "(B)", "(C)", "(D)", "(E)"] +doc_to_target: "{{answer}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_gk.yaml b/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_gk.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2ccd26109f7ad2660e6d2d167d5e2020b7a295b9 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_gk.yaml @@ -0,0 +1,3 @@ +"dataset_name": "general_knowledge" +"include": "_default_haerae_yaml" +"task": "haerae_general_knowledge" diff --git a/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_hi.yaml b/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_hi.yaml new file mode 100644 index 0000000000000000000000000000000000000000..15f77efdd1892f3c49784a2660e646d087c4668e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_hi.yaml @@ -0,0 +1,3 @@ +"dataset_name": "history" +"include": "_default_haerae_yaml" +"task": "haerae_history" diff --git a/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_lw.yaml b/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_lw.yaml new file mode 100644 index 0000000000000000000000000000000000000000..af143afe7ddb5442e707e1215e2f3bf4f13d7a08 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_lw.yaml @@ -0,0 +1,3 @@ +"dataset_name": "loan_words" +"include": "_default_haerae_yaml" +"task": "haerae_loan_word" diff --git a/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_rw.yaml b/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_rw.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bc8d182e61cd2deafdd5a34385132446370da90c --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_rw.yaml @@ -0,0 +1,3 @@ +"dataset_name": "rare_words" +"include": "_default_haerae_yaml" +"task": "haerae_rare_word" diff --git a/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_sn.yaml b/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_sn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..62603ba1c8e4f6f01d7028aa1dfe804c67ba3b99 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/haerae/haerae_sn.yaml @@ -0,0 +1,3 @@ +"dataset_name": "standard_nomenclature" +"include": "_default_haerae_yaml" +"task": "haerae_standard_nomenclature" diff --git a/lm-evaluation-harness/lm_eval/tasks/hellaswag/README.md b/lm-evaluation-harness/lm_eval/tasks/hellaswag/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9fdbac13581c06430b63248514b7cf5c9610c220 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/hellaswag/README.md @@ -0,0 +1,49 @@ +# HellaSwag + +### Paper + +Title: `HellaSwag: Can a Machine Really Finish Your Sentence?` + +Abstract: https://arxiv.org/abs/1905.07830 + +Recent work by Zellers et al. 
(2018) introduced a new task of commonsense natural language inference: given an event description such as "A woman sits at a piano," a machine must select the most likely followup: "She sets her fingers on the keys." With the introduction of BERT, near human-level performance was reached. Does this mean that machines can perform human level commonsense inference? +In this paper, we show that commonsense inference still proves difficult for even state-of-the-art models, by presenting HellaSwag, a new challenge dataset. Though its questions are trivial for humans (>95% accuracy), state-of-the-art models struggle (<48%). We achieve this via Adversarial Filtering (AF), a data collection paradigm wherein a series of discriminators iteratively select an adversarial set of machine-generated wrong answers. AF proves to be surprisingly robust. The key insight is to scale up the length and complexity of the dataset examples towards a critical 'Goldilocks' zone wherein generated text is ridiculous to humans, yet often misclassified by state-of-the-art models. +Our construction of HellaSwag, and its resulting difficulty, sheds light on the inner workings of deep pretrained models. More broadly, it suggests a new path forward for NLP research, in which benchmarks co-evolve with the evolving state-of-the-art in an adversarial way, so as to present ever-harder challenges. + +Homepage: `https://rowanzellers.com/hellaswag/` + + +### Citation + +``` +@inproceedings{zellers2019hellaswag, + title={HellaSwag: Can a Machine Really Finish Your Sentence?}, + author={Zellers, Rowan and Holtzman, Ari and Bisk, Yonatan and Farhadi, Ali and Choi, Yejin}, + booktitle ={Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics}, + year={2019} +} +``` + +### Groups and Tasks + +#### Groups + +- Not part of a group yet + +#### Tasks + +- `hellaswag` + + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
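+
+### Preprocessing note
+
+As a rough illustration (with a made-up record, not upstream code) of what the `process_docs` hook in `utils.py` later in this diff does: it prepends the activity label, joins the two context halves, and strips WikiHow-style bracket artifacts before the endings are scored.
+
+```python
+import re
+
+def build_query(doc):
+    """Toy mirror of the query construction in _process_doc (see utils.py below)."""
+    ctx = doc["ctx_a"] + " " + doc["ctx_b"].capitalize()
+    text = (doc["activity_label"] + ": " + ctx).strip()
+    text = text.replace(" [title]", ". ")  # WikiHow section markers
+    text = re.sub(r"\[.*?\]", "", text)    # drop remaining bracket artifacts
+    return text
+
+doc = {  # invented example record
+    "activity_label": "Baking bread",
+    "ctx_a": "A person mixes flour and water in a bowl.",
+    "ctx_b": "they knead the dough on the counter",
+}
+print(build_query(doc))
+# Baking bread: A person mixes flour and water in a bowl. They knead the dough on the counter
+```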
diff --git a/lm-evaluation-harness/lm_eval/tasks/hellaswag/__pycache__/utils.cpython-310.pyc b/lm-evaluation-harness/lm_eval/tasks/hellaswag/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6e35e940fdfd94d07889c4929a6ea8e518ba459 Binary files /dev/null and b/lm-evaluation-harness/lm_eval/tasks/hellaswag/__pycache__/utils.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/tasks/hellaswag/hellaswag.yaml b/lm-evaluation-harness/lm_eval/tasks/hellaswag/hellaswag.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ec627da7d46ea6f31bd0ca68c60e21fd9332db9d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/hellaswag/hellaswag.yaml @@ -0,0 +1,22 @@ +group: + - multiple_choice +task: hellaswag +dataset_path: hellaswag +dataset_name: null +output_type: multiple_choice +training_split: train +validation_split: validation +test_split: null +process_docs: !function utils.process_docs +doc_to_text: "{{query}}" +doc_to_target: "{{label}}" +doc_to_choice: "choices" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/hellaswag/utils.py b/lm-evaluation-harness/lm_eval/tasks/hellaswag/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b526a9e93076f7db54221072d58ca4bd7161ee97 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/hellaswag/utils.py @@ -0,0 +1,25 @@ +import re + +import datasets + + +def preprocess(text): + text = text.strip() + # NOTE: Brackets are artifacts of the WikiHow dataset portion of HellaSwag. + text = text.replace(" [title]", ". ") + text = re.sub("\\[.*?\\]", "", text) + text = text.replace(" ", " ") + return text + + +def process_docs(dataset: datasets.Dataset) -> datasets.Dataset: + def _process_doc(doc): + ctx = doc["ctx_a"] + " " + doc["ctx_b"].capitalize() + out_doc = { + "query": preprocess(doc["activity_label"] + ": " + ctx), + "choices": [preprocess(ending) for ending in doc["endings"]], + "gold": int(doc["label"]), + } + return out_doc + + return dataset.map(_process_doc) diff --git a/lm-evaluation-harness/lm_eval/tasks/lambada/README.md b/lm-evaluation-harness/lm_eval/tasks/lambada/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ac2b92b553c35a5dc070017b6bebb643e314d64e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/lambada/README.md @@ -0,0 +1,39 @@ +# LAMBADA + +### Paper +Title: `The LAMBADA dataset: Word prediction requiring a broad discourse context` + +Abstract: https://arxiv.org/pdf/1606.06031.pdf + +LAMBADA is a dataset to evaluate the capabilities of computational models for text +understanding by means of a word prediction task. LAMBADA is a collection of narrative +passages sharing the characteristic that human subjects are able to guess their last +word if they are exposed to the whole passage, but not if they only see the last +sentence preceding the target word. To succeed on LAMBADA, computational models +cannot simply rely on local context, but must be able to keep track of information +in the broader discourse. 
+ +Homepage: https://zenodo.org/record/2630551#.X4Xzn5NKjUI + +### Groups and Tasks + +#### Groups + +- `lambada` + +#### Tasks + +- `lambada_openai` +- `lambada_standard` + + +### Citation + +@misc{ + author={Paperno, Denis and Kruszewski, Germán and Lazaridou, Angeliki and Pham, Quan Ngoc and Bernardi, Raffaella and Pezzelle, Sandro and Baroni, Marco and Boleda, Gemma and Fernández, Raquel}, + title={The LAMBADA dataset}, + DOI={10.5281/zenodo.2630551}, + publisher={Zenodo}, + year={2016}, + month={Aug} +} diff --git a/lm-evaluation-harness/lm_eval/tasks/lambada/lambada_openai.yaml b/lm-evaluation-harness/lm_eval/tasks/lambada/lambada_openai.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e9fd3a90d514a8650b6c87608cca40e409f60438 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/lambada/lambada_openai.yaml @@ -0,0 +1,22 @@ +group: + - lambada +task: lambada_openai +dataset_path: EleutherAI/lambada_openai +dataset_name: default +output_type: loglikelihood +test_split: test +doc_to_text: "{{text.split(' ')[:-1]|join(' ')}}" +doc_to_target: "{{' '+text.split(' ')[-1]}}" +should_decontaminate: true +doc_to_decontamination_query: "{{text}}" +metric_list: + - metric: perplexity + aggregation: perplexity + higher_is_better: false + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation-harness/lm_eval/tasks/lambada/lambada_standard.yaml b/lm-evaluation-harness/lm_eval/tasks/lambada/lambada_standard.yaml new file mode 100644 index 0000000000000000000000000000000000000000..900e18116309391779684eb8c4ebe2903400b784 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/lambada/lambada_standard.yaml @@ -0,0 +1,21 @@ +group: + - lambada +task: lambada_standard +dataset_path: lambada +dataset_name: null +output_type: loglikelihood +validation_split: validation +test_split: test +doc_to_text: "{{text.split(' ')[:-1]|join(' ')}}" +doc_to_target: "{{' '+text.split(' ')[-1]}}" +should_decontaminate: true +doc_to_decontamination_query: "{{text}}" +metric_list: + - metric: perplexity + aggregation: perplexity + higher_is_better: false + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/lambada_cloze/README.md b/lm-evaluation-harness/lm_eval/tasks/lambada_cloze/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9d33dce59bab7a4736b822d20cc973669d1b3874 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/lambada_cloze/README.md @@ -0,0 +1,56 @@ +# LAMBADA Cloze + +### Paper + +Title: `The LAMBADA dataset: Word prediction requiring a broad discourse context` + +Abstract: https://arxiv.org/abs/1606.06031 + +Cloze-style LAMBADA dataset. +LAMBADA is a dataset to evaluate the capabilities of computational models for text +understanding by means of a word prediction task. LAMBADA is a collection of narrative +passages sharing the characteristic that human subjects are able to guess their last +word if they are exposed to the whole passage, but not if they only see the last +sentence preceding the target word. To succeed on LAMBADA, computational models +cannot simply rely on local context, but must be able to keep track of information +in the broader discourse. 
+ +Homepage: https://zenodo.org/record/2630551#.X4Xzn5NKjUI + + +### Citation + +``` +@misc{ + author={Paperno, Denis and Kruszewski, Germán and Lazaridou, Angeliki and Pham, Quan Ngoc and Bernardi, Raffaella and Pezzelle, Sandro and Baroni, Marco and Boleda, Gemma and Fernández, Raquel}, + title={The LAMBADA dataset}, + DOI={10.5281/zenodo.2630551}, + publisher={Zenodo}, + year={2016}, + month={Aug} +} +``` + +### Groups and Tasks + +#### Groups + +* `lambada_cloze` + +#### Tasks + +* `lambada_openai_cloze_yaml` +* `lambada_standard_cloze_yaml` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation-harness/lm_eval/tasks/lambada_cloze/lambada_openai_cloze.yaml b/lm-evaluation-harness/lm_eval/tasks/lambada_cloze/lambada_openai_cloze.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d25e26d9efd926e79745c251cab1953dde1986bf --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/lambada_cloze/lambada_openai_cloze.yaml @@ -0,0 +1,20 @@ +group: + - lambada_cloze +task: lambada_openai_cloze_yaml +dataset_path: EleutherAI/lambada_openai +dataset_name: default +output_type: loglikelihood +test_split: test +doc_to_text: "{{text.split(' ')[:-1]|join(' ')}} ____. ->" +doc_to_target: "{{' '+text.split(' ')[-1]}}" +should_decontaminate: true +doc_to_decontamination_query: "{{text}}" +metric_list: + - metric: perplexity + aggregation: perplexity + higher_is_better: false + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/lambada_cloze/lambada_standard_cloze.yaml b/lm-evaluation-harness/lm_eval/tasks/lambada_cloze/lambada_standard_cloze.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7cde8fdebc6f85113c3f3548fcc6a0cbe71aaa7b --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/lambada_cloze/lambada_standard_cloze.yaml @@ -0,0 +1,21 @@ +group: + - lambada_cloze +task: lambada_standard_cloze_yaml +dataset_path: lambada +dataset_name: null +output_type: loglikelihood +validation_split: validation +test_split: test +doc_to_text: "{{text.split(' ')[:-1]|join(' ')}} ____. 
->" +doc_to_target: "{{' '+text.split(' ')[-1]}}" +should_decontaminate: true +doc_to_decontamination_query: "{{text}}" +metric_list: + - metric: perplexity + aggregation: perplexity + higher_is_better: false + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_abstract_algebra.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_abstract_algebra.yaml new file mode 100644 index 0000000000000000000000000000000000000000..90f3cc50b3f8e54b584237edfa50fcdc96d0e625 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_abstract_algebra.yaml @@ -0,0 +1,8 @@ +"dataset_name": "abstract_algebra" +"description": "The following are multiple choice questions (with answers) about abstract\ + \ algebra.\n\n" +"group": "mmlu_stem" +"group_alias": "stem" +"include": "_default_template_yaml" +"task": "mmlu_abstract_algebra" +"task_alias": "abstract_algebra" diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_business_ethics.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_business_ethics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ea0d1fe2a68cb893759f3c550775ff3554f4fc13 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_business_ethics.yaml @@ -0,0 +1,8 @@ +"dataset_name": "business_ethics" +"description": "The following are multiple choice questions (with answers) about business\ + \ ethics.\n\n" +"group": "mmlu_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "mmlu_business_ethics" +"task_alias": "business_ethics" diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_college_computer_science.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_college_computer_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9786cc6ebd0d14936bb0138d322161b01a4574dd --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_college_computer_science.yaml @@ -0,0 +1,8 @@ +"dataset_name": "college_computer_science" +"description": "The following are multiple choice questions (with answers) about college\ + \ computer science.\n\n" +"group": "mmlu_stem" +"group_alias": "stem" +"include": "_default_template_yaml" +"task": "mmlu_college_computer_science" +"task_alias": "college_computer_science" diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_elementary_mathematics.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_elementary_mathematics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2154ab65454c9234b57d76782315c6559b0e7e0f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_elementary_mathematics.yaml @@ -0,0 +1,8 @@ +"dataset_name": "elementary_mathematics" +"description": "The following are multiple choice questions (with answers) about elementary\ + \ mathematics.\n\n" +"group": "mmlu_stem" +"group_alias": "stem" +"include": "_default_template_yaml" +"task": "mmlu_elementary_mathematics" +"task_alias": "elementary_mathematics" diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_chemistry.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_chemistry.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8e9421c1a6f7271ddfc87ad099a914f81bc031e9 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_chemistry.yaml @@ -0,0 +1,8 @@ +"dataset_name": 
"high_school_chemistry" +"description": "The following are multiple choice questions (with answers) about high\ + \ school chemistry.\n\n" +"group": "mmlu_stem" +"group_alias": "stem" +"include": "_default_template_yaml" +"task": "mmlu_high_school_chemistry" +"task_alias": "high_school_chemistry" diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_us_history.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_us_history.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e4432fe4459fcaa3c1ccf311c094ceb9122d0637 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_high_school_us_history.yaml @@ -0,0 +1,8 @@ +"dataset_name": "high_school_us_history" +"description": "The following are multiple choice questions (with answers) about high\ + \ school us history.\n\n" +"group": "mmlu_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "mmlu_high_school_us_history" +"task_alias": "high_school_us_history" diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_jurisprudence.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_jurisprudence.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e16de5c40b2a29cfc0cadeaad5ed11df9b7503e8 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_jurisprudence.yaml @@ -0,0 +1,8 @@ +"dataset_name": "jurisprudence" +"description": "The following are multiple choice questions (with answers) about jurisprudence.\n\ + \n" +"group": "mmlu_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "mmlu_jurisprudence" +"task_alias": "jurisprudence" diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_logical_fallacies.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_logical_fallacies.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8b12057bc78ffc195b4d016bfdc1c931f08e6267 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_logical_fallacies.yaml @@ -0,0 +1,8 @@ +"dataset_name": "logical_fallacies" +"description": "The following are multiple choice questions (with answers) about logical\ + \ fallacies.\n\n" +"group": "mmlu_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "mmlu_logical_fallacies" +"task_alias": "logical_fallacies" diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_moral_disputes.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_moral_disputes.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2df1a1ddb6302f7e563852fdd56cdb0d1668dad1 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_moral_disputes.yaml @@ -0,0 +1,8 @@ +"dataset_name": "moral_disputes" +"description": "The following are multiple choice questions (with answers) about moral\ + \ disputes.\n\n" +"group": "mmlu_humanities" +"group_alias": "humanities" +"include": "_default_template_yaml" +"task": "mmlu_moral_disputes" +"task_alias": "moral_disputes" diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_nutrition.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_nutrition.yaml new file mode 100644 index 0000000000000000000000000000000000000000..df70fbb2de2f40a5cb10a9093d4a0243e45c6ffc --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/default/mmlu_nutrition.yaml @@ -0,0 +1,8 @@ +"dataset_name": "nutrition" +"description": "The following are multiple choice questions 
(with answers) about nutrition.\n\ + \n" +"group": "mmlu_other" +"group_alias": "other" +"include": "_default_template_yaml" +"task": "mmlu_nutrition" +"task_alias": "nutrition" diff --git a/lm-evaluation-harness/lm_eval/tasks/nq_open/README.md b/lm-evaluation-harness/lm_eval/tasks/nq_open/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lm-evaluation-harness/lm_eval/tasks/nq_open/nq_open.yaml b/lm-evaluation-harness/lm_eval/tasks/nq_open/nq_open.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0464ca3abc61b1d8f47b088a7f722948044bdc13 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/nq_open/nq_open.yaml @@ -0,0 +1,32 @@ +task: nq_open +dataset_path: nq_open +output_type: generate_until +training_split: train +validation_split: validation +description: "Answer these questions:\n\n" +doc_to_text: "Q: {{question}}?\nA:" +doc_to_target: "{{answer}}" # TODO: should be multi-target +fewshot_delimiter: "\n" +generation_kwargs: + until: + - "\n" + - "." + - "," + do_sample: false + temperature: 0.0 +filter_list: + - name: remove_whitespace + filter: + - function: remove_whitespace + - function: take_first +target_delimiter: " " +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + regexes_to_ignore: + - "\\b(?:The |the |An |A |a |an )" +metadata: + version: 3.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/paws-x/_generate_config.py b/lm-evaluation-harness/lm_eval/tasks/paws-x/_generate_config.py new file mode 100644 index 0000000000000000000000000000000000000000..a1341fec89b52f3b0e9e7e778825b0d774117174 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/paws-x/_generate_config.py @@ -0,0 +1,109 @@ +import argparse + +import yaml + + +# Different languages that are part of PAWS-X. +# These correspond to dataset names (Subsets) on HuggingFace. +# A yaml file is generated by this script for each language. + +LANGUAGES = { + "de": { # German + "QUESTION_WORD": "richtig", + "YES": "Ja", + "NO": "Nein", + }, + "en": { # English + "QUESTION_WORD": "right", + "YES": "Yes", + "NO": "No", + }, + "es": { # Spanish + "QUESTION_WORD": "verdad", + "YES": "Sí", + "NO": "No", + }, + "fr": { # French + "QUESTION_WORD": "n'est-ce pas", + "YES": "Oui", + "NO": "Non", + }, + "ja": { # Japanese + "QUESTION_WORD": "ですね", + "YES": "はい", + "NO": "いいえ", + }, + "ko": { # Korean + "QUESTION_WORD": "맞죠", + "YES": "예", + "NO": "아니요", + }, + "zh": { # Chinese + "QUESTION_WORD": "对吧", + "YES": "是", + "NO": "不是", + }, +} + + +def gen_lang_yamls(output_dir: str, overwrite: bool) -> None: + """ + Generate a yaml file for each language. + + :param output_dir: The directory to output the files to. + :param overwrite: Whether to overwrite files if they already exist. + """ + err = [] + for lang in LANGUAGES.keys(): + file_name = f"paws_{lang}.yaml" + try: + QUESTION_WORD = LANGUAGES[lang]["QUESTION_WORD"] + YES = LANGUAGES[lang]["YES"] + NO = LANGUAGES[lang]["NO"] + with open( + f"{output_dir}/{file_name}", "w" if overwrite else "x", encoding="utf8" + ) as f: + f.write("# Generated by utils.py\n") + yaml.dump( + { + "include": "pawsx_template_yaml", + "dataset_name": lang, + "task": f"paws_{lang}", + "doc_to_text": "", + "doc_to_choice": f"{{{{[" + f"""sentence1+\", {QUESTION_WORD}? {YES}, \"+sentence2,""" + f""" sentence1+\", {QUESTION_WORD}? 
{NO}, \"+sentence2""" + f"]}}}}", + }, + f, + allow_unicode=True, + ) + except FileExistsError: + err.append(file_name) + + if len(err) > 0: + raise FileExistsError( + "Files were not created because they already exist (use --overwrite flag):" + f" {', '.join(err)}" + ) + + +def main() -> None: + """Parse CLI args and generate language-specific yaml files.""" + parser = argparse.ArgumentParser() + parser.add_argument( + "--overwrite", + default=False, + action="store_true", + help="Overwrite files if they already exist", + ) + parser.add_argument( + "--output-dir", default=".", help="Directory to write yaml files to" + ) + args = parser.parse_args() + + gen_lang_yamls(output_dir=args.output_dir, overwrite=args.overwrite) + + +if __name__ == "__main__": + main() diff --git a/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_de.yaml b/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_de.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0d9ffad3b000727764c69e7eef3596d4d3b0762f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_de.yaml @@ -0,0 +1,7 @@ +# Generated by utils.py +dataset_name: de +doc_to_choice: '{{[sentence1+", richtig? Ja, "+sentence2, sentence1+", richtig? Nein, + "+sentence2]}}' +doc_to_text: '' +include: pawsx_template_yaml +task: paws_de diff --git a/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_ko.yaml b/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_ko.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fc7034415496efcaffc50e988bd5f5f359c4fb2a --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/paws-x/paws_ko.yaml @@ -0,0 +1,6 @@ +# Generated by utils.py +dataset_name: ko +doc_to_choice: '{{[sentence1+", 맞죠? 예, "+sentence2, sentence1+", 맞죠? 아니요, "+sentence2]}}' +doc_to_text: '' +include: pawsx_template_yaml +task: paws_ko diff --git a/lm-evaluation-harness/lm_eval/tasks/storycloze/README.md b/lm-evaluation-harness/lm_eval/tasks/storycloze/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1e7f8ac331a822182cef28add37f7c29cf7cc80b --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/storycloze/README.md @@ -0,0 +1,73 @@ +# StoryCloze + +### Paper + +Title: `Few-shot Learning with Multilingual Language Models` +Abstract: `https://arxiv.org/abs/2112.10668` + +XStoryCloze consists of the professionally translated version of the [English StoryCloze dataset](https://cs.rochester.edu/nlp/rocstories/) (Spring 2016 version) to 10 non-English languages. This dataset is released by Meta AI. + +Homepage: https://github.com/facebookresearch/fairseq/pull/4820 + + +### Citation + +``` +@article{DBLP:journals/corr/abs-2112-10668, + author = {Xi Victoria Lin and + Todor Mihaylov and + Mikel Artetxe and + Tianlu Wang and + Shuohui Chen and + Daniel Simig and + Myle Ott and + Naman Goyal and + Shruti Bhosale and + Jingfei Du and + Ramakanth Pasunuru and + Sam Shleifer and + Punit Singh Koura and + Vishrav Chaudhary and + Brian O'Horo and + Jeff Wang and + Luke Zettlemoyer and + Zornitsa Kozareva and + Mona T. 
Diab and + Veselin Stoyanov and + Xian Li}, + title = {Few-shot Learning with Multilingual Language Models}, + journal = {CoRR}, + volume = {abs/2112.10668}, + year = {2021}, + url = {https://arxiv.org/abs/2112.10668}, + eprinttype = {arXiv}, + eprint = {2112.10668}, + timestamp = {Tue, 04 Jan 2022 15:59:27 +0100}, + biburl = {https://dblp.org/rec/journals/corr/abs-2112-10668.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + +### Groups and Tasks + +#### Groups + +* `storycloze` + +#### Tasks + +* `storycloze_2016` +* `storycloze_2018` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation-harness/lm_eval/tasks/storycloze/storycloze_2016.yaml b/lm-evaluation-harness/lm_eval/tasks/storycloze/storycloze_2016.yaml new file mode 100644 index 0000000000000000000000000000000000000000..df1c2629cbb15e070bd7e6954d5b528ccc8f030d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/storycloze/storycloze_2016.yaml @@ -0,0 +1,18 @@ +group: storycloze +task: storycloze_2016 +dataset_path: story_cloze +dataset_name: 2016 +output_type: multiple_choice +validation_split: validation +test_split: test +doc_to_text: "{{[input_sentence_1, input_sentence_2, input_sentence_3, input_sentence_4]|join(' ')}}" +doc_to_target: "{{answer_right_ending-1}}" +doc_to_choice: "{{[sentence_quiz1, sentence_quiz2]}}" +should_decontaminate: true +doc_to_decontamination_query: "{{[input_sentence_1, input_sentence_2, input_sentence_3, input_sentence_4]|join(' ')}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/storycloze/storycloze_2018.yaml b/lm-evaluation-harness/lm_eval/tasks/storycloze/storycloze_2018.yaml new file mode 100644 index 0000000000000000000000000000000000000000..aa630efc73c866366175588a378beb28568d884d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/storycloze/storycloze_2018.yaml @@ -0,0 +1,16 @@ +group: storycloze +task: storycloze_2018 +dataset_path: story_cloze +dataset_name: 2018 +output_type: multiple_choice +validation_split: validation +test_split: test +doc_to_text: "{{[input_sentence_1, input_sentence_2, input_sentence_3, input_sentence_4]|join(' ')}}" +doc_to_target: "{{answer_right_ending-1}}" +doc_to_choice: "{{[sentence_quiz1, sentence_quiz2]}}" +should_decontaminate: true +doc_to_decontamination_query: "{{[input_sentence_1, input_sentence_2, input_sentence_3, input_sentence_4]|join(' ')}}" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true diff --git a/lm-evaluation-harness/lm_eval/tasks/triviaqa/README.md b/lm-evaluation-harness/lm_eval/tasks/triviaqa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1722b709886b938ded164ad0eee260a2e0f6b78e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/triviaqa/README.md @@ -0,0 +1,51 @@ +# Trivia QA + 
+### Paper + +Title: `TriviaQA: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension` +Abstract: https://arxiv.org/abs/1705.03551 + +TriviaQA is a reading comprehension dataset containing over 650K question-answer-evidence +triples. TriviaQA includes 95K question-answer pairs authored by trivia enthusiasts +and independently gathered evidence documents, six per question on average, that provide +high quality distant supervision for answering the questions. + +Homepage: https://nlp.cs.washington.edu/triviaqa/ + + +### Citation + +``` +@InProceedings{JoshiTriviaQA2017, + author = {Joshi, Mandar and Choi, Eunsol and Weld, Daniel S. and Zettlemoyer, Luke}, + title = {TriviaQA: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}, + booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics}, + month = {July}, + year = {2017}, + address = {Vancouver, Canada}, + publisher = {Association for Computational Linguistics}, +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet. + +#### Tasks + +* `triviaqa`: `Generate an answer based on the question.` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation-harness/lm_eval/tasks/triviaqa/default.yaml b/lm-evaluation-harness/lm_eval/tasks/triviaqa/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a895fe7eb48f1fdef578606ebc95bbc7ab0f75ca --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/triviaqa/default.yaml @@ -0,0 +1,31 @@ +task: triviaqa +dataset_path: trivia_qa +dataset_name: rc.nocontext +output_type: generate_until +training_split: train +validation_split: validation +doc_to_text: "Question: {{question}}?\nAnswer:" +doc_to_target: "{{answer.aliases}}" +should_decontaminate: true +doc_to_decontamination_query: question +generation_kwargs: + until: + - "\n" + - "." + - "," + do_sample: false + temperature: 0.0 +filter_list: + - name: remove_whitespace + filter: + - function: remove_whitespace + - function: take_first +target_delimiter: " " +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +metadata: + version: 3.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/xnli/README.md b/lm-evaluation-harness/lm_eval/tasks/xnli/README.md new file mode 100644 index 0000000000000000000000000000000000000000..512f9cc828bae447accbac974ca3bd322202b29e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/xnli/README.md @@ -0,0 +1,78 @@ +# XNLI + +### Paper + +Title: `XNLI: Evaluating Cross-lingual Sentence Representations` + +Abstract: https://arxiv.org/abs/1809.05053 + +Based on the implementation of @yongzx (see https://github.com/EleutherAI/lm-evaluation-harness/pull/258) + +Prompt format (same as XGLM and mGPT): + +sentence1 + ", right? 
" + mask = (Yes|Also|No) + ", " + sentence2 + +Predicition is the full sequence with the highest likelihood. + +Language specific prompts are translated word-by-word with Google Translate +and may differ from the ones used by mGPT and XGLM (they do not provide their prompts). + +Homepage: https://github.com/facebookresearch/XNLI + + +### Citation + +""" +@InProceedings{conneau2018xnli, + author = "Conneau, Alexis + and Rinott, Ruty + and Lample, Guillaume + and Williams, Adina + and Bowman, Samuel R. + and Schwenk, Holger + and Stoyanov, Veselin", + title = "XNLI: Evaluating Cross-lingual Sentence Representations", + booktitle = "Proceedings of the 2018 Conference on Empirical Methods + in Natural Language Processing", + year = "2018", + publisher = "Association for Computational Linguistics", + location = "Brussels, Belgium", +} +""" + +### Groups and Tasks + +#### Groups + +* `xnli` + +#### Tasks + +* `xnli_ar`: Arabic +* `xnli_bg`: Bulgarian +* `xnli_de`: German +* `xnli_el`: Greek +* `xnli_en`: English +* `xnli_es`: Spanish +* `xnli_fr`: French +* `xnli_hi`: Hindi +* `xnli_ru`: Russian +* `xnli_sw`: Swahili +* `xnli_th`: Thai +* `xnli_tr`: Turkish +* `xnli_ur`: Urdu +* `xnli_vi`: Vietnamese +* `xnli_zh`: Chinese + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation-harness/lm_eval/tasks/xnli/utils.py b/lm-evaluation-harness/lm_eval/tasks/xnli/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2844d1d7c85a34c55f15893f3507601c54728a30 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/xnli/utils.py @@ -0,0 +1,166 @@ +import argparse + +import yaml + + +# Different languages that are part of xnli. +# These correspond to dataset names (Subsets) on HuggingFace. +# A yaml file is generated by this script for each language. 
+ +LANGUAGES = { + "ar": { # Arabic + "QUESTION_WORD": "صحيح", + "ENTAILMENT_LABEL": "نعم", + "NEUTRAL_LABEL": "لذا", + "CONTRADICTION_LABEL": "رقم", + }, + "bg": { # Bulgarian + "QUESTION_WORD": "правилно", + "ENTAILMENT_LABEL": "да", + "NEUTRAL_LABEL": "така", + "CONTRADICTION_LABEL": "не", + }, + "de": { # German + "QUESTION_WORD": "richtig", + "ENTAILMENT_LABEL": "Ja", + "NEUTRAL_LABEL": "Auch", + "CONTRADICTION_LABEL": "Nein", + }, + "el": { # Greek + "QUESTION_WORD": "σωστός", + "ENTAILMENT_LABEL": "Ναί", + "NEUTRAL_LABEL": "Έτσι", + "CONTRADICTION_LABEL": "όχι", + }, + "en": { # English + "QUESTION_WORD": "right", + "ENTAILMENT_LABEL": "Yes", + "NEUTRAL_LABEL": "Also", + "CONTRADICTION_LABEL": "No", + }, + "es": { # Spanish + "QUESTION_WORD": "correcto", + "ENTAILMENT_LABEL": "Sí", + "NEUTRAL_LABEL": "Asi que", + "CONTRADICTION_LABEL": "No", + }, + "fr": { # French + "QUESTION_WORD": "correct", + "ENTAILMENT_LABEL": "Oui", + "NEUTRAL_LABEL": "Aussi", + "CONTRADICTION_LABEL": "Non", + }, + "hi": { # Hindi + "QUESTION_WORD": "सही", + "ENTAILMENT_LABEL": "हाँ", + "NEUTRAL_LABEL": "इसलिए", + "CONTRADICTION_LABEL": "नहीं", + }, + "ru": { # Russian + "QUESTION_WORD": "правильно", + "ENTAILMENT_LABEL": "Да", + "NEUTRAL_LABEL": "Так", + "CONTRADICTION_LABEL": "Нет", + }, + "sw": { # Swahili + "QUESTION_WORD": "sahihi", + "ENTAILMENT_LABEL": "Ndiyo", + "NEUTRAL_LABEL": "Hivyo", + "CONTRADICTION_LABEL": "Hapana", + }, + "th": { # Thai + "QUESTION_WORD": "ถูกต้อง", + "ENTAILMENT_LABEL": "ใช่", + "NEUTRAL_LABEL": "ดังนั้น", + "CONTRADICTION_LABEL": "ไม่", + }, + "tr": { # Turkish + "QUESTION_WORD": "doğru", + "ENTAILMENT_LABEL": "Evet", + "NEUTRAL_LABEL": "Böylece", + "CONTRADICTION_LABEL": "Hayır", + }, + "ur": { # Urdu + "QUESTION_WORD": "صحیح", + "ENTAILMENT_LABEL": "جی ہاں", + "NEUTRAL_LABEL": "اس لئے", + "CONTRADICTION_LABEL": "نہیں", + }, + "vi": { # Vietnamese + "QUESTION_WORD": "đúng", + "ENTAILMENT_LABEL": "Vâng", + "NEUTRAL_LABEL": "Vì vậy", + "CONTRADICTION_LABEL": "Không", + }, + "zh": { # Chinese + "QUESTION_WORD": "正确", + "ENTAILMENT_LABEL": "是的", + "NEUTRAL_LABEL": "所以", + "CONTRADICTION_LABEL": "不是的", + }, +} + + +def gen_lang_yamls(output_dir: str, overwrite: bool) -> None: + """ + Generate a yaml file for each language. + + :param output_dir: The directory to output the files to. + :param overwrite: Whether to overwrite files if they already exist. + """ + err = [] + for lang in LANGUAGES.keys(): + file_name = f"xnli_{lang}.yaml" + try: + QUESTION_WORD = LANGUAGES[lang]["QUESTION_WORD"] + ENTAILMENT_LABEL = LANGUAGES[lang]["ENTAILMENT_LABEL"] + NEUTRAL_LABEL = LANGUAGES[lang]["NEUTRAL_LABEL"] + CONTRADICTION_LABEL = LANGUAGES[lang]["CONTRADICTION_LABEL"] + with open( + f"{output_dir}/{file_name}", "w" if overwrite else "x", encoding="utf8" + ) as f: + f.write("# Generated by utils.py\n") + yaml.dump( + { + "include": "xnli_common_yaml", + "dataset_name": lang, + "task": f"xnli_{lang}", + "doc_to_text": "", + "doc_to_choice": f"{{{{[" + f"""premise+\", {QUESTION_WORD}? {ENTAILMENT_LABEL}, \"+hypothesis,""" + f"""premise+\", {QUESTION_WORD}? {NEUTRAL_LABEL}, \"+hypothesis,""" + f"""premise+\", {QUESTION_WORD}? 
{CONTRADICTION_LABEL}, \"+hypothesis""" + f"]}}}}", + }, + f, + allow_unicode=True, + ) + except FileExistsError: + err.append(file_name) + + if len(err) > 0: + raise FileExistsError( + "Files were not created because they already exist (use --overwrite flag):" + f" {', '.join(err)}" + ) + + +def main() -> None: + """Parse CLI args and generate language-specific yaml files.""" + parser = argparse.ArgumentParser() + parser.add_argument( + "--overwrite", + default=False, + action="store_true", + help="Overwrite files if they already exist", + ) + parser.add_argument( + "--output-dir", default=".", help="Directory to write yaml files to" + ) + args = parser.parse_args() + + gen_lang_yamls(output_dir=args.output_dir, overwrite=args.overwrite) + + +if __name__ == "__main__": + main() diff --git a/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_bg.yaml b/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_bg.yaml new file mode 100644 index 0000000000000000000000000000000000000000..939fe28186ab382300ad0bb410b31c2d5c1527a5 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_bg.yaml @@ -0,0 +1,7 @@ +# Generated by utils.py +dataset_name: bg +doc_to_choice: '{{[premise+", правилно? да, "+hypothesis,premise+", правилно? така, + "+hypothesis,premise+", правилно? не, "+hypothesis]}}' +doc_to_text: '' +include: xnli_common_yaml +task: xnli_bg diff --git a/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_common_yaml b/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_common_yaml new file mode 100644 index 0000000000000000000000000000000000000000..0201459d35817d6b08b44edaf3358d706de5c20f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_common_yaml @@ -0,0 +1,19 @@ +# This file will be included in the generated language-specific task configs. +# It doesn't have a yaml file extension as it is not meant to be imported directly +# by the harness. +group: xnli +task: null +dataset_path: xnli +dataset_name: null +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: null +doc_to_target: label +doc_to_choice: null +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_el.yaml b/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_el.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5311292ec60d0611aa40b5bdb2174ffc8d275582 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_el.yaml @@ -0,0 +1,7 @@ +# Generated by utils.py +dataset_name: el +doc_to_choice: '{{[premise+", σωστός? Ναί, "+hypothesis,premise+", σωστός? Έτσι, "+hypothesis,premise+", + σωστός? όχι, "+hypothesis]}}' +doc_to_text: '' +include: xnli_common_yaml +task: xnli_el diff --git a/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_es.yaml b/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_es.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7b00a8d9e3bb7b172cc73ea8f4fd4e07f6534da1 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_es.yaml @@ -0,0 +1,7 @@ +# Generated by utils.py +dataset_name: es +doc_to_choice: '{{[premise+", correcto? Sí, "+hypothesis,premise+", correcto? Asi + que, "+hypothesis,premise+", correcto? 
No, "+hypothesis]}}' +doc_to_text: '' +include: xnli_common_yaml +task: xnli_es diff --git a/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_fr.yaml b/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_fr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..52aee51fc3d8aab224cf18f84da04fd73879a1be --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_fr.yaml @@ -0,0 +1,7 @@ +# Generated by utils.py +dataset_name: fr +doc_to_choice: '{{[premise+", correct? Oui, "+hypothesis,premise+", correct? Aussi, + "+hypothesis,premise+", correct? Non, "+hypothesis]}}' +doc_to_text: '' +include: xnli_common_yaml +task: xnli_fr diff --git a/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_hi.yaml b/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_hi.yaml new file mode 100644 index 0000000000000000000000000000000000000000..823872ce1c5a14e42ba106b9046a7b3bb060d366 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_hi.yaml @@ -0,0 +1,7 @@ +# Generated by utils.py +dataset_name: hi +doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+", + सही? नहीं, "+hypothesis]}}' +doc_to_text: '' +include: xnli_common_yaml +task: xnli_hi diff --git a/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_ru.yaml b/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_ru.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e95af2a1788f1b361d51349fa23f278f176e84b7 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_ru.yaml @@ -0,0 +1,7 @@ +# Generated by utils.py +dataset_name: ru +doc_to_choice: '{{[premise+", правильно? Да, "+hypothesis,premise+", правильно? Так, + "+hypothesis,premise+", правильно? Нет, "+hypothesis]}}' +doc_to_text: '' +include: xnli_common_yaml +task: xnli_ru diff --git a/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_sw.yaml b/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_sw.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7dc09130efc60df0bc7d5a026b0331b635ef4018 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_sw.yaml @@ -0,0 +1,7 @@ +# Generated by utils.py +dataset_name: sw +doc_to_choice: '{{[premise+", sahihi? Ndiyo, "+hypothesis,premise+", sahihi? Hivyo, + "+hypothesis,premise+", sahihi? Hapana, "+hypothesis]}}' +doc_to_text: '' +include: xnli_common_yaml +task: xnli_sw diff --git a/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_tr.yaml b/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_tr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..552eae1f79a6ee641151aaa8211d4c67fff072a8 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_tr.yaml @@ -0,0 +1,7 @@ +# Generated by utils.py +dataset_name: tr +doc_to_choice: '{{[premise+", doğru? Evet, "+hypothesis,premise+", doğru? Böylece, + "+hypothesis,premise+", doğru? Hayır, "+hypothesis]}}' +doc_to_text: '' +include: xnli_common_yaml +task: xnli_tr diff --git a/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_ur.yaml b/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_ur.yaml new file mode 100644 index 0000000000000000000000000000000000000000..02fa3aa10268a035323163e36a03b3a0f79af314 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_ur.yaml @@ -0,0 +1,7 @@ +# Generated by utils.py +dataset_name: ur +doc_to_choice: '{{[premise+", صحیح? جی ہاں, "+hypothesis,premise+", صحیح? اس لئے, + "+hypothesis,premise+", صحیح? 
نہیں, "+hypothesis]}}' +doc_to_text: '' +include: xnli_common_yaml +task: xnli_ur diff --git a/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_vi.yaml b/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_vi.yaml new file mode 100644 index 0000000000000000000000000000000000000000..74688ff328a221c567483a22dc6390ce512ae197 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/xnli/xnli_vi.yaml @@ -0,0 +1,7 @@ +# Generated by utils.py +dataset_name: vi +doc_to_choice: '{{[premise+", đúng? Vâng, "+hypothesis,premise+", đúng? Vì vậy, "+hypothesis,premise+", + đúng? Không, "+hypothesis]}}' +doc_to_text: '' +include: xnli_common_yaml +task: xnli_vi diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Abidjan b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Abidjan new file mode 100644 index 0000000000000000000000000000000000000000..28b32ab2e0b9053f39a91d9f28b6072e41423954 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Abidjan differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Asmera b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Asmera new file mode 100644 index 0000000000000000000000000000000000000000..9dcfc19c56e62b12b730f4335b34479695f273f5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Asmera differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Bangui b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Bangui new file mode 100644 index 0000000000000000000000000000000000000000..afb6a4a8fb17b0d4670b8ea1b38f5cc6100244e4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Bangui differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Banjul b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Banjul new file mode 100644 index 0000000000000000000000000000000000000000..28b32ab2e0b9053f39a91d9f28b6072e41423954 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Banjul differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Bissau b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Bissau new file mode 100644 index 0000000000000000000000000000000000000000..82ea5aaf0c6ae2b3ec582013b6d16e6d6f29eb0a Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Bissau differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Blantyre b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Blantyre new file mode 100644 index 0000000000000000000000000000000000000000..52753c0f87bbfa457ada89d400908a3d6537ac0e Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Blantyre differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Bujumbura b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Bujumbura new file mode 100644 index 0000000000000000000000000000000000000000..52753c0f87bbfa457ada89d400908a3d6537ac0e Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Bujumbura differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Freetown b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Freetown new file mode 100644 index 0000000000000000000000000000000000000000..28b32ab2e0b9053f39a91d9f28b6072e41423954 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Freetown differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Harare 
b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Harare new file mode 100644 index 0000000000000000000000000000000000000000..52753c0f87bbfa457ada89d400908a3d6537ac0e Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Harare differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Malabo b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Malabo new file mode 100644 index 0000000000000000000000000000000000000000..afb6a4a8fb17b0d4670b8ea1b38f5cc6100244e4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Malabo differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Mbabane b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Mbabane new file mode 100644 index 0000000000000000000000000000000000000000..b1c425daced454f53d7d18fea807bf8d081cf97e Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Mbabane differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Ndjamena b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Ndjamena new file mode 100644 index 0000000000000000000000000000000000000000..a968845e29b8b2b47d4a73f74ae04ef681d7d485 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Ndjamena differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Sao_Tome b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Sao_Tome new file mode 100644 index 0000000000000000000000000000000000000000..59f3759c409a1fb50e632ef5ef613d3fee7af7ef Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Sao_Tome differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Tunis b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Tunis new file mode 100644 index 0000000000000000000000000000000000000000..427fa563033fdd8533ae56337fa20befe9719b42 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Africa/Tunis differ
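
The `doc_to_choice` fields in the XNLI and PAWS-X configs above are Jinja2 templates that expand each example into the candidate sequences the harness scores; per the XNLI README, the prediction is the candidate with the highest likelihood. The snippet below is an illustrative sketch only, not harness code: it renders the `xnli_fr` template copied from above with the `jinja2` package on an invented premise/hypothesis pair to show the three candidates.

```
# Illustrative only: render the xnli_fr doc_to_choice template for one
# made-up example to inspect the three candidate sequences.
from ast import literal_eval

from jinja2 import Template

# Template string copied from xnli_fr.yaml above.
DOC_TO_CHOICE = (
    '{{[premise+", correct? Oui, "+hypothesis,'
    'premise+", correct? Aussi, "+hypothesis,'
    'premise+", correct? Non, "+hypothesis]}}'
)

# Invented example; real documents come from the HuggingFace "xnli" dataset.
doc = {
    "premise": "Le chat dort sur le canapé.",
    "hypothesis": "Un animal se repose.",
    "label": 0,  # 0 = entailment, 1 = neutral, 2 = contradiction
}

# Jinja renders the list expression to its string form; literal_eval turns it
# back into a Python list of the three candidate continuations.
choices = literal_eval(Template(DOC_TO_CHOICE).render(**doc))
for i, choice in enumerate(choices):
    print(i, choice)
# 0 Le chat dort sur le canapé., correct? Oui, Un animal se repose.
# 1 Le chat dort sur le canapé., correct? Aussi, Un animal se repose.
# 2 Le chat dort sur le canapé., correct? Non, Un animal se repose.
```

The harness performs this rendering internally when it builds multiple-choice requests, so the snippet is only a convenient way to eyeball the prompts a given config will produce.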