diff --git a/ckpts/universal/global_step20/zero/16.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/16.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..57b376abb7f4b702a845f01d63856820aac39bb5 --- /dev/null +++ b/ckpts/universal/global_step20/zero/16.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:145e1a59b6b8523dc36b42d39827558c9c0d121a5c139f9bafbf0d57172cea25 +size 9387 diff --git a/ckpts/universal/global_step20/zero/16.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step20/zero/16.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..4a73bdbcad5df1072af99a9f1d744c343ea4812f --- /dev/null +++ b/ckpts/universal/global_step20/zero/16.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a516be8c3ce27df2b9ff05fc136a56996835fc85a46658f6d1eaebcb4bb6e88f +size 9293 diff --git a/lm-evaluation-harness/lm_eval/tasks/arithmetic/arithmetic_2dm.yaml b/lm-evaluation-harness/lm_eval/tasks/arithmetic/arithmetic_2dm.yaml new file mode 100644 index 0000000000000000000000000000000000000000..471bd4b4449f280412d9ee69566d4f80fd623671 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/arithmetic/arithmetic_2dm.yaml @@ -0,0 +1,5 @@ +include: arithmetic_1dc.yaml +task: arithmetic_2dm +dataset_name: arithmetic_2dm +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation-harness/lm_eval/tasks/arithmetic/arithmetic_3ds.yaml b/lm-evaluation-harness/lm_eval/tasks/arithmetic/arithmetic_3ds.yaml new file mode 100644 index 0000000000000000000000000000000000000000..37f9ff0d2536d6c55c3e0f1676fe8218395d7b6c --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/arithmetic/arithmetic_3ds.yaml @@ -0,0 +1,5 @@ +include: arithmetic_1dc.yaml +task: arithmetic_3ds +dataset_name: arithmetic_3ds +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation-harness/lm_eval/tasks/drop/README.md b/lm-evaluation-harness/lm_eval/tasks/drop/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6b7fc47b7165034bd74c524048f5f54ea8d041cf --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/drop/README.md @@ -0,0 +1,53 @@ +# DROP + +### Paper + +Title: `DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs` + +Abstract: https://aclanthology.org/attachments/N19-1246.Supplementary.pdf + +DROP is a QA dataset which tests comprehensive understanding of paragraphs. In +this crowdsourced, adversarially-created, 96k question-answering benchmark, a +system must resolve multiple references in a question, map them onto a paragraph, +and perform discrete operations over them (such as addition, counting, or sorting). + +Homepage: https://allenai.org/data/drop + +Acknowledgement: This implementation is based on the official evaluation for `DROP`: +https://github.com/allenai/allennlp-reading-comprehension/blob/master/allennlp_rc/eval/drop_eval.py + +### Citation + +``` +@misc{dua2019drop, + title={DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs}, + author={Dheeru Dua and Yizhong Wang and Pradeep Dasigi and Gabriel Stanovsky and Sameer Singh and Matt Gardner}, + year={2019}, + eprint={1903.00161}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet. 
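+
+As a toy illustration of the scoring described above (editorial sketch, not part of the task itself; the answer strings are made up), the bag-of-words EM/F1 implemented in `utils.py` below behaves like this:
+
+```python
+# Gold span "the Denver Broncos" vs. prediction "Denver Broncos team",
+# after the normalization utils.py applies (lowercasing, article removal).
+gold_bag = {"denver", "broncos"}
+pred_bag = {"denver", "broncos", "team"}
+
+intersection = len(gold_bag & pred_bag)             # 2
+precision = intersection / len(pred_bag)            # 2/3
+recall = intersection / len(gold_bag)               # 1.0
+f1 = 2 * precision * recall / (precision + recall)  # 0.8
+print(round(f1, 2))  # 0.8; exact match is 0 because the two bags differ
+```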
+ +#### Tasks + +* `drop` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation-harness/lm_eval/tasks/drop/default.yaml b/lm-evaluation-harness/lm_eval/tasks/drop/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4a936121524950e8a89822058cb2b29f244f31a4 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/drop/default.yaml @@ -0,0 +1,26 @@ +task: drop +dataset_path: EleutherAI/drop +output_type: generate_until +training_split: train +validation_split: validation +process_docs: !function utils.process_docs +doc_to_text: "{{passage}} {{question}}" +doc_to_target: "{{ answer|join(',')}}" +target_delimiter: "" +process_results: !function utils.process_results +should_decontaminate: true +doc_to_decontamination_query: "{{passage}} {{question}}" +generation_kwargs: + until: + - "." +metric_list: + - metric: em + aggregation: mean + higher_is_better: true + - metric: f1 + aggregation: mean + higher_is_better: true +metadata: + version: 3.0 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation-harness/lm_eval/tasks/drop/utils.py b/lm-evaluation-harness/lm_eval/tasks/drop/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..54093bb4d28e954035e76d8764a014ca99b99d8d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/drop/utils.py @@ -0,0 +1,204 @@ +import re +import string + +import numpy as np +from scipy.optimize import linear_sum_assignment + + +_ARTICLES = re.compile(r"\b(a|an|the)\b", re.UNICODE) + + +def process_docs(dataset): + def _process(doc): + return { + "id": doc["query_id"], + "passage": doc["passage"], + "question": doc["question"], + "answers": get_answers(doc), + } + + return dataset.map(_process) + + +def get_answers(doc): + def _flatten_validated_answers(validated_answers): + """Flattens a dict of lists of validated answers. + {"number": ['1', '8'], ...} + -> [{"number": ['1'], ...}, {"number": ['8'], ...}] + """ + valid_answers = [] + for i in range(len(validated_answers["number"])): + valid_answers.append( + { + "number": validated_answers["number"][i], + "date": validated_answers["date"][i], + "spans": validated_answers["spans"][i], + } + ) + return valid_answers + + answers = [] + answers_set = set() + candidates = [doc["answer"]] + _flatten_validated_answers(doc["validated_answers"]) + for candidate in candidates: + answer = parse_answer(candidate) + if answer in answers_set: + continue + answers_set.add(answer) + answers.append(answer) + return answers + + +def parse_answer(answer): + # NOTE: Everything is returned as a tuple for uniformity and hashability. 
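+    # The answer dict carries "number", "spans" and "date" fields; return the
+    # first non-empty one, checked in that priority order.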
+ if answer["number"] != "": + return (str(answer["number"]),) + if answer["spans"] != []: + return tuple(answer["spans"]) + return ( + " ".join( + [answer["date"]["day"], answer["date"]["month"], answer["date"]["year"]] + ).strip(), + ) + + +def process_results(doc, results): + preds, golds = results, doc["answers"] + max_em = 0 + max_f1 = 0 + for gold_answer in golds: + exact_match, f1_score = get_metrics(preds, gold_answer) + if gold_answer[0].strip(): + max_em = max(max_em, exact_match) + max_f1 = max(max_f1, f1_score) + return {"em": max_em, "f1": max_f1} + + +def get_metrics(predicted, gold): + """ + Takes a predicted answer and a gold answer (that are both either a string or a list of + strings), and returns exact match and the DROP F1 metric for the prediction. If you are + writing a script for evaluating objects in memory (say, the output of predictions during + validation, or while training), this is the function you want to call, after using + :func:`answer_json_to_strings` when reading the gold answer from the released data file. + """ + predicted_bags = _answer_to_bags(predicted) + gold_bags = _answer_to_bags(gold) + + if set(predicted_bags[0]) == set(gold_bags[0]) and len(predicted_bags[0]) == len( + gold_bags[0] + ): + exact_match = 1.0 + else: + exact_match = 0.0 + + f1_per_bag = _align_bags(predicted_bags[1], gold_bags[1]) + f1 = np.mean(f1_per_bag) + f1 = round(f1, 2) + return exact_match, f1 + + +def _answer_to_bags(answer): + if isinstance(answer, (list, tuple)): + raw_spans = answer + else: + raw_spans = [answer] + normalized_spans = [] + token_bags = [] + for raw_span in raw_spans: + normalized_span = _normalize(raw_span) + normalized_spans.append(normalized_span) + token_bags.append(set(normalized_span.split())) + return normalized_spans, token_bags + + +def _align_bags(predicted, gold): + """ + Takes gold and predicted answer sets and first finds the optimal 1-1 alignment + between them and gets maximum metric values over all the answers. 
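+    The optimal alignment is computed with the Hungarian algorithm via
+    scipy.optimize.linear_sum_assignment.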
+ """ + scores = np.zeros([len(gold), len(predicted)]) + for gold_index, gold_item in enumerate(gold): + for pred_index, pred_item in enumerate(predicted): + if _match_numbers_if_present(gold_item, pred_item): + scores[gold_index, pred_index] = _compute_f1(pred_item, gold_item) + row_ind, col_ind = linear_sum_assignment(-scores) + + max_scores = np.zeros([max(len(gold), len(predicted))]) + for row, column in zip(row_ind, col_ind): + max_scores[row] = max(max_scores[row], scores[row, column]) + return max_scores + + +def _compute_f1(predicted_bag, gold_bag): + intersection = len(gold_bag.intersection(predicted_bag)) + if not predicted_bag: + precision = 1.0 + else: + precision = intersection / float(len(predicted_bag)) + if not gold_bag: + recall = 1.0 + else: + recall = intersection / float(len(gold_bag)) + f1 = ( + (2 * precision * recall) / (precision + recall) + if not (precision == 0.0 and recall == 0.0) + else 0.0 + ) + return f1 + + +def _match_numbers_if_present(gold_bag, predicted_bag): + gold_numbers = set() + predicted_numbers = set() + for word in gold_bag: + if _is_number(word): + gold_numbers.add(word) + for word in predicted_bag: + if _is_number(word): + predicted_numbers.add(word) + if (not gold_numbers) or gold_numbers.intersection(predicted_numbers): + return True + return False + + +def _is_number(text): + try: + float(text) + return True + except ValueError: + return False + + +def _remove_articles(text): + return _ARTICLES.sub(" ", text) + + +def _white_space_fix(text): + return " ".join(text.split()) + + +def _remove_punc(text): + exclude = set(string.punctuation) + if not _is_number(text): + return "".join(ch for ch in text if ch not in exclude) + else: + return text + + +def _fix_number(text): + return str(float(text)) if _is_number(text) else text + + +def _tokenize(text): + return re.split(" |-", text) + + +def _normalize(answer): + tokens = [ + _white_space_fix(_remove_articles(_fix_number(_remove_punc(token.lower())))) + for token in _tokenize(answer) + ] + tokens = [token for token in tokens if token.strip()] + normalized = " ".join(tokens).strip() + return normalized diff --git a/lm-evaluation-harness/lm_eval/tasks/eq_bench/README.md b/lm-evaluation-harness/lm_eval/tasks/eq_bench/README.md new file mode 100644 index 0000000000000000000000000000000000000000..472890bdc832705e55f7a28209a74ea2af6b9865 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/eq_bench/README.md @@ -0,0 +1,55 @@ +# EQ-Bench + +Title: `EQ-Bench: An Emotional Intelligence Benchmark for Large Language Models` + +Abstract: https://arxiv.org/abs/2312.06281 + +EQ-Bench is a benchmark for language models designed to assess emotional intelligence. + +Why emotional intelligence? One reason is that it represents a subset of abilities that are important for the user experience, and which isn't explicitly tested by other benchmarks. Another reason is that it's not trivial to improve scores by fine tuning for the benchmark, which makes it harder to "game" the leaderboard. + +EQ-Bench is a little different from traditional psychometric tests. It uses a specific question format, in which the subject has to read a dialogue then rate the intensity of possible emotional responses of one of the characters. Every question is interpretative and assesses the ability to predict the magnitude of the 4 presented emotions. The test is graded without the need for a judge (so there is no length bias). 
It's cheap to run (only 171 questions), and produces results that correlate strongly with human preference (Arena ELO) and multi-domain benchmarks like MMLU. + +Homepage: https://eqbench.com/ + + +NOTE: There are some key differences between the lm-evaluation-harness version and the implementation described in the EQ-Bench paper (These have been OK'd by the author): + +- The lm-eval version uses the EQ-Bench v2 test set (171 questions) and score calculation. It does not incorporate the revision part of the prompt, as per v2.1 (https://github.com/EQ-bench/EQ-Bench) +- No retries in lm-eval version (EQ-Bench pipeline retries with successively higher temps if it encounters unparseable answers) +- In the original implementation, unparseable answers are excluded from the final score, and 83% of answers have to be parseable or a fail is returned. The lm-eval version instead assigns 0 to unparsable answers and has no fail criteria. So for lower performing models, there may be differences with the EQ-Bench leaderboard. + + +### Citation + +```bibtex +@misc{paech2023eqbench, + title={EQ-Bench: An Emotional Intelligence Benchmark for Large Language Models}, + author={Samuel J. Paech}, + year={2023}, + eprint={2312.06281}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet + +#### Tasks + +* `eq_bench` + +### Checklist + +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation-harness/lm_eval/tasks/eq_bench/default.yaml b/lm-evaluation-harness/lm_eval/tasks/eq_bench/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..16b1245b22c91e74a4ab398945a27ac31c82c5a8 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/eq_bench/default.yaml @@ -0,0 +1,20 @@ +task: eq_bench +dataset_path: pbevan11/EQ-Bench +output_type: generate_until +validation_split: validation +doc_to_text: prompt +doc_to_target: reference_answer_fullscale +process_results: !function utils.calculate_score_fullscale +generation_kwargs: + do_sample: false + temperature: 0.0 + max_gen_toks: 80 +metric_list: + - metric: eqbench + aggregation: mean + higher_is_better: true + - metric: percent_parseable + aggregation: mean + higher_is_better: true +metadata: + version: 2.1 diff --git a/lm-evaluation-harness/lm_eval/tasks/eq_bench/utils.py b/lm-evaluation-harness/lm_eval/tasks/eq_bench/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..326a0dc485f22c01053c10e65bc9bf05e1aeb590 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/eq_bench/utils.py @@ -0,0 +1,54 @@ +import math +import re + + +def calculate_score_fullscale(docs, results): + reference = eval(docs["reference_answer_fullscale"]) + user = dict(re.findall(r"(\w+):\s+(\d+)", results[0])) + # First check that the emotions specified in the answer match those in the reference + if len(user.items()) != 4: + # print('! 
Error: 4 emotions were not returned') + # print(user) + return {"eqbench": 0, "percent_parseable": 0} + emotions_dict = {} + for emotion, user_emotion_score in user.items(): + for i in range(1, 5): + if emotion == reference[f"emotion{i}"]: + emotions_dict[emotion] = True + if len(emotions_dict) != 4: + print("! Error: emotions did not match reference") + print(user) + return {"eqbench": 0, "percent_parseable": 0} + + difference_tally = ( + 0 # Tally of differerence from reference answers for this question + ) + + # Iterate over each emotion in the user's answers. + for emotion, user_emotion_score in user.items(): + # If this emotion is in the reference, calculate the difference between the user's score and the reference score. + for i in range(1, 5): + if emotion == reference[f"emotion{i}"]: + d = abs( + float(user_emotion_score) - float(reference[f"emotion{i}_score"]) + ) + # this will be a value between 0 and 10 + if d == 0: + scaled_difference = 0 + elif d <= 5: + # S-shaped scaling function + # https://www.desmos.com/calculator + # 6.5\cdot\ \frac{1}{\left(1\ +\ e^{\left(-1.2\cdot\left(x-4\right)\right)}\right)} + scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4)))) + + else: + scaled_difference = d + difference_tally += scaled_difference + + # Inverting the difference tally so that the closer the answer is to reference, the higher the score. + # The adjustment constant is chosen such that answering randomly produces a score of zero. + adjust_const = 0.7477 + final_score = 10 - (difference_tally * adjust_const) + final_score_percent = final_score * 10 + + return {"eqbench": final_score_percent, "percent_parseable": 100} diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_arc_challenge/indic_arc_challenge.yaml b/lm-evaluation-harness/lm_eval/tasks/indic_arc_challenge/indic_arc_challenge.yaml new file mode 100644 index 0000000000000000000000000000000000000000..41be213f943048f87ec82a038239e938f582cddd --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/indic_arc_challenge/indic_arc_challenge.yaml @@ -0,0 +1,9 @@ +dataset_name: [LANG] +include: indic_arc_challenge_common_yaml +doc_to_text: "Question: {{translated_question}}\nAnswer:" +doc_to_target: "{{translated_choices.label.index(answerKey)}}" +doc_to_choice: "{{translated_choices.text}}" +should_decontaminate: true +doc_to_decontamination_query: "Question: {{translated_question}}\nAnswer:" + +task: indic_arc_challenge_[LANG] diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_arc_challenge/indic_arc_challenge_gu.yaml b/lm-evaluation-harness/lm_eval/tasks/indic_arc_challenge/indic_arc_challenge_gu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..755006d26f4195d938619261556ad809f00cf78e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/indic_arc_challenge/indic_arc_challenge_gu.yaml @@ -0,0 +1,9 @@ +dataset_name: gu +include: indic_arc_challenge_common_yaml +doc_to_text: "Question: {{translated_question}}\nAnswer:" +doc_to_target: "{{translated_choices.label.index(answerKey)}}" +doc_to_choice: "{{translated_choices.text}}" +should_decontaminate: true +doc_to_decontamination_query: "Question: {{translated_question}}\nAnswer:" + +task: indic_arc_challenge_gu \ No newline at end of file diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_arc_challenge/indic_arc_challenge_kn.yaml b/lm-evaluation-harness/lm_eval/tasks/indic_arc_challenge/indic_arc_challenge_kn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2cee9bfbbf4583e297a07706c66b89e8929bb588 --- /dev/null 
+++ b/lm-evaluation-harness/lm_eval/tasks/indic_arc_challenge/indic_arc_challenge_kn.yaml
@@ -0,0 +1,9 @@
+dataset_name: kn
+include: indic_arc_challenge_common_yaml
+doc_to_text: "Question: {{translated_question}}\nAnswer:"
+doc_to_target: "{{translated_choices.label.index(answerKey)}}"
+doc_to_choice: "{{translated_choices.text}}"
+should_decontaminate: true
+doc_to_decontamination_query: "Question: {{translated_question}}\nAnswer:"
+
+task: indic_arc_challenge_kn
\ No newline at end of file
diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_arc_challenge/indic_arc_challenge_ml.yaml b/lm-evaluation-harness/lm_eval/tasks/indic_arc_challenge/indic_arc_challenge_ml.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..39fa8f34475914f880e6af97a99b7a2bb2ed8ece
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/indic_arc_challenge/indic_arc_challenge_ml.yaml
@@ -0,0 +1,9 @@
+dataset_name: ml
+include: indic_arc_challenge_common_yaml
+doc_to_text: "Question: {{translated_question}}\nAnswer:"
+doc_to_target: "{{translated_choices.label.index(answerKey)}}"
+doc_to_choice: "{{translated_choices.text}}"
+should_decontaminate: true
+doc_to_decontamination_query: "Question: {{translated_question}}\nAnswer:"
+
+task: indic_arc_challenge_ml
\ No newline at end of file
diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_arc_challenge/indic_arc_challenge_ta.yaml b/lm-evaluation-harness/lm_eval/tasks/indic_arc_challenge/indic_arc_challenge_ta.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..11ca1204798906b1b685cb8d38f3182693e347dd
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/indic_arc_challenge/indic_arc_challenge_ta.yaml
@@ -0,0 +1,9 @@
+dataset_name: ta
+include: indic_arc_challenge_common_yaml
+doc_to_text: "Question: {{translated_question}}\nAnswer:"
+doc_to_target: "{{translated_choices.label.index(answerKey)}}"
+doc_to_choice: "{{translated_choices.text}}"
+should_decontaminate: true
+doc_to_decontamination_query: "Question: {{translated_question}}\nAnswer:"
+
+task: indic_arc_challenge_ta
\ No newline at end of file
diff --git a/lm-evaluation-harness/lm_eval/tasks/indic_arc_challenge/indic_arc_challenge_te.yaml b/lm-evaluation-harness/lm_eval/tasks/indic_arc_challenge/indic_arc_challenge_te.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c60de530db8af44d956afe5ac84166cc17f610ef
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/indic_arc_challenge/indic_arc_challenge_te.yaml
@@ -0,0 +1,9 @@
+dataset_name: te
+include: indic_arc_challenge_common_yaml
+doc_to_text: "Question: {{translated_question}}\nAnswer:"
+doc_to_target: "{{translated_choices.label.index(answerKey)}}"
+doc_to_choice: "{{translated_choices.text}}"
+should_decontaminate: true
+doc_to_decontamination_query: "Question: {{translated_question}}\nAnswer:"
+
+task: indic_arc_challenge_te
\ No newline at end of file
diff --git a/lm-evaluation-harness/lm_eval/tasks/kobest/README.md b/lm-evaluation-harness/lm_eval/tasks/kobest/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5a160da77140f37244dde849f42ab5b3f223a0a4
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/kobest/README.md
@@ -0,0 +1,37 @@
+# KoBEST
+
+### Paper
+Title: `KOBEST: Korean Balanced Evaluation of Significant Tasks`
+
+Abstract: https://arxiv.org/abs/2204.04541
+
+A well-formulated benchmark plays a critical role in spurring advancements in the natural language processing (NLP) field, as it allows objective and precise evaluation of diverse models. As modern language models (LMs) have become more elaborate and sophisticated, more difficult benchmarks that require linguistic knowledge and reasoning have been proposed. However, most of these benchmarks only support English, and great effort is necessary to construct benchmarks for other low resource languages. To this end, we propose a new benchmark named Korean balanced evaluation of significant tasks (KoBEST), which consists of five Korean-language downstream tasks. Professional Korean linguists designed the tasks that require advanced Korean linguistic knowledge. Moreover, our data is purely annotated by humans and thoroughly reviewed to guarantee high data quality. We also provide baseline models and human performance results. Our dataset is available on the Huggingface.
+
+
+Homepage: https://huggingface.co/datasets/skt/kobest_v1
+
+### Groups and Tasks
+
+#### Groups
+
+- `kobest`
+
+#### Tasks
+
+- `kobest_boolq`
+- `kobest_copa`
+- `kobest_hellaswag`
+- `kobest_sentineg`
+- `kobest_wic`
+
+
+### Citation
+
+@misc{kim2022kobest,
+    author={Dohyeong Kim and Myeongjun Jang and Deuk Sin Kwon and Eric Davis},
+    title={KOBEST: Korean Balanced Evaluation of Significant Tasks},
+    DOI={https://doi.org/10.48550/arXiv.2204.04541},
+    publisher={arXiv},
+    year={2022},
+    month={Apr}
+}
diff --git a/lm-evaluation-harness/lm_eval/tasks/kobest/kobest_boolq.yaml b/lm-evaluation-harness/lm_eval/tasks/kobest/kobest_boolq.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e9932d56a9300f31bd96a1cd14ee2df091005b21
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/kobest/kobest_boolq.yaml
@@ -0,0 +1,23 @@
+group:
+  - kobest
+task: kobest_boolq
+dataset_path: skt/kobest_v1
+dataset_name: boolq
+output_type: multiple_choice
+training_split: train
+validation_split: validation
+test_split: test
+doc_to_text: "{{paragraph}} 질문: {{question}} 답변: "
+doc_to_target: "{{label}}"
+doc_to_choice: ["아니오", "예"]
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: True
+  - metric: f1
+    aggregation: !function utils.macro_f1_score
+    average: macro
+    hf_evaluate: true
+    higher_is_better: True
+metadata:
+  version: 1.0
diff --git a/lm-evaluation-harness/lm_eval/tasks/kobest/kobest_copa.yaml b/lm-evaluation-harness/lm_eval/tasks/kobest/kobest_copa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1f3b34e61fad86a037010dd892fd7b894346f456
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/kobest/kobest_copa.yaml
@@ -0,0 +1,23 @@
+group:
+  - kobest
+task: kobest_copa
+dataset_path: skt/kobest_v1
+dataset_name: copa
+output_type: multiple_choice
+training_split: train
+validation_split: validation
+test_split: test
+doc_to_text: !function utils.copa_doc_to_text
+doc_to_target: !function utils.copa_doc_to_target
+doc_to_choice: !function utils.copa_doc_to_choice
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: True
+  - metric: f1
+    aggregation: !function utils.macro_f1_score
+    average: macro
+    hf_evaluate: true
+    higher_is_better: True
+metadata:
+  version: 1.0
diff --git a/lm-evaluation-harness/lm_eval/tasks/kobest/kobest_hellaswag.yaml b/lm-evaluation-harness/lm_eval/tasks/kobest/kobest_hellaswag.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d83266a813ecd5a9ffd1989d45ac4c49b5779558
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/kobest/kobest_hellaswag.yaml
@@ -0,0 +1,27 @@
+group:
+  - kobest
+task: kobest_hellaswag
+dataset_path: skt/kobest_v1
+dataset_name: hellaswag
+training_split: train
+validation_split: validation +output_type: multiple_choice +test_split: test +doc_to_text: "{{query}}" +doc_to_target: "{{label}}" +process_docs: !function utils.hellaswag_process_doc +doc_to_choice: "choices" +metric_list: + - metric: acc + aggregation: mean + higher_is_better: True + - metric: acc_norm + aggregation: mean + higher_is_better: True + - metric: f1 + aggregation: !function utils.macro_f1_score + average: macro + hf_evaluate: true + higher_is_better: True +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/kobest/kobest_sentineg.yaml b/lm-evaluation-harness/lm_eval/tasks/kobest/kobest_sentineg.yaml new file mode 100644 index 0000000000000000000000000000000000000000..64319dca39c520c7a8f9c4f20f0ae2a9e44b7230 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/kobest/kobest_sentineg.yaml @@ -0,0 +1,25 @@ +group: + - kobest +task: kobest_sentineg +dataset_path: skt/kobest_v1 +dataset_name: sentineg +output_type: multiple_choice +training_split: train +validation_split: validation +test_split: test +doc_to_text: !function utils.sentineg_doc_to_text +doc_to_target: "{{label}}" +doc_to_choice: ["부정", "긍정"] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: True + - metric: f1 + aggregation: !function utils.macro_f1_score + average: macro + hf_evaluate: true + higher_is_better: True +metadata: + version: 1.0 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation-harness/lm_eval/tasks/kobest/kobest_wic.yaml b/lm-evaluation-harness/lm_eval/tasks/kobest/kobest_wic.yaml new file mode 100644 index 0000000000000000000000000000000000000000..569d3393dbe78e1bb5d92e00d4ceac439282b9d0 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/kobest/kobest_wic.yaml @@ -0,0 +1,25 @@ +group: + - kobest +task: kobest_wic +dataset_path: skt/kobest_v1 +dataset_name: wic +output_type: multiple_choice +training_split: train +validation_split: validation +test_split: test +doc_to_text: !function utils.wic_doc_to_text +doc_to_target: "{{label}}" +doc_to_choice: ['아니오', '예'] +metric_list: + - metric: acc + aggregation: mean + higher_is_better: True + - metric: f1 + aggregation: !function utils.macro_f1_score + average: macro + hf_evaluate: true + higher_is_better: True +metadata: + version: 1.0 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation-harness/lm_eval/tasks/kobest/utils.py b/lm-evaluation-harness/lm_eval/tasks/kobest/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9799ef038c09a67f92a2b174d57f5aaefa05a32f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/kobest/utils.py @@ -0,0 +1,48 @@ +from datasets import Dataset +from sklearn.metrics import f1_score + + +def copa_doc_to_text(doc: dict) -> str: + connector = {"원인": " 왜냐하면", "결과": " 그래서"}[doc["question"].strip()] + return f"""{doc["premise"]} {connector}""" + + +def copa_doc_to_target(doc: dict) -> str: + correct_choice = doc["alternative_1"] if doc["label"] == 0 else doc["alternative_2"] + return f"""{correct_choice}""" + + +def copa_doc_to_choice(doc: dict) -> list: + return [f"""{doc["alternative_1"]}""", f"""{doc["alternative_2"]}"""] + + +def sentineg_doc_to_text(doc: dict): + return f"""문장: {doc["sentence"]} 긍부정:""" + + +def wic_doc_to_text(doc: dict) -> str: + return f"""문장1: {doc["context_1"]} 문장2: {doc["context_2"]} 두 문장에서 {doc["word"]}가 같은 뜻으로 쓰였나?""" + + +def hellaswag_process_doc(doc: Dataset) -> Dataset: + def preprocessor(dataset): + return { + "query": f"""문장: {dataset["context"]}""", + "choices": [ + 
dataset["ending_1"], + dataset["ending_2"], + dataset["ending_3"], + dataset["ending_4"], + ], + "gold": int(dataset["label"]), + } + + return doc.map(preprocessor) + + +def macro_f1_score(items): + unzipped_list = list(zip(*items)) + golds = unzipped_list[0] + preds = unzipped_list[1] + fscore = f1_score(golds, preds, average="macro") + return fscore diff --git a/lm-evaluation-harness/lm_eval/tasks/mc_taco/README.md b/lm-evaluation-harness/lm_eval/tasks/mc_taco/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2bab6369468ecead4f3cfae9964e3a04d5e06423 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mc_taco/README.md @@ -0,0 +1,53 @@ +# MC Taco + +### Paper + +Title: `"Going on a vacation" takes longer than "Going for a walk": A Study of Temporal Commonsense Understanding` +Abstract: https://arxiv.org/abs/1909.03065 + +MC-TACO is a dataset of 13k question-answer pairs that require temporal commonsense +comprehension. The dataset contains five temporal properties, (1) duration (how long +an event takes), (2) temporal ordering (typical order of events), (3) typical time +(when an event occurs), (4) frequency (how often an event occurs), and (5) stationarity +(whether a state is maintained for a very long time or indefinitely). + +WARNING: Running this task with a `--limit` arg will give misleading results! The +corresponding dataset is structured such that each multiple-choice-question gathered +by the authors is split into question-option pairs, where each such pair gets +siloed into an individual document for plausibility testing. Because the harness +shuffles these documents, setting `--limit` will likely "cut off" certain candidate +answers. This is a problem because the task's metrics require an exhaustive evaluation +of a question's options. See section 4 of the paper for details. + +Homepage: https://leaderboard.allenai.org/mctaco/submissions/public + + +### Citation + +``` +BibTeX-formatted citation goes here +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet. + +#### Tasks + +* `mc_taco` + + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
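+
+To make the warning above concrete, here is a minimal, hypothetical sketch of the question-level exact match described in the paper (field names and predictions are invented; the harness config below only reports per-pair `acc` and `f1`):
+
+```python
+from collections import defaultdict
+
+# Each doc is one (question, candidate answer) pair with a plausibility label.
+docs = [
+    {"question": "How long does a vacation last?", "answer": "a week", "label": 1, "pred": 1},
+    {"question": "How long does a vacation last?", "answer": "ten seconds", "label": 0, "pred": 0},
+    {"question": "How long does a vacation last?", "answer": "a month", "label": 1, "pred": 0},
+]
+
+by_question = defaultdict(list)
+for d in docs:
+    by_question[d["question"]].append(d)
+
+# A question counts as exactly matched only if *every* candidate is judged
+# correctly, which is why truncating the doc list with --limit distorts the result.
+for question, group in by_question.items():
+    em = all(d["pred"] == d["label"] for d in group)
+    print(question, "EM:", em)
+```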
diff --git a/lm-evaluation-harness/lm_eval/tasks/mc_taco/default.yaml b/lm-evaluation-harness/lm_eval/tasks/mc_taco/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..16aee3f7e76098acdd53ec88adf5cc078e3a5907 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/mc_taco/default.yaml @@ -0,0 +1,15 @@ +task: mc_taco +dataset_path: mc_taco +output_type: multiple_choice +validation_split: validation +test_split: test +doc_to_text: "{{sentence}}\nQuestion: {{question}}\nAnswer: {{answer}}\nPlausible:" +doc_to_target: label +doc_to_choice: ["no", "yes"] +should_decontaminate: true +doc_to_decontamination_query: "{{question}} {{sentence}}" +metric_list: + - metric: acc + - metric: f1 +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/minerva_math/README.md b/lm-evaluation-harness/lm_eval/tasks/minerva_math/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7bfb7d5015a5b465a47b279a4dfb29ae170a5bfc --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/minerva_math/README.md @@ -0,0 +1,70 @@ +# MATH +ℹ️ This is the 4-shot variant! +## Paper +Measuring Mathematical Problem Solving With the MATH Dataset +https://arxiv.org/abs/2103.03874 + +Many intellectual endeavors require mathematical problem solving, but this skill remains beyond the capabilities of computers. To measure this ability in machine learning models, we introduce MATH, a new dataset of 12,500 challenging competition mathematics problems. Each problem in MATH has a full step-by-step solution which can be used to teach models to generate answer derivations and explanations. + +NOTE: The few-shot and the generated answer extraction is based on the [Minerva](https://arxiv.org/abs/2206.14858) and exact match equivalence is calculated using the `sympy` library. This requires additional dependencies, which can be installed via the `lm-eval[math]` extra. + +Homepage: https://github.com/hendrycks/math + + +## Citation +``` +@article{hendrycksmath2021, + title={Measuring Mathematical Problem Solving With the MATH Dataset}, + author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt}, + journal={NeurIPS}, + year={2021} +} + +@misc{2206.14858, +Author = {Aitor Lewkowycz and Anders Andreassen and David Dohan and Ethan Dyer and Henryk Michalewski and Vinay Ramasesh and Ambrose Slone and Cem Anil and Imanol Schlag and Theo Gutman-Solo and Yuhuai Wu and Behnam Neyshabur and Guy Gur-Ari and Vedant Misra}, +Title = {Solving Quantitative Reasoning Problems with Language Models}, +Year = {2022}, +Eprint = {arXiv:2206.14858}, +} +``` + +### Groups, Benchmarks and Tasks + +#### Benchmarks + +- `minerva_math` + +#### Groups + +- `math_word_problems` +- `generate_until` + +#### Tasks + +- `minerva_math_algebra` +- `minerva_math_counting_and_prob` +- `minerva_math_geometry` +- `minerva_math_intermediate_algebra` +- `minerva_math_num_theory` +- `minerva_math_prealgebra` +- `minerva_math_precalc` + +### Checklist + +The checklist is the following: + +For adding novel benchmarks/datasets to the library: +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? 
+ * The implementation in the original paper is one where the model is first fine-tuned on the data. They do have a few-shot evaluation for GPT-3, however the few-shot context used here is sourced from [Lewkowycz et al](https://arxiv.org/abs/2206.14858). The achieved accuracy on Llama-2 models is comparable to that provided in the paper, though not identical. + + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [x] Have you noted which, if any, published evaluation setups are matched by this variant? + +### Variant Wishlist + +- [ ] zero-shot variant diff --git a/lm-evaluation-harness/lm_eval/tasks/minerva_math/minerva_math_algebra.yaml b/lm-evaluation-harness/lm_eval/tasks/minerva_math/minerva_math_algebra.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c0a1547bf4656a009fa3261e9f8544d4152633d4 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/minerva_math/minerva_math_algebra.yaml @@ -0,0 +1,27 @@ +group: + - math_word_problems +task: minerva_math_algebra +dataset_path: EleutherAI/hendrycks_math +process_docs: !function utils.process_docs +dataset_name: algebra +output_type: generate_until +training_split: train +test_split: test +doc_to_text: !function utils.doc_to_text +process_results: !function utils.process_results +doc_to_target: "{{answer}}" +generation_kwargs: + until: + - "Problem:" + do_sample: false + temperature: 0 +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true +num_fewshot: 0 +metadata: + version: 1.0 + num_fewshot: 4 +dataset_kwargs: + trust_remote_code: true diff --git a/lm-evaluation-harness/lm_eval/tasks/minerva_math/minerva_math_counting_and_prob.yaml b/lm-evaluation-harness/lm_eval/tasks/minerva_math/minerva_math_counting_and_prob.yaml new file mode 100644 index 0000000000000000000000000000000000000000..688cd711c50d005d5d78ca55116ad333d96161ce --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/minerva_math/minerva_math_counting_and_prob.yaml @@ -0,0 +1,3 @@ +include: minerva_math_algebra.yaml +dataset_name: counting_and_probability +task: minerva_math_counting_and_prob diff --git a/lm-evaluation-harness/lm_eval/tasks/minerva_math/minerva_math_geometry.yaml b/lm-evaluation-harness/lm_eval/tasks/minerva_math/minerva_math_geometry.yaml new file mode 100644 index 0000000000000000000000000000000000000000..079ee70e9ed8997f351d1732c0c88dad1e4896de --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/minerva_math/minerva_math_geometry.yaml @@ -0,0 +1,3 @@ +include: minerva_math_algebra.yaml +dataset_name: geometry +task: minerva_math_geometry diff --git a/lm-evaluation-harness/lm_eval/tasks/minerva_math/minerva_math_intermediate_algebra.yaml b/lm-evaluation-harness/lm_eval/tasks/minerva_math/minerva_math_intermediate_algebra.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7b3f063c36e10063dd06be93c290820a787ddd1d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/minerva_math/minerva_math_intermediate_algebra.yaml @@ -0,0 +1,3 @@ +include: minerva_math_algebra.yaml +dataset_name: intermediate_algebra +task: minerva_math_intermediate_algebra diff --git a/lm-evaluation-harness/lm_eval/tasks/minerva_math/minerva_math_prealgebra.yaml b/lm-evaluation-harness/lm_eval/tasks/minerva_math/minerva_math_prealgebra.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..865e2f2c6e5397a07fb473a89f4d8eaf47d3eb52 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/minerva_math/minerva_math_prealgebra.yaml @@ -0,0 +1,3 @@ +include: minerva_math_algebra.yaml +dataset_name: prealgebra +task: minerva_math_prealgebra diff --git a/lm-evaluation-harness/lm_eval/tasks/minerva_math/minerva_math_precalc.yaml b/lm-evaluation-harness/lm_eval/tasks/minerva_math/minerva_math_precalc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..06e63abc7c206b43759217b38cd5db2395e554a9 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/minerva_math/minerva_math_precalc.yaml @@ -0,0 +1,3 @@ +include: minerva_math_algebra.yaml +dataset_name: precalculus +task: minerva_math_precalc diff --git a/lm-evaluation-harness/lm_eval/tasks/minerva_math/utils.py b/lm-evaluation-harness/lm_eval/tasks/minerva_math/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0de9bcafa1b15187f5d485b6253c3cab489fa164 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/minerva_math/utils.py @@ -0,0 +1,309 @@ +import re +import signal +from typing import Dict, List, Optional + +import datasets + +from lm_eval.utils import eval_logger + + +try: + import sympy + from sympy.parsing.latex import parse_latex +except ModuleNotFoundError: + raise ModuleNotFoundError( + "`sympy` is required for generating translation task prompt templates. \ +please install sympy via pip install lm-eval[math] or pip install -e .[math]", + ) + + +# taken from +# https://github.com/wellecks/lm-evaluation-harness/blob/master/lm_eval/tasks/minerva_math.py +def doc_to_text(doc: dict) -> str: + PROMPT = r"""Problem: +Find the domain of the expression $\frac{\sqrt{x-2}}{\sqrt{5-x}}$.} + +Solution: +The expressions inside each square root must be non-negative. Therefore, $x-2 \ge 0$, so $x\ge2$, and $5 - x \ge 0$, so $x \le 5$. Also, the denominator cannot be equal to zero, so $5-x>0$, which gives $x<5$. Therefore, the domain of the expression is $\boxed{[2,5)}$. +Final Answer: The final answer is $[2,5)$. I hope it is correct. + +Problem: +If $\det \mathbf{A} = 2$ and $\det \mathbf{B} = 12,$ then find $\det (\mathbf{A} \mathbf{B}).$ + +Solution: +We have that $\det (\mathbf{A} \mathbf{B}) = (\det \mathbf{A})(\det \mathbf{B}) = (2)(12) = \boxed{24}.$ +Final Answer: The final answer is $24$. I hope it is correct. + +Problem: +Terrell usually lifts two 20-pound weights 12 times. If he uses two 15-pound weights instead, how many times must Terrell lift them in order to lift the same total weight? + +Solution: +If Terrell lifts two 20-pound weights 12 times, he lifts a total of $2\cdot 12\cdot20=480$ pounds of weight. If he lifts two 15-pound weights instead for $n$ times, he will lift a total of $2\cdot15\cdot n=30n$ pounds of weight. Equating this to 480 pounds, we can solve for $n$: +\begin{align*} +30n&=480\\ +\Rightarrow\qquad n&=480/30=\boxed{16} +\end{align*} +Final Answer: The final answer is $16$. I hope it is correct. + +Problem: +If the system of equations + +\begin{align*} +6x-4y&=a,\\ +6y-9x &=b. +\end{align*}has a solution $(x, y)$ where $x$ and $y$ are both nonzero, +find $\frac{a}{b},$ assuming $b$ is nonzero. + +Solution: +If we multiply the first equation by $-\frac{3}{2}$, we obtain + +$$6y-9x=-\frac{3}{2}a.$$Since we also know that $6y-9x=b$, we have + +$$-\frac{3}{2}a=b\Rightarrow\frac{a}{b}=\boxed{-\frac{2}{3}}.$$ +Final Answer: The final answer is $-\frac{2}{3}$. 
I hope it is correct.""" + + return PROMPT + "\n\n" + "Problem:" + "\n" + doc["problem"] + "\n\n" + "Solution:" + + +def process_docs(dataset: datasets.Dataset) -> datasets.Dataset: + def _process_doc(doc: dict) -> dict: + out_doc = { + "problem": doc["problem"], + "solution": doc["solution"], + "answer": normalize_final_answer( + remove_boxed(last_boxed_only_string(doc["solution"])) + ), + } + return out_doc + + return dataset.map(_process_doc) + + +def process_results(doc: dict, results: List[str]) -> Dict[str, int]: + candidates = results[0] + + unnormalized_answer = get_unnormalized_answer(candidates) + answer = normalize_final_answer(unnormalized_answer) + + if is_equiv(answer, doc["answer"]): + retval = 1 + else: + retval = 0 + + results = { + "exact_match": retval, + } + return results + + +def last_boxed_only_string(string: str) -> Optional[str]: + idx = string.rfind("\\boxed") + if "\\boxed " in string: + return "\\boxed " + string.split("\\boxed ")[-1].split("$")[0] + if idx < 0: + idx = string.rfind("\\fbox") + if idx < 0: + return None + + i = idx + right_brace_idx = None + num_left_braces_open = 0 + while i < len(string): + if string[i] == "{": + num_left_braces_open += 1 + if string[i] == "}": + num_left_braces_open -= 1 + if num_left_braces_open == 0: + right_brace_idx = i + break + i += 1 + + if right_brace_idx is None: + retval = None + else: + retval = string[idx : right_brace_idx + 1] + + return retval + + +def remove_boxed(s: str) -> str: + if "\\boxed " in s: + left = "\\boxed " + assert s[: len(left)] == left + return s[len(left) :] + + left = "\\boxed{" + + assert s[: len(left)] == left + assert s[-1] == "}" + + return s[len(left) : -1] + + +class timeout: + def __init__(self, seconds=1, error_message="Timeout"): + self.seconds = seconds + self.error_message = error_message + + def handle_timeout(self, signum, frame): + raise TimeoutError(self.error_message) + + def __enter__(self): + signal.signal(signal.SIGALRM, self.handle_timeout) + signal.alarm(self.seconds) + + def __exit__(self, type, value, traceback): + signal.alarm(0) + + +def is_equiv(x1: str, x2: str) -> bool: + """ + x1 and x2 are normalized latex string + """ + try: + with timeout(seconds=5): + try: + parsed_x1 = parse_latex(x1) + parsed_x2 = parse_latex(x2) + except ( + sympy.parsing.latex.errors.LaTeXParsingError, + sympy.SympifyError, + TypeError, + ): + eval_logger.debug(f"couldn't parse one of {x1} or {x2}") + return False + + try: + diff = parsed_x1 - parsed_x2 + except TypeError: + eval_logger.debug(f"couldn't subtract {x1} and {x2}") + return False + + try: + if sympy.simplify(diff) == 0: + return True + else: + return False + except ValueError: + eval_logger.debug( + f"Had some trouble simplifying when comparing {x1} and {x2}" + ) + except TimeoutError: + eval_logger.debug(f"Timed out comparing {x1} and {x2}") + return False + except ImportError as e: + eval_logger.error(e) + raise + except Exception as e: + eval_logger.debug(f"Failed comparing {x1} and {x2} with {e}") + return False + + +def get_unnormalized_answer(text: str) -> str: + INVALID_ANSWER = "[invalidanswer]" + end_seq = "I hope it is correct." + text += end_seq + match = re.search( + r"Final Answer: The final answer is(.*?). 
I hope it is correct.", + text, + ) + if match: + return match.group(1).strip() + else: + return INVALID_ANSWER + + +SUBSTITUTIONS = [ + ("an ", ""), + ("a ", ""), + (".$", "$"), + ("\\$", ""), + (r"\ ", ""), + (" ", ""), + ("mbox", "text"), + (",\\text{and}", ","), + ("\\text{and}", ","), + ("\\text{m}", "\\text{}"), +] +REMOVED_EXPRESSIONS = [ + "square", + "ways", + "integers", + "dollars", + "mph", + "inches", + "ft", + "hours", + "km", + "units", + "\\ldots", + "sue", + "points", + "feet", + "minutes", + "digits", + "cents", + "degrees", + "cm", + "gm", + "pounds", + "meters", + "meals", + "edges", + "students", + "childrentickets", + "multiples", + "\\text{s}", + "\\text{.}", + "\\text{\ns}", + "\\text{}^2", + "\\text{}^3", + "\\text{\n}", + "\\text{}", + r"\mathrm{th}", + r"^\circ", + r"^{\circ}", + r"\;", + r",\!", + "{,}", + '"', + "\\dots", +] + + +def normalize_final_answer(final_answer: str) -> str: + """ + Normalize a final answer to a quantitative reasoning question. + + Copied character for character from appendix D of Lewkowycz et al. (2022) + """ + final_answer = final_answer.split("=")[-1] + + for before, after in SUBSTITUTIONS: + final_answer = final_answer.replace(before, after) + for expr in REMOVED_EXPRESSIONS: + final_answer = final_answer.replace(expr, "") + + # Extract answer that is in LaTeX math, is bold, + # is surrounded by a box, etc. + final_answer = re.sub(r"(.*?)(\$)(.*?)(\$)(.*)", "$\\3$", final_answer) + final_answer = re.sub(r"(\\text\{)(.*?)(\})", "\\2", final_answer) + final_answer = re.sub(r"(\\textbf\{)(.*?)(\})", "\\2", final_answer) + final_answer = re.sub(r"(\\overline\{)(.*?)(\})", "\\2", final_answer) + final_answer = re.sub(r"(\\boxed\{)(.*)(\})", "\\2", final_answer) + + # Normalize shorthand TeX: + # \fracab -> \frac{a}{b} + # \frac{abc}{bef} -> \frac{abc}{bef} + # \fracabc -> \frac{a}{b}c + # \sqrta -> \sqrt{a} + # \sqrtab -> sqrt{a}b + final_answer = re.sub(r"(frac)([^{])(.)", "frac{\\2}{\\3}", final_answer) + final_answer = re.sub(r"(sqrt)([^{])", "sqrt{\\2}", final_answer) + final_answer = final_answer.replace("$", "") + + # Normalize 100,000 -> 100000 + if final_answer.replace(",", "").isdigit(): + final_answer = final_answer.replace(",", "") + + return final_answer diff --git a/lm-evaluation-harness/lm_eval/tasks/polemo2/README.md b/lm-evaluation-harness/lm_eval/tasks/polemo2/README.md new file mode 100644 index 0000000000000000000000000000000000000000..837c704dfd5219fe49016b0eb9052b75dc612b99 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/polemo2/README.md @@ -0,0 +1,57 @@ +# PolEmo 2.0 + +### Paper + +Title: `Multi-Level Sentiment Analysis of PolEmo 2.0: Extended Corpus of Multi-Domain Consumer Reviews` + +Abstract: https://aclanthology.org/K19-1092/ + +The PolEmo 2.0 is a dataset of online consumer reviews in Polish from four domains: medicine, hotels, products, and university. It is human-annotated on a level of full reviews and individual sentences. It comprises over 8000 reviews, about 85% from the medicine and hotel domains. +The goal is to predict the sentiment of a review. There are two separate test sets, to allow for in-domain (medicine and hotels) as well as out-of-domain (products and university) validation. 
+ +Homepage: https://clarin-pl.eu/dspace/handle/11321/710 + + +### Citation + +``` +@inproceedings{kocon-etal-2019-multi, + title = "Multi-Level Sentiment Analysis of {P}ol{E}mo 2.0: Extended Corpus of Multi-Domain Consumer Reviews", + author = "Koco{\'n}, Jan and + Mi{\l}kowski, Piotr and + Za{\'s}ko-Zieli{\'n}ska, Monika", + booktitle = "Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)", + month = nov, + year = "2019", + address = "Hong Kong, China", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/K19-1092", + doi = "10.18653/v1/K19-1092", + pages = "980--991", + abstract = "In this article we present an extended version of PolEmo {--} a corpus of consumer reviews from 4 domains: medicine, hotels, products and school. Current version (PolEmo 2.0) contains 8,216 reviews having 57,466 sentences. Each text and sentence was manually annotated with sentiment in 2+1 scheme, which gives a total of 197,046 annotations. We obtained a high value of Positive Specific Agreement, which is 0.91 for texts and 0.88 for sentences. PolEmo 2.0 is publicly available under a Creative Commons copyright license. We explored recent deep learning approaches for the recognition of sentiment, such as Bi-directional Long Short-Term Memory (BiLSTM) and Bidirectional Encoder Representations from Transformers (BERT).", +} +``` + +### Groups and Tasks + +#### Groups + +* `polemo2`: Evaluates `polemo2_in` and `polemo2_out` + +#### Tasks + +* `polemo2_in`: evaluates sentiment predictions of in-domain (medicine and hotels) reviews +* `polemo2_out`: evaluates sentiment predictions of out-of-domain (products and university) reviews + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? + + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [x] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation-harness/lm_eval/tasks/polemo2/polemo2_in.yaml b/lm-evaluation-harness/lm_eval/tasks/polemo2/polemo2_in.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2fa16db87b29d18912dd030626d90b559821ea81 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/polemo2/polemo2_in.yaml @@ -0,0 +1,46 @@ +group: + - polemo2 +task: polemo2_in +dataset_path: allegro/klej-polemo2-in +dataset_name: null +output_type: generate_until +training_split: train +validation_split: validation +test_split: test +doc_to_text: "Opinia: \"{{sentence}}\"\nOkreśl sentyment podanej opinii. Możliwe odpowiedzi:\nA - Neutralny\nB - Negatywny\nC - Pozytywny\nD - Niejednoznaczny\nPrawidłowa odpowiedź:" +doc_to_target: "{{['__label__meta_zero', '__label__meta_minus_m', '__label__meta_plus_m', '__label__meta_amb'].index(target)}}" +should_decontaminate: true +doc_to_decontamination_query: "{{sentence}}" +generation_kwargs: + until: + - "." 
+ - "," + do_sample: false + temperature: 0.0 + max_gen_toks: 50 +filter_list: + - name: "score-first" + filter: + - function: "regex" + regex_pattern: "(\\b[ABCD]\\b)" + - function: "take_first" + - function: "map" + mapping_dict: + A: 0 + B: 1 + C: 2 + D: 3 + default_value: -1 + - function: "take_first" +metric_list: + - metric: f1 + aggregation: mean + higher_is_better: true + hf_evaluate: true + average: micro + - metric: accuracy + aggregation: mean + higher_is_better: true + hf_evaluate: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/polemo2/polemo2_out.yaml b/lm-evaluation-harness/lm_eval/tasks/polemo2/polemo2_out.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a815a780055879bd275d50361fff6fff6f106640 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/polemo2/polemo2_out.yaml @@ -0,0 +1,4 @@ +include: polemo2_in.yaml +task: polemo2_out +dataset_path: allegro/klej-polemo2-out +dataset_name: klej-polemo2-out diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Ensenada b/venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Ensenada new file mode 100644 index 0000000000000000000000000000000000000000..63dfdf48a68d02240737ecd6af081e02eb0b6317 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Ensenada differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Guayaquil b/venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Guayaquil new file mode 100644 index 0000000000000000000000000000000000000000..40831be11e0ab028ada80cecf5fdc0e2a233ee17 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Guayaquil differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Knox_IN b/venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Knox_IN new file mode 100644 index 0000000000000000000000000000000000000000..025d132dd48ba978c6fedf86d70173127be49d49 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Knox_IN differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Louisville b/venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Louisville new file mode 100644 index 0000000000000000000000000000000000000000..3a335b37165d2b3d6a09a1716cb158e272087c9f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Louisville differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Montserrat b/venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Montserrat new file mode 100644 index 0000000000000000000000000000000000000000..a662a57137b69e8ba445e899566222cdd422a764 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Montserrat differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Nuuk b/venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Nuuk new file mode 100644 index 0000000000000000000000000000000000000000..29958cf12a9df7cf775e3337b4894e4cf4b82f58 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Nuuk differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Port-au-Prince b/venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Port-au-Prince new file mode 100644 index 0000000000000000000000000000000000000000..287f1439266639f9564149d6e162feaba3fbed86 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Port-au-Prince differ diff --git 
a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Brazil/East b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Brazil/East new file mode 100644 index 0000000000000000000000000000000000000000..67935ff4da8f527e03cd05ed2aa20e601e10f921 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Brazil/East differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Chile/Continental b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Chile/Continental new file mode 100644 index 0000000000000000000000000000000000000000..010c6bd04cae79078540da560ce38400bfe0ade6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Chile/Continental differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Chile/EasterIsland b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Chile/EasterIsland new file mode 100644 index 0000000000000000000000000000000000000000..184cb6a83b3392d0492c42297531c85e7e38c4f5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Chile/EasterIsland differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT new file mode 100644 index 0000000000000000000000000000000000000000..c63474664a289aa3c3c0d8b2ce06d484679754c0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT+0 b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT+0 new file mode 100644 index 0000000000000000000000000000000000000000..c63474664a289aa3c3c0d8b2ce06d484679754c0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT+0 differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT+4 b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT+4 new file mode 100644 index 0000000000000000000000000000000000000000..5a25ff2a6afda2cb09b9e147ad20610bc1923444 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT+4 differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT+6 b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT+6 new file mode 100644 index 0000000000000000000000000000000000000000..06e777d57e0267a0635b6b284729fddcfe6221dd Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT+6 differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT+9 b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT+9 new file mode 100644 index 0000000000000000000000000000000000000000..78b9daa373d2aa2856eafcc92ebc6d899cafde5c Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT+9 differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT-10 b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT-10 new file mode 100644 index 0000000000000000000000000000000000000000..68ff77db0d95c7d054ef33c05e05ba71bcbbbdd8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT-10 differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT-14 b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT-14 new file mode 100644 index 0000000000000000000000000000000000000000..7e9f9c465ce6211c65d617f60472c9b55b5052c5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/GMT-14 differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/Greenwich b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/Greenwich new file mode 
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/Greenwich b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/Greenwich
new file mode 100644
index 0000000000000000000000000000000000000000..c63474664a289aa3c3c0d8b2ce06d484679754c0
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/Greenwich differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/UTC b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/UTC
new file mode 100644
index 0000000000000000000000000000000000000000..91558be0c2bf903b2364215ba26d5227d6126508
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/UTC differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/Zulu b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/Zulu
new file mode 100644
index 0000000000000000000000000000000000000000..91558be0c2bf903b2364215ba26d5227d6126508
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Etc/Zulu differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Astrakhan b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Astrakhan
new file mode 100644
index 0000000000000000000000000000000000000000..a41624f5df9698d78049008a2bd8a77395c0480a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Astrakhan differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Belgrade b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Belgrade
new file mode 100644
index 0000000000000000000000000000000000000000..27de456f16ab549627b284a39e2265cbdb4ad8e9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Belgrade differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Lisbon b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Lisbon
new file mode 100644
index 0000000000000000000000000000000000000000..55f01930ba92ff6852ae4745e78adb5f96c5b057
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Lisbon differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Ljubljana b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Ljubljana
new file mode 100644
index 0000000000000000000000000000000000000000..27de456f16ab549627b284a39e2265cbdb4ad8e9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Ljubljana differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Moscow b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Moscow
new file mode 100644
index 0000000000000000000000000000000000000000..ddb3f4e99a1030f33b56fad986c8d9c16e59eb32
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Moscow differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Podgorica b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Podgorica
new file mode 100644
index 0000000000000000000000000000000000000000..27de456f16ab549627b284a39e2265cbdb4ad8e9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Podgorica differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Samara b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Samara
new file mode 100644
index 0000000000000000000000000000000000000000..d0ea2f25e9b4acaf3167a09a1c647943425e51b1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Samara differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Simferopol b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Simferopol
new file mode 100644
index 0000000000000000000000000000000000000000..4bf24de1d9f8ebc410f120aa83d98b7e41d1e6c4
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Simferopol differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Skopje b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Skopje
new file mode 100644
index 0000000000000000000000000000000000000000..27de456f16ab549627b284a39e2265cbdb4ad8e9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Skopje differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Sofia b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Sofia
new file mode 100644
index 0000000000000000000000000000000000000000..0e4d879332d21c93c229fc25587205020eeb3127
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Sofia differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Tallinn b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Tallinn
new file mode 100644
index 0000000000000000000000000000000000000000..b5acca3cf51e7f7b3176965748688ff41720246f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Tallinn differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Tirane b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Tirane
new file mode 100644
index 0000000000000000000000000000000000000000..0b86017d243f1b7bbb41d6b4feefcb2b7edfc7d8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Tirane differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Zagreb b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Zagreb
new file mode 100644
index 0000000000000000000000000000000000000000..27de456f16ab549627b284a39e2265cbdb4ad8e9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Zagreb differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Zaporozhye b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Zaporozhye
new file mode 100644
index 0000000000000000000000000000000000000000..52efea88065b220e44fd876de3bf3090fe62cc79
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Zaporozhye differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/US/Alaska b/venv/lib/python3.10/site-packages/pytz/zoneinfo/US/Alaska
new file mode 100644
index 0000000000000000000000000000000000000000..9bbb2fd3b361ea8aa4c126d14df5fa370343a63f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/US/Alaska differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/US/Aleutian b/venv/lib/python3.10/site-packages/pytz/zoneinfo/US/Aleutian
new file mode 100644
index 0000000000000000000000000000000000000000..43236498f681cc06f64ca2afa613880331fe6fbb
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/US/Aleutian differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/US/Central b/venv/lib/python3.10/site-packages/pytz/zoneinfo/US/Central
new file mode 100644
index 0000000000000000000000000000000000000000..c6981a06b1d9c26f447518efe265a6454726eae7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/US/Central differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/US/East-Indiana b/venv/lib/python3.10/site-packages/pytz/zoneinfo/US/East-Indiana
new file mode 100644
index 0000000000000000000000000000000000000000..a84b6e99671f7305d1c459956e16a4dd772fc60e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/US/East-Indiana differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/US/Eastern b/venv/lib/python3.10/site-packages/pytz/zoneinfo/US/Eastern
new file mode 100644
index 0000000000000000000000000000000000000000..a8b9ab1992257d721ad627b14f535c3d4b020888
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/US/Eastern differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/US/Indiana-Starke b/venv/lib/python3.10/site-packages/pytz/zoneinfo/US/Indiana-Starke
new file mode 100644
index 0000000000000000000000000000000000000000..025d132dd48ba978c6fedf86d70173127be49d49
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/US/Indiana-Starke differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/US/Samoa b/venv/lib/python3.10/site-packages/pytz/zoneinfo/US/Samoa
new file mode 100644
index 0000000000000000000000000000000000000000..cb56709a77dedb471150f4907771bf38f1879ba4
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/US/Samoa differ