diff --git a/ckpts/universal/global_step20/zero/16.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/16.post_attention_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..2beb56f0425e5f9bd1e4e40a3043f01662b6d4fa --- /dev/null +++ b/ckpts/universal/global_step20/zero/16.post_attention_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d39a7a1c189cdf31acd433b90f046b319e9ff43a3d908799fa7764268c8e00c9 +size 9372 diff --git a/ckpts/universal/global_step20/zero/21.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/21.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..64a7d6d898afc7b2cf686386805ceb6e545d666d --- /dev/null +++ b/ckpts/universal/global_step20/zero/21.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3c282488722e3682cc79200a0a78bc6aed3f3f15b7cdae9e2f69030a62d9ef4 +size 33555612 diff --git a/ckpts/universal/global_step20/zero/6.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/6.mlp.dense_h_to_4h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..76cfc17237aff982f2d5320cd44778f4acc2735c --- /dev/null +++ b/ckpts/universal/global_step20/zero/6.mlp.dense_h_to_4h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31b0109ffbd63bcc6160beccc718cf7e609a103bd16646760ed129c646ffd32a +size 33555627 diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_gaz_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_gaz_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ee161d81f62ef363f3321ca446216b3c81818d76 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_gaz_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "gaz_Latn" +"include": "_default_template_yaml" +"task": "belebele_gaz_Latn" +"test_split": "gaz_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_mri_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_mri_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d704cdc478175da6c8894a08da2d2e177f895ed2 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_mri_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "mri_Latn" +"include": "_default_template_yaml" +"task": "belebele_mri_Latn" +"test_split": "mri_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tur_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tur_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ee490bb0bab9c0f32c8e62d6d0bb553cbb91a192 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tur_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "tur_Latn" +"include": "_default_template_yaml" +"task": "belebele_tur_Latn" +"test_split": "tur_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ukr_Cyrl.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ukr_Cyrl.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c24156d846cb64db04877d9c36d394b54f56aa3e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ukr_Cyrl.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "ukr_Cyrl" +"include": "_default_template_yaml" +"task": "belebele_ukr_Cyrl" +"test_split": "ukr_Cyrl" diff 
--git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_urd_Arab.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_urd_Arab.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a8c54e9ea623535b89b9147c9d9660a5723c5bdd --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_urd_Arab.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "urd_Arab" +"include": "_default_template_yaml" +"task": "belebele_urd_Arab" +"test_split": "urd_Arab" diff --git a/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_anatomy.yaml b/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_anatomy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..84161ec30ee875253d988a395f892b7982631765 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_anatomy.yaml @@ -0,0 +1,4 @@ +"dataset_name": "anatomy" +"description": "以下是关于解剖学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_anatomy" diff --git a/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_chinese_driving_rule.yaml b/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_chinese_driving_rule.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2dce17c0f0c1f9a99aff32ee633eab90026e823f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_chinese_driving_rule.yaml @@ -0,0 +1,4 @@ +"dataset_name": "chinese_driving_rule" +"description": "以下是关于中国驾驶规则的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_chinese_driving_rule" diff --git a/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_clinical_knowledge.yaml b/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_clinical_knowledge.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6ed36425f2c3b866e62e0ac9b38dd0aeab118916 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_clinical_knowledge.yaml @@ -0,0 +1,4 @@ +"dataset_name": "clinical_knowledge" +"description": "以下是关于临床知识的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_clinical_knowledge" diff --git a/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_college_law.yaml b/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_college_law.yaml new file mode 100644 index 0000000000000000000000000000000000000000..717784ac3d636cfde4560d11c85f84a963d7e154 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_college_law.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_law" +"description": "以下是关于大学法律的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_college_law" diff --git a/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_electrical_engineering.yaml b/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_electrical_engineering.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2bb920b53ab8856d717fea8e07e87077ec3b3f71 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_electrical_engineering.yaml @@ -0,0 +1,4 @@ +"dataset_name": "electrical_engineering" +"description": "以下是关于电气工程的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_electrical_engineering" diff --git a/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_elementary_chinese.yaml b/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_elementary_chinese.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6f67be3fc40f5c038b455edcc6076675a4451261 --- /dev/null +++ 
b/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_elementary_chinese.yaml @@ -0,0 +1,4 @@ +"dataset_name": "elementary_chinese" +"description": "以下是关于小学语文的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_elementary_chinese" diff --git a/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_ethnology.yaml b/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_ethnology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..88a653a9ee5e5978113626a35acbe50bd2ea5437 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_ethnology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "ethnology" +"description": "以下是关于民族学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_ethnology" diff --git a/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_global_facts.yaml b/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_global_facts.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6982be9468bebc3d99a53baf120a11eae52704bb --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_global_facts.yaml @@ -0,0 +1,4 @@ +"dataset_name": "global_facts" +"description": "以下是关于全球事实的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_global_facts" diff --git a/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_high_school_geography.yaml b/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_high_school_geography.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c73ebe9171df9e9f0fbdf2fecddb251e56884702 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_high_school_geography.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_geography" +"description": "以下是关于高中地理的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_high_school_geography" diff --git a/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_professional_psychology.yaml b/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_professional_psychology.yaml new file mode 100644 index 0000000000000000000000000000000000000000..83f0255591a17711d6ac99cf164a29ffe2a69866 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_professional_psychology.yaml @@ -0,0 +1,4 @@ +"dataset_name": "professional_psychology" +"description": "以下是关于专业心理学的单项选择题,请直接给出正确答案的选项。\n\n" +"include": "_default_template_yaml" +"task": "cmmlu_professional_psychology" diff --git a/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english.yaml b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d95c83d01c681dede5e77797ab954af0797da104 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english.yaml @@ -0,0 +1,23 @@ +group: + - crows_pairs + - social_bias + - loglikelihood +task: crows_pairs_english +dataset_path: BigScienceBiasEval/crows_pairs_multilingual +dataset_name: english +test_split: test +output_type: multiple_choice +doc_to_text: "" +doc_to_target: 0 +doc_to_choice: !function utils.doc_to_choice +target_delimiter: "" +process_results: !function utils.process_results +metric_list: + - metric: likelihood_diff + aggregation: mean + higher_is_better: false + - metric: pct_stereotype + aggregation: mean + higher_is_better: false +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_autre.yaml 
b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_autre.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5b456206f774c49d2d32a92bfb6733f22bce609c --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_autre.yaml @@ -0,0 +1,4 @@ +include: crows_pairs_english.yaml +task: crows_pairs_english_autre +dataset_name: english +process_docs: !function utils.filter_autre diff --git a/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_disability.yaml b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_disability.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9830d8140e68e5fb9b48d16e61ed3904e1d5ff06 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_disability.yaml @@ -0,0 +1,4 @@ +include: crows_pairs_english.yaml +task: crows_pairs_english_disability +dataset_name: english +process_docs: !function utils.filter_disability diff --git a/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_gender.yaml b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_gender.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d6e185c109163bc9e9919853b789267ae8a87ae6 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_gender.yaml @@ -0,0 +1,4 @@ +include: crows_pairs_english.yaml +task: crows_pairs_english_gender +dataset_name: english +process_docs: !function utils.filter_gender diff --git a/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_race_color.yaml b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_race_color.yaml new file mode 100644 index 0000000000000000000000000000000000000000..69e22c53712169f9a12016ece922bb7bf81c7d24 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_race_color.yaml @@ -0,0 +1,4 @@ +include: crows_pairs_english.yaml +task: crows_pairs_english_race_color +dataset_name: english +process_docs: !function utils.filter_race_color diff --git a/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_religion.yaml b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_religion.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7c62882a0a96f60578d681a0dcd174e24317ecee --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_religion.yaml @@ -0,0 +1,4 @@ +include: crows_pairs_english.yaml +task: crows_pairs_english_religion +dataset_name: english +process_docs: !function utils.filter_religion diff --git a/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_sexual_orientation.yaml b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_sexual_orientation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d678e75ca401570b9e80602282af0fb53200df90 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_sexual_orientation.yaml @@ -0,0 +1,4 @@ +include: crows_pairs_english.yaml +task: crows_pairs_english_sexual_orientation +dataset_name: english +process_docs: !function utils.filter_orientation diff --git a/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french.yaml b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4eb7f0034149f08f30249758c2baff4a8f0164e9 --- /dev/null +++ 
b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french.yaml @@ -0,0 +1,3 @@ +include: crows_pairs_english.yaml +task: crows_pairs_french +dataset_name: french diff --git a/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_autre.yaml b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_autre.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5f47f99254edff8aecb5ebf9979edb92360e1e81 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_autre.yaml @@ -0,0 +1,4 @@ +include: crows_pairs_english.yaml +task: crows_pairs_french_autre +dataset_name: french +process_docs: !function utils.filter_autre diff --git a/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_disability.yaml b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_disability.yaml new file mode 100644 index 0000000000000000000000000000000000000000..643b16fd25e67c90f376b646bccd074e062a57f6 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_disability.yaml @@ -0,0 +1,4 @@ +include: crows_pairs_english.yaml +task: crows_pairs_french_disability +dataset_name: french +process_docs: !function utils.filter_disability diff --git a/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_gender.yaml b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_gender.yaml new file mode 100644 index 0000000000000000000000000000000000000000..abf645178d698c199997a51eb4c140b1179ef423 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_gender.yaml @@ -0,0 +1,4 @@ +include: crows_pairs_english.yaml +task: crows_pairs_french_gender +dataset_name: french +process_docs: !function utils.filter_gender diff --git a/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_nationality.yaml b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_nationality.yaml new file mode 100644 index 0000000000000000000000000000000000000000..876b20877c199bf577580e2cf7edafa412aa3f6d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_nationality.yaml @@ -0,0 +1,4 @@ +include: crows_pairs_english.yaml +task: crows_pairs_french_nationality +dataset_name: french +process_docs: !function utils.filter_nationality diff --git a/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_religion.yaml b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_religion.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7930c3243f25a73274beb36750ae72e05ee27d76 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_religion.yaml @@ -0,0 +1,4 @@ +include: crows_pairs_english.yaml +task: crows_pairs_french_religion +dataset_name: french +process_docs: !function utils.filter_religion diff --git a/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_sexual_orientation.yaml b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_sexual_orientation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9ecf47a3846671c793f88c74728605f3909d14d7 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_sexual_orientation.yaml @@ -0,0 +1,4 @@ +include: crows_pairs_english.yaml +task: crows_pairs_french_sexual_orientation +dataset_name: french +process_docs: !function utils.filter_orientation diff --git a/lm-evaluation-harness/lm_eval/tasks/crows_pairs/utils.py 
b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8cb38bdb4923f10f4fb237f7332bdfc785cd521f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/crows_pairs/utils.py @@ -0,0 +1,64 @@ +import datasets + + +def process_results(doc, results): + lls, _ = zip(*results) + + likelihood1, likelihood2 = lls + + # Calculate the absolute difference in loglikelihoods + diff = abs(likelihood1 - likelihood2) + + # if stereotypical sentence more likely (loglikelihood higher) + # then treat this as predicting stereotyped sentence + acc = 1.0 if likelihood1 > likelihood2 else 0.0 + + return {"likelihood_diff": diff, "pct_stereotype": acc} + + +def doc_to_choice(doc): + return [doc["sent_more"], doc["sent_less"]] + + +def filter_dataset(dataset: datasets.Dataset, bias_type: str) -> datasets.Dataset: + return dataset.filter(lambda example: example["bias_type"].startswith(bias_type)) + + +def filter_race_color(dataset: datasets.Dataset) -> datasets.Dataset: + return filter_dataset(dataset, "race-color") + + +def filter_socio(dataset: datasets.Dataset) -> datasets.Dataset: + return filter_dataset(dataset, "socioeconomic") + + +def filter_gender(dataset: datasets.Dataset) -> datasets.Dataset: + return filter_dataset(dataset, "gender") + + +def filter_age(dataset: datasets.Dataset) -> datasets.Dataset: + return filter_dataset(dataset, "age") + + +def filter_religion(dataset: datasets.Dataset) -> datasets.Dataset: + return filter_dataset(dataset, "religion") + + +def filter_disability(dataset: datasets.Dataset) -> datasets.Dataset: + return filter_dataset(dataset, "disability") + + +def filter_orientation(dataset: datasets.Dataset) -> datasets.Dataset: + return filter_dataset(dataset, "sexual-orientation") + + +def filter_nationality(dataset: datasets.Dataset) -> datasets.Dataset: + return filter_dataset(dataset, "nationality") + + +def filter_appearance(dataset: datasets.Dataset) -> datasets.Dataset: + return filter_dataset(dataset, "physical-appearance") + + +def filter_autre(dataset: datasets.Dataset) -> datasets.Dataset: + return filter_dataset(dataset, "autre") diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/README.md b/lm-evaluation-harness/lm_eval/tasks/super_glue/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c8e807718af5abcec3cbb0ac91af2aab6cb4a3fc --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/README.md @@ -0,0 +1,77 @@ +# SuperGLUE + +### Paper + +Title: `SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems` +Abstract: `https://w4ngatang.github.io/static/papers/superglue.pdf` + +SuperGLUE is a benchmark styled after GLUE with a new set of more difficult language +understanding tasks. + +Homepage: https://super.gluebenchmark.com/ + +### Citation + +``` +@inproceedings{NEURIPS2019_4496bf24, + author = {Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel}, + booktitle = {Advances in Neural Information Processing Systems}, + editor = {H. Wallach and H. Larochelle and A. Beygelzimer and F. d\textquotesingle Alch\'{e}-Buc and E. Fox and R. 
Garnett}, + pages = {}, + publisher = {Curran Associates, Inc.}, + title = {SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, + url = {https://proceedings.neurips.cc/paper/2019/file/4496bf24afe7fab6f046bf4923da8de6-Paper.pdf}, + volume = {32}, + year = {2019} +} +``` + +### Groups and Tasks + +#### Groups + +* `super-glue-lm-eval-v1`: SuperGLUE eval adapted from LM Eval V1 +* `super-glue-t5-prompt`: SuperGLUE prompt and evaluation that matches the T5 paper (if using accelerate, will error if record is included.) + +#### Tasks + +Comparison between validation split score on T5x and LM-Eval (T5x models converted to HF) +| T5V1.1 Base | SGLUE | BoolQ | CB | Copa | MultiRC | ReCoRD | RTE | WiC | WSC | +| ----------- | ------| ----- | --------- | ---- | ------- | ------ | --- | --- | --- | +| T5x | 69.47 | 78.47(acc) | 83.93(f1) 87.5(acc) | 50(acc) | 73.81(f1) 33.26(em) | 70.09(em) 71.34(f1) | 78.7(acc) | 63.64(acc) | 75(acc) | +| LM-Eval | 71.35 | 79.36(acc) | 83.63(f1) 87.5(acc) | 63(acc) | 73.45(f1) 33.26(em) | 69.85(em) 68.86(f1) | 78.34(acc) | 65.83(acc) | 75.96(acc) | + + + +* `super-glue-lm-eval-v1` + - `boolq` + - `cb` + - `copa` + - `multirc` + - `record` + - `rte` + - `wic` + - `wsc` + +* `super-glue-t5-prompt` + - `super_glue-boolq-t5-prompt` + - `super_glue-cb-t5-prompt` + - `super_glue-copa-t5-prompt` + - `super_glue-multirc-t5-prompt` + - `super_glue-record-t5-prompt` + - `super_glue-rte-t5-prompt` + - `super_glue-wic-t5-prompt` + - `super_glue-wsc-t5-prompt` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
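Several of the task configs in this patch (crows_pairs above, and copa, wsc, and winogrande below) defer prompt construction to Python hooks referenced from YAML via `!function`. As a rough, self-contained sketch of what such hooks do for a `multiple_choice` task — mirroring the COPA helpers added later in this patch, with a made-up example row rather than a real dataset record:

```python
# Illustrative sketch only: the shape of the doc_to_* hooks that the YAML
# configs reference via `!function`. The row below mimics the SuperGLUE COPA
# schema; nothing here is imported from the harness itself.

def doc_to_text(doc: dict) -> str:
    # Drop the premise's final period and append a connective picked by the
    # question type ("cause" -> "because", "effect" -> "therefore").
    connector = {"cause": "because", "effect": "therefore"}[doc["question"]]
    return doc["premise"].strip()[:-1] + f" {connector}"

def _lower_first(choice: str) -> str:
    return choice[0].lower() + choice[1:]

def doc_to_choice(doc: dict) -> list:
    # Each choice continues the prompt, so lower-case its first letter.
    return [" " + _lower_first(doc["choice1"]), " " + _lower_first(doc["choice2"])]

def doc_to_target(doc: dict) -> str:
    # The gold continuation is whichever choice the label points at.
    return doc_to_choice(doc)[doc["label"]]

row = {
    "premise": "The man turned on the faucet.",
    "choice1": "The toilet filled with water.",
    "choice2": "Water flowed from the spout.",
    "question": "effect",
    "label": 1,
}
print(doc_to_text(row) + doc_to_target(row))
# -> The man turned on the faucet therefore water flowed from the spout.
```

For a `multiple_choice` task, the harness scores each entry of `doc_to_choice` as a continuation of `doc_to_text` and picks the most likely one, which is why the prompt and choices are written so they concatenate cleanly.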
diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/boolq/default.yaml b/lm-evaluation-harness/lm_eval/tasks/super_glue/boolq/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f26e4682c40ff7c7ba1183fecaadb5718206dbfd --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/boolq/default.yaml @@ -0,0 +1,17 @@ +group: + - super-glue-lm-eval-v1 +task: boolq +dataset_path: super_glue +dataset_name: boolq +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "{{passage}}\nQuestion: {{question}}?\nAnswer:" +doc_to_target: label +doc_to_choice: ["no", "yes"] +should_decontaminate: true +doc_to_decontamination_query: passage +metric_list: + - metric: acc +metadata: + version: 2.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/boolq/seq2seq.yaml b/lm-evaluation-harness/lm_eval/tasks/super_glue/boolq/seq2seq.yaml new file mode 100644 index 0000000000000000000000000000000000000000..569316cb31b909755ba6916dea4e54f80fc95df1 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/boolq/seq2seq.yaml @@ -0,0 +1,26 @@ +group: + - super-glue-lm-eval-v1-seq2seq +task: "boolq-seq2seq" +dataset_path: super_glue +dataset_name: boolq +output_type: generate_until +training_split: train +validation_split: validation +doc_to_text: "{{passage}}\nQuestion: {{question}}?\nAnswer:" +doc_to_target: label +doc_to_choice: [' no', ' yes'] +target_delimiter: "" +generation_kwargs: + until: + - "\n\n" + - "\n" + do_sample: false + temperature: 0.0 +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +metadata: + version: 0.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml b/lm-evaluation-harness/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7089381ad86c05913b111d1888878b721a33a222 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml @@ -0,0 +1,22 @@ +group: + - super-glue-t5-prompt +task: super_glue-boolq-t5-prompt +dataset_path: super_glue +dataset_name: boolq +training_split: train +validation_split: validation +output_type: generate_until +doc_to_text: "boolq passage: {{passage}} question: {{question}}" +doc_to_target: label +doc_to_choice: ['False', 'True'] +generation_kwargs: + until: + - "" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +metadata: + version: 0.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/cb/default.yaml b/lm-evaluation-harness/lm_eval/tasks/super_glue/cb/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c575e9872aa712eff69f779a7114d5baed487706 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/cb/default.yaml @@ -0,0 +1,17 @@ +group: + - super-glue-lm-eval-v1 +task: cb +dataset_path: super_glue +dataset_name: cb +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "{{premise}}\nQuestion: {{hypothesis}}. 
True, False, or Neither?\nAnswer:" +doc_to_target: label +doc_to_choice: ['True', 'False', 'Neither'] +metric_list: + - metric: acc + - metric: f1 + aggregation: !function "aggregate.cb_multi_fi" +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/cb/t5-prompt.yaml b/lm-evaluation-harness/lm_eval/tasks/super_glue/cb/t5-prompt.yaml new file mode 100644 index 0000000000000000000000000000000000000000..984e17935ad2479fb9d48dabfeb14f14269da2db --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/cb/t5-prompt.yaml @@ -0,0 +1,25 @@ +group: + - super-glue-t5-prompt +task: super_glue-cb-t5-prompt +dataset_path: super_glue +dataset_name: cb +training_split: train +validation_split: validation +output_type: generate_until +doc_to_text: "cb hypothesis: {{hypothesis}} premise: {{premise}}" +doc_to_target: label +doc_to_choice: ['entailment', 'contradiction', 'neutral'] +generation_kwargs: + until: + - "" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + - metric: !function "t5_utils.mean_3class_f1" + aggregation: !function "t5_utils.agg_mean_3class_f1" + higher_is_better: true +metadata: + version: 0.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/cb/t5_utils.py b/lm-evaluation-harness/lm_eval/tasks/super_glue/cb/t5_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ec02e34538e15f71861f354b437060da5390544e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/cb/t5_utils.py @@ -0,0 +1,30 @@ +import sklearn.metrics + + +def mean_3class_f1(predictions, references): # This is a passthrough function + string_label = ["entailment", "contradiction", "neutral"] + predictions = ( + string_label.index(predictions[0]) if predictions[0] in string_label else 0 + ) + references = string_label.index(references[0]) + + return (predictions, references) + + +def agg_mean_3class_f1(items): + predictions, references = zip(*items) + + """Computes the unweighted average of the F1 per class.""" + metric_str = "fbeta_score" + metric_fn_kwargs = { + "beta": 1, + "labels": range(3), + "average": "macro", + } + + def _fn(predictions, references): + metric_fn = getattr(sklearn.metrics, metric_str) + metric_val = metric_fn(references, predictions, **metric_fn_kwargs) + return metric_val + + return _fn(predictions, references) diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/copa/__pycache__/utils.cpython-310.pyc b/lm-evaluation-harness/lm_eval/tasks/super_glue/copa/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f52a4a16c5ff9dcd47ac918ba9804115b248964 Binary files /dev/null and b/lm-evaluation-harness/lm_eval/tasks/super_glue/copa/__pycache__/utils.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/copa/default.yaml b/lm-evaluation-harness/lm_eval/tasks/super_glue/copa/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1af5dbf47258e203e7a1b506e7ba6e91351a61e4 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/copa/default.yaml @@ -0,0 +1,15 @@ +group: + - super-glue-lm-eval-v1 +task: copa +dataset_path: super_glue +dataset_name: copa +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: !function utils.doc_to_text +doc_to_target: !function utils.doc_to_target +doc_to_choice: !function utils.doc_to_choice +metric_list: + - metric: acc +metadata: + version: 1.0 diff 
--git a/lm-evaluation-harness/lm_eval/tasks/super_glue/copa/t5-prompt.yaml b/lm-evaluation-harness/lm_eval/tasks/super_glue/copa/t5-prompt.yaml new file mode 100644 index 0000000000000000000000000000000000000000..20a90db98d28a78307b7e46b99834eaf98cc3f9e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/copa/t5-prompt.yaml @@ -0,0 +1,22 @@ +group: + - super-glue-t5-prompt +task: super_glue-copa-t5-prompt +dataset_path: super_glue +dataset_name: copa +training_split: train +validation_split: validation +output_type: generate_until +doc_to_text: "copa choice1: {{choice1}} choice2: {{choice2}} premise: {{premise}} question: {{question}}" +doc_to_target: label +doc_to_choice: ['choice1', 'choice2'] +generation_kwargs: + until: + - "" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +metadata: + version: 0.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/copa/utils.py b/lm-evaluation-harness/lm_eval/tasks/super_glue/copa/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3afc868eb486c47c51b0036ce955502bc377c9c4 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/copa/utils.py @@ -0,0 +1,21 @@ +def convert_choice(choice): + return choice[0].lower() + choice[1:] + + +def doc_to_text(doc): + # Drop the period + connector = { + "cause": "because", + "effect": "therefore", + }[doc["question"]] + return doc["premise"].strip()[:-1] + f" {connector}" + + +def doc_to_target(doc): + correct_choice = doc["choice1"] if doc["label"] == 0 else doc["choice2"] + # Connect the sentences + return " " + convert_choice(correct_choice) + + +def doc_to_choice(doc): + return [" " + convert_choice(doc["choice1"]), " " + convert_choice(doc["choice2"])] diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/wic/default.yaml b/lm-evaluation-harness/lm_eval/tasks/super_glue/wic/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0f86855a7811ca1e2c11f61201237f8d10ed524c --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/wic/default.yaml @@ -0,0 +1,15 @@ +group: + - super-glue-lm-eval-v1 +task: "wic" +dataset_path: super_glue +dataset_name: wic +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Is the word '{{sentence1[start1:end1]}}' used in the same way in the two sentences above?\nAnswer:" +doc_to_target: label +doc_to_choice: ['no', 'yes'] +metric_list: + - metric: acc +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/wic/t5-prompt.yaml b/lm-evaluation-harness/lm_eval/tasks/super_glue/wic/t5-prompt.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3a0dbb2f7fd64f2ec3ae3e6d58c4dd7e0963edc2 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/wic/t5-prompt.yaml @@ -0,0 +1,22 @@ +group: + - super-glue-t5-prompt +task: super_glue-wic-t5-prompt +dataset_path: super_glue +dataset_name: wic +training_split: train +validation_split: validation +output_type: generate_until +doc_to_text: "wic sentence1: {{sentence1}} sentence2: {{sentence2}} word: {{word}}" +doc_to_target: label +doc_to_choice: ['False', 'True'] +generation_kwargs: + until: + - "" +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +metadata: + version: 0.0 diff --git 
a/lm-evaluation-harness/lm_eval/tasks/super_glue/wsc/default.yaml b/lm-evaluation-harness/lm_eval/tasks/super_glue/wsc/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b9c7ec347c2beccb8fdc54ada1082a763c9cfe0d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/wsc/default.yaml @@ -0,0 +1,15 @@ +group: + - super-glue-lm-eval-v1 +task: wsc +dataset_path: super_glue +dataset_name: wsc.fixed +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: !function preprocess_wsc.default_doc_to_text +doc_to_target: label +doc_to_choice: ['no', 'yes'] +metric_list: + - metric: acc +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/wsc/preprocess_wsc.py b/lm-evaluation-harness/lm_eval/tasks/super_glue/wsc/preprocess_wsc.py new file mode 100644 index 0000000000000000000000000000000000000000..c62c25676a51fd8e60a4d9fc6f8755041bba7534 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/wsc/preprocess_wsc.py @@ -0,0 +1,17 @@ +from lm_eval.utils import general_detokenize + + +def default_doc_to_text(x): + raw_passage = x["text"] + # NOTE: HuggingFace span indices are word-based not character-based. + pre = " ".join(raw_passage.split()[: x["span2_index"]]) + post = raw_passage[len(pre) + len(x["span2_text"]) + 1 :] + passage = general_detokenize(pre + " *{}*".format(x["span2_text"]) + post) + noun = x["span1_text"] + pronoun = x["span2_text"] + text = ( + f"Passage: {passage}\n" + + f'Question: In the passage above, does the pronoun "*{pronoun}*" refer to "*{noun}*"?\n' + + "Answer:" + ) + return text diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml b/lm-evaluation-harness/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6030d1faf210da7b9aab301d059a74978a411a1f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml @@ -0,0 +1,20 @@ +group: + - super-glue-t5-prompt +task: super_glue-wsc-t5-prompt +dataset_path: super_glue +dataset_name: wsc.fixed +training_split: train +validation_split: validation +output_type: generate_until +doc_to_text: !function "t5_utils.doc_to_text" +process_results: !function "t5_utils.process_results" +doc_to_target: label +generation_kwargs: + until: + - "" +metric_list: + - metric: accuracy + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/super_glue/wsc/t5_utils.py b/lm-evaluation-harness/lm_eval/tasks/super_glue/wsc/t5_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2860a2a903944a11fff0e981c5135214a8cf8f17 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/super_glue/wsc/t5_utils.py @@ -0,0 +1,104 @@ +import re +from typing import List + + +def doc_to_text(x): + text = re.sub(r" X ", " *" + x["span2_text"] + "* ", _wsc_inputs(x)) + return "wsc: " + text + + +def _wsc_inputs(x): + words = x["text"].split(" ") + + # We would need some special logic to handle the case where the pronoun is the + # first or last word in the text. None of the examples in WSC seem to have + # this, so we are ignoring these cases. 
+ assert x["span2_index"] > 0 + assert x["span2_index"] < len(words) + pronoun_index = x["span2_index"] + + def create_input(): + assert words[pronoun_index] == x["span2_text"] + + return " ".join( + [ + " ".join(words[:pronoun_index]), + "X", + " ".join(words[pronoun_index + 1 :]), + ] + ) + + # Handle some special cases. + if ( + x["text"] + == 'The boy continued to whip the pony , and eventually the pony threw him over. John laughed out quite loud. "Good for him," he said. ' + ): + return ( + "The boy continued to whip the pony , and eventually the pony threw " + 'him over. John laughed out quite loud. "Good for X ," he said.' + ) + + # Using the span2_index, we get 'use' instead of 'it'. + if ( + x["text"] + == "When they had eventually calmed down a bit , and had gotten home, Mr. Farley put the magic pebble in an iron safe . Some day they might want to use it , but really for now, what more could they wish for?" + ): + return ( + "When they had eventually calmed down a bit , and had gotten home, " + "Mr. Farley put the magic pebble in an iron safe . Some day they might " + "want to use X , but really for now, what more could they wish for?" + ) + + return create_input() + + +DETERMINERS = { + "a", + "an", + "few", + "her", + "his", + "each", + "every", + "many", + "much", + "my", + "our", + "some", + "that", + "the", + "their", + "these", + "this", + "those", + "which", + "whose", + "your", +} + + +def clean(s: str) -> str: + """Ignore capitalization and determiners.""" + s = s.strip().lower() + return " ".join([w for w in s.split(" ") if w not in DETERMINERS]) + + +def process_results(docs: dict, resps: List): + prediction = clean(resps[0]) + reference = clean(docs["span1_text"]) + + if ("'" in prediction) != ("'" in reference): + # referent is "Bob's hat" as predicting the referent. + predicted_referent = False + else: + prediction_words = set(prediction.split(" ")) + referent_words = set(reference.split(" ")) + + # Handle cases where the prediction is "fuzzy bunny" and the referent is + # "bunny". + predicted_referent = prediction_words.issubset( + referent_words + ) or referent_words.issubset(prediction_words) + + acc = 1.0 if predicted_referent == docs["label"] else 0.0 + return {"accuracy": acc} diff --git a/lm-evaluation-harness/lm_eval/tasks/translation/README.md b/lm-evaluation-harness/lm_eval/tasks/translation/README.md new file mode 100644 index 0000000000000000000000000000000000000000..bd36302619a2cc1b40b57ef758d328d85580e420 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/translation/README.md @@ -0,0 +1,39 @@ +# Translation Tasks + +### Paper + + + +### Citation + +``` + +``` + +### Groups and Tasks + +#### Groups + +* `gpt3_translation_tasks` +* `wmt14` +* `wmt16` +* `wmt20` +* `iwslt2017` + +#### Tasks + +* + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [x] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
+ * [ ] Checked for equivalence with v0.3.0 LM Evaluation Harness diff --git a/lm-evaluation-harness/lm_eval/tasks/translation/iwslt2017_ar-en.yaml b/lm-evaluation-harness/lm_eval/tasks/translation/iwslt2017_ar-en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ea713393c1dfbe9f7e1f6d055dd4768ace31269e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/translation/iwslt2017_ar-en.yaml @@ -0,0 +1,13 @@ +# Generated by utils.py +dataset_name: iwslt2017-en-ar +dataset_path: iwslt2017 +doc_to_target: ' {{translation["en"]}}' +doc_to_text: 'Arabic phrase: {{translation["ar"]}} + + English phrase:' +group: +- generate_until +- translation +- iwslt2017 +include: wmt_common_yaml +task: iwslt2017-ar-en diff --git a/lm-evaluation-harness/lm_eval/tasks/translation/iwslt2017_en-ar.yaml b/lm-evaluation-harness/lm_eval/tasks/translation/iwslt2017_en-ar.yaml new file mode 100644 index 0000000000000000000000000000000000000000..891ad50fd6fb60fdb8f21f9004857d739a15640f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/translation/iwslt2017_en-ar.yaml @@ -0,0 +1,13 @@ +# Generated by utils.py +dataset_name: iwslt2017-en-ar +dataset_path: iwslt2017 +doc_to_target: ' {{translation["ar"]}}' +doc_to_text: 'English phrase: {{translation["en"]}} + + Arabic phrase:' +group: +- generate_until +- translation +- iwslt2017 +include: wmt_common_yaml +task: iwslt2017-en-ar diff --git a/lm-evaluation-harness/lm_eval/tasks/translation/utils.py b/lm-evaluation-harness/lm_eval/tasks/translation/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f30c4d86259259a325edcee3b64ad3199b966c96 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/translation/utils.py @@ -0,0 +1,118 @@ +import argparse + +import yaml + + +try: + import pycountry +except ModuleNotFoundError: + raise Exception( + "`pycountry` is required for generating translation task prompt templates. \ +please install pycountry via pip install lm-eval[multilingual] or pip install -e .[multilingual]", + ) + + +# Different translation benchmarks included in the library. Mostly WMT. +# These correspond to dataset names (subsets) on HuggingFace for each dataset. +# A yaml file is generated by this script for each language pair. + +gpt3_translation_benchmarks = { + "wmt14": ["fr-en"], # ["en-fr", "fr-en"], # French + "wmt16": [ + "ro-en", + "de-en", + ], # ["en-ro", "ro-en", "de-en", "en-de"], # German, Romanian +} + +# 28 total +LANGUAGES = { + **gpt3_translation_benchmarks, + # "wmt20": sacrebleu.get_langpairs_for_testset("wmt20"), + "iwslt2017": ["en-ar"], # Arabic +} + + +def code_to_language(code): + # key is alpha_2 or alpha_3 depending on the code length + language_tuple = pycountry.languages.get(**{f"alpha_{len(code)}": code}) + return language_tuple.name + + +def gen_lang_yamls(output_dir: str, overwrite: bool) -> None: + """ + Generate a yaml file for each language. + + :param output_dir: The directory to output the files to. + :param overwrite: Whether to overwrite files if they already exist. 
+ """ + err = [] + for lang in LANGUAGES.keys(): + for dataset_name in LANGUAGES[lang]: + src_lang, _, tgt_lang = dataset_name.partition("-") + for src, tgt in [[src_lang, tgt_lang], [tgt_lang, src_lang]]: + # both translation directions for each lang pair + lang_pair = src + "-" + tgt + file_name = f"{lang}_{lang_pair}.yaml" + try: + source, target = code_to_language(src), code_to_language(tgt) + + groups = ["generate_until", "translation", lang] + if lang in gpt3_translation_benchmarks.keys(): + groups += ["gpt3_translation_benchmarks"] + + with open( + f"{output_dir}/{file_name}", + "w" if overwrite else "x", + encoding="utf8", + ) as f: + f.write("# Generated by utils.py\n") + yaml.dump( + { + "include": "wmt_common_yaml", + "group": groups, + "dataset_path": lang, + "dataset_name": dataset_name + if not (lang == "iwslt2017") + else "iwslt2017-" + dataset_name, + "task": f"{lang}-{lang_pair}", + "doc_to_text": f"{source} phrase: " + + "{{translation[" + + f'"{src}"' + + "]}}\n" + + f"{target} phrase:", + "doc_to_target": " {{" + + "translation[" + + f'"{tgt}"]' + + "}}", + }, + f, + ) + except FileExistsError: + err.append(file_name) + + if len(err) > 0: + raise FileExistsError( + "Files were not created because they already exist (use --overwrite flag):" + f" {', '.join(err)}" + ) + + +def main() -> None: + """Parse CLI args and generate language-specific yaml files.""" + parser = argparse.ArgumentParser() + parser.add_argument( + "--overwrite", + default=False, + action="store_true", + help="Overwrite files if they already exist", + ) + parser.add_argument( + "--output-dir", default=".", help="Directory to write yaml files to" + ) + args = parser.parse_args() + + gen_lang_yamls(output_dir=args.output_dir, overwrite=args.overwrite) + + +if __name__ == "__main__": + main() diff --git a/lm-evaluation-harness/lm_eval/tasks/translation/wmt14_en-fr.yaml b/lm-evaluation-harness/lm_eval/tasks/translation/wmt14_en-fr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b7e42dca5acca5036ec8b3b619501557c6a1c36c --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/translation/wmt14_en-fr.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: fr-en +dataset_path: wmt14 +doc_to_target: ' {{translation["fr"]}}' +doc_to_text: 'English phrase: {{translation["en"]}} + + French phrase:' +group: +- generate_until +- translation +- wmt14 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt14-en-fr diff --git a/lm-evaluation-harness/lm_eval/tasks/translation/wmt14_fr-en.yaml b/lm-evaluation-harness/lm_eval/tasks/translation/wmt14_fr-en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..09ddd57d6049c29f35150aa4de94c6db3604a0a4 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/translation/wmt14_fr-en.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: fr-en +dataset_path: wmt14 +doc_to_target: ' {{translation["en"]}}' +doc_to_text: 'French phrase: {{translation["fr"]}} + + English phrase:' +group: +- generate_until +- translation +- wmt14 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt14-fr-en diff --git a/lm-evaluation-harness/lm_eval/tasks/translation/wmt16_de-en.yaml b/lm-evaluation-harness/lm_eval/tasks/translation/wmt16_de-en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..23d50e4aacc8c4e19a8b282e4051e80ec18edf29 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/translation/wmt16_de-en.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: de-en +dataset_path: 
wmt16 +doc_to_target: ' {{translation["en"]}}' +doc_to_text: 'German phrase: {{translation["de"]}} + + English phrase:' +group: +- generate_until +- translation +- wmt16 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt16-de-en diff --git a/lm-evaluation-harness/lm_eval/tasks/translation/wmt16_en-de.yaml b/lm-evaluation-harness/lm_eval/tasks/translation/wmt16_en-de.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8d391b6c6b879c15f0c8d63119824647ea6997c3 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/translation/wmt16_en-de.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: de-en +dataset_path: wmt16 +doc_to_target: ' {{translation["de"]}}' +doc_to_text: 'English phrase: {{translation["en"]}} + + German phrase:' +group: +- generate_until +- translation +- wmt16 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt16-en-de diff --git a/lm-evaluation-harness/lm_eval/tasks/translation/wmt16_ro-en.yaml b/lm-evaluation-harness/lm_eval/tasks/translation/wmt16_ro-en.yaml new file mode 100644 index 0000000000000000000000000000000000000000..39441eac1c8cb2a8ec4d4e9c9b31402607a5ea77 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/translation/wmt16_ro-en.yaml @@ -0,0 +1,14 @@ +# Generated by utils.py +dataset_name: ro-en +dataset_path: wmt16 +doc_to_target: ' {{translation["en"]}}' +doc_to_text: 'Romanian phrase: {{translation["ro"]}} + + English phrase:' +group: +- generate_until +- translation +- wmt16 +- gpt3_translation_benchmarks +include: wmt_common_yaml +task: wmt16-ro-en diff --git a/lm-evaluation-harness/lm_eval/tasks/translation/wmt_common_yaml b/lm-evaluation-harness/lm_eval/tasks/translation/wmt_common_yaml new file mode 100644 index 0000000000000000000000000000000000000000..2cb3c7c8f8d8305e9907c89c94d6f8fd95c709fc --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/translation/wmt_common_yaml @@ -0,0 +1,17 @@ +output_type: generate_until +training_split: train +validation_split: validation +fewshot_split: validation +test_split: test +metric_list: + - metric: bleu + - metric: ter + - metric: chrf +generation_kwargs: + until: + - "\n" + do_sample: false + temperature: 0.0 +repeats: 1 +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/winogrande/README.md b/lm-evaluation-harness/lm_eval/tasks/winogrande/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d763dffc02ada2e9c619e3ab74423f81dd368d8a --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/winogrande/README.md @@ -0,0 +1,54 @@ +# WinoGrande + +### Paper + +Title: `WinoGrande: An Adversarial Winograd Schema Challenge at Scale` + +Abstract: https://arxiv.org/abs/1907.10641 + +WinoGrande is a collection of 44k problems, inspired by Winograd Schema Challenge +(Levesque, Davis, and Morgenstern 2011), but adjusted to improve the scale and +robustness against the dataset-specific bias. Formulated as a fill-in-a-blank +task with binary options, the goal is to choose the right option for a given +sentence which requires commonsense reasoning. + +NOTE: This evaluation of Winogrande uses partial evaluation as described by +Trinh & Le in Simple Method for Commonsense Reasoning (2018). 
+See: https://arxiv.org/abs/1806.02847 + +Homepage: https://leaderboard.allenai.org/winogrande/submissions/public + + +### Citation + +``` +@article{sakaguchi2019winogrande, + title={WinoGrande: An Adversarial Winograd Schema Challenge at Scale}, + author={Sakaguchi, Keisuke and Bras, Ronan Le and Bhagavatula, Chandra and Choi, Yejin}, + journal={arXiv preprint arXiv:1907.10641}, + year={2019} +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet. + +#### Tasks + +* `winogrande` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation-harness/lm_eval/tasks/winogrande/__pycache__/preprocess_winogrande.cpython-310.pyc b/lm-evaluation-harness/lm_eval/tasks/winogrande/__pycache__/preprocess_winogrande.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f313661eaa1ea41cece247e9eca0e46958a13b79 Binary files /dev/null and b/lm-evaluation-harness/lm_eval/tasks/winogrande/__pycache__/preprocess_winogrande.cpython-310.pyc differ diff --git a/lm-evaluation-harness/lm_eval/tasks/winogrande/default.yaml b/lm-evaluation-harness/lm_eval/tasks/winogrande/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..213f0727fea6ef8d5b6f87a78f093de89b6f80f6 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/winogrande/default.yaml @@ -0,0 +1,17 @@ +task: winogrande +dataset_path: winogrande +dataset_name: winogrande_xl +output_type: multiple_choice +training_split: train +validation_split: validation +doc_to_text: !function preprocess_winogrande.doc_to_text +doc_to_target: !function preprocess_winogrande.doc_to_target +doc_to_choice: !function preprocess_winogrande.doc_to_choice +should_decontaminate: true +doc_to_decontamination_query: sentence +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/winogrande/preprocess_winogrande.py b/lm-evaluation-harness/lm_eval/tasks/winogrande/preprocess_winogrande.py new file mode 100644 index 0000000000000000000000000000000000000000..2f2076a762905cd151db382ec78109795975d74f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/winogrande/preprocess_winogrande.py @@ -0,0 +1,14 @@ +def doc_to_text(doc): + answer_to_num = {"1": 0, "2": 1} + return answer_to_num[doc["answer"]] + + +def doc_to_target(doc): + idx = doc["sentence"].index("_") + 1 + return doc["sentence"][idx:].strip() + + +def doc_to_choice(doc): + idx = doc["sentence"].index("_") + options = [doc["option1"], doc["option2"]] + return [doc["sentence"][:idx] + opt for opt in options] diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Busingen b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Busingen new file mode 100644 index 0000000000000000000000000000000000000000..ad6cf59281a1046d9dcd045fda521585e3e33e06 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Busingen differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Chisinau b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Chisinau new file mode 100644 index 0000000000000000000000000000000000000000..5ee23fe0e59f044598675db44d53c20590b88934 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Chisinau differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Copenhagen b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Copenhagen new file mode 100644 index 0000000000000000000000000000000000000000..7f6d958f8630cba512d8e58ca8edfbd516291522 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Copenhagen differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Gibraltar b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Gibraltar new file mode 100644 index 0000000000000000000000000000000000000000..a38f11ffdf2a9e538695ea8cae20b1d2c04164af Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Gibraltar differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Kiev b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Kiev new file mode 100644 index 0000000000000000000000000000000000000000..52efea88065b220e44fd876de3bf3090fe62cc79 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Kiev differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Madrid b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Madrid new file mode 100644 index 0000000000000000000000000000000000000000..53f4cd101c18058a1ee4d4b9d1184ada11d74232 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Madrid differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Monaco b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Monaco new file mode 100644 index 0000000000000000000000000000000000000000..7d366c6098c49ecd546e1cc1538919e1414a3aee Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Monaco differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Prague b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Prague new file mode 100644 index 0000000000000000000000000000000000000000..ce8f433ece44f0b96b18d3b5780730e7f9cad9f5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Prague differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Rome b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Rome new file mode 100644 index 0000000000000000000000000000000000000000..32b2899a306dde401fa2e3952d06f5f4d9952bed Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Rome differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Sarajevo b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Sarajevo new file mode 100644 index 0000000000000000000000000000000000000000..27de456f16ab549627b284a39e2265cbdb4ad8e9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Sarajevo differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Ulyanovsk b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Ulyanovsk new file mode 100644 index 0000000000000000000000000000000000000000..d668233b37f268a745e995177195b06ffab1e69b Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Ulyanovsk differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Uzhgorod b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Uzhgorod new file mode 100644 index 0000000000000000000000000000000000000000..52efea88065b220e44fd876de3bf3090fe62cc79 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Uzhgorod differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Vaduz b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Vaduz new file mode 100644 index 0000000000000000000000000000000000000000..ad6cf59281a1046d9dcd045fda521585e3e33e06 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Vaduz differ diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Vilnius b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Vilnius new file mode 100644 index 0000000000000000000000000000000000000000..7abd63fa608e0186b9f154d9fcc32472c28f6759 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Vilnius differ
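The winogrande entries earlier in this patch use the same pattern: `preprocess_winogrande.py` splits the sentence at the blank and scores the shared continuation against each candidate prefix (the partial-evaluation setup the README cites from Trinh & Le, 2018). A minimal sketch on a made-up row, mirroring the two helpers in that file:

```python
# Illustrative sketch only: the example row is invented; the helper bodies
# mirror preprocess_winogrande.py added earlier in this patch.

def doc_to_choice(doc: dict) -> list:
    # One prefix per candidate referent, each with the blank filled in.
    idx = doc["sentence"].index("_")
    options = [doc["option1"], doc["option2"]]
    return [doc["sentence"][:idx] + opt for opt in options]

def doc_to_target(doc: dict) -> str:
    # The continuation after the blank, scored against every choice prefix.
    idx = doc["sentence"].index("_") + 1
    return doc["sentence"][idx:].strip()

row = {
    "sentence": "The trophy does not fit in the suitcase because _ is too small.",
    "option1": "the trophy",
    "option2": "the suitcase",
    "answer": "2",  # doc_to_text in that file maps this to choice index 1
}
print(doc_to_choice(row))  # two candidate prefixes, one per referent
print(doc_to_target(row))  # "is too small."
```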