applied-ai-018 committed
Commit 8844eba · verified · 1 Parent(s): 88dfe69

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step20/zero/16.post_attention_layernorm.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step20/zero/21.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step20/zero/6.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  4. lm-evaluation-harness/lm_eval/tasks/belebele/belebele_gaz_Latn.yaml +4 -0
  5. lm-evaluation-harness/lm_eval/tasks/belebele/belebele_mri_Latn.yaml +4 -0
  6. lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tur_Latn.yaml +4 -0
  7. lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ukr_Cyrl.yaml +4 -0
  8. lm-evaluation-harness/lm_eval/tasks/belebele/belebele_urd_Arab.yaml +4 -0
  9. lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_anatomy.yaml +4 -0
  10. lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_chinese_driving_rule.yaml +4 -0
  11. lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_clinical_knowledge.yaml +4 -0
  12. lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_college_law.yaml +4 -0
  13. lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_electrical_engineering.yaml +4 -0
  14. lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_elementary_chinese.yaml +4 -0
  15. lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_ethnology.yaml +4 -0
  16. lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_global_facts.yaml +4 -0
  17. lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_high_school_geography.yaml +4 -0
  18. lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_professional_psychology.yaml +4 -0
  19. lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english.yaml +23 -0
  20. lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_autre.yaml +4 -0
  21. lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_disability.yaml +4 -0
  22. lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_gender.yaml +4 -0
  23. lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_race_color.yaml +4 -0
  24. lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_religion.yaml +4 -0
  25. lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_sexual_orientation.yaml +4 -0
  26. lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french.yaml +3 -0
  27. lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_autre.yaml +4 -0
  28. lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_disability.yaml +4 -0
  29. lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_gender.yaml +4 -0
  30. lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_nationality.yaml +4 -0
  31. lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_religion.yaml +4 -0
  32. lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_sexual_orientation.yaml +4 -0
  33. lm-evaluation-harness/lm_eval/tasks/crows_pairs/utils.py +64 -0
  34. lm-evaluation-harness/lm_eval/tasks/super_glue/README.md +77 -0
  35. lm-evaluation-harness/lm_eval/tasks/super_glue/boolq/default.yaml +17 -0
  36. lm-evaluation-harness/lm_eval/tasks/super_glue/boolq/seq2seq.yaml +26 -0
  37. lm-evaluation-harness/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml +22 -0
  38. lm-evaluation-harness/lm_eval/tasks/super_glue/cb/default.yaml +17 -0
  39. lm-evaluation-harness/lm_eval/tasks/super_glue/cb/t5-prompt.yaml +25 -0
  40. lm-evaluation-harness/lm_eval/tasks/super_glue/cb/t5_utils.py +30 -0
  41. lm-evaluation-harness/lm_eval/tasks/super_glue/copa/__pycache__/utils.cpython-310.pyc +0 -0
  42. lm-evaluation-harness/lm_eval/tasks/super_glue/copa/default.yaml +15 -0
  43. lm-evaluation-harness/lm_eval/tasks/super_glue/copa/t5-prompt.yaml +22 -0
  44. lm-evaluation-harness/lm_eval/tasks/super_glue/copa/utils.py +21 -0
  45. lm-evaluation-harness/lm_eval/tasks/super_glue/wic/default.yaml +15 -0
  46. lm-evaluation-harness/lm_eval/tasks/super_glue/wic/t5-prompt.yaml +22 -0
  47. lm-evaluation-harness/lm_eval/tasks/super_glue/wsc/default.yaml +15 -0
  48. lm-evaluation-harness/lm_eval/tasks/super_glue/wsc/preprocess_wsc.py +17 -0
  49. lm-evaluation-harness/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml +20 -0
  50. lm-evaluation-harness/lm_eval/tasks/super_glue/wsc/t5_utils.py +104 -0
ckpts/universal/global_step20/zero/16.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d39a7a1c189cdf31acd433b90f046b319e9ff43a3d908799fa7764268c8e00c9
+ size 9372
ckpts/universal/global_step20/zero/21.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d3c282488722e3682cc79200a0a78bc6aed3f3f15b7cdae9e2f69030a62d9ef4
+ size 33555612
ckpts/universal/global_step20/zero/6.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:31b0109ffbd63bcc6160beccc718cf7e609a103bd16646760ed129c646ffd32a
+ size 33555627
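Note: the three `.pt` entries above are Git LFS pointer files, so the diff only records the sha256 and byte size of the optimizer-state shards (Adam `exp_avg` / `exp_avg_sq` tensors), not the tensors themselves. A minimal sketch, assuming the actual LFS objects have been fetched locally, of inspecting one shard:

```python
import torch

# Hypothetical inspection of one optimizer-state shard; the pointer file in
# this diff carries no tensor data, so this only works after `git lfs` has
# replaced the pointer with the real object.
state = torch.load(
    "ckpts/universal/global_step20/zero/16.post_attention_layernorm.weight/exp_avg.pt",
    map_location="cpu",
)
print(type(state), getattr(state, "shape", None))
```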
lm-evaluation-harness/lm_eval/tasks/belebele/belebele_gaz_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "gaz_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_gaz_Latn"
+ "test_split": "gaz_Latn"
lm-evaluation-harness/lm_eval/tasks/belebele/belebele_mri_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "mri_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_mri_Latn"
+ "test_split": "mri_Latn"
lm-evaluation-harness/lm_eval/tasks/belebele/belebele_tur_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "tur_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_tur_Latn"
+ "test_split": "tur_Latn"
lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ukr_Cyrl.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "ukr_Cyrl"
+ "include": "_default_template_yaml"
+ "task": "belebele_ukr_Cyrl"
+ "test_split": "ukr_Cyrl"
lm-evaluation-harness/lm_eval/tasks/belebele/belebele_urd_Arab.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "urd_Arab"
+ "include": "_default_template_yaml"
+ "task": "belebele_urd_Arab"
+ "test_split": "urd_Arab"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_anatomy.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "anatomy"
+ "description": "以下是关于解剖学的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_anatomy"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_chinese_driving_rule.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "chinese_driving_rule"
+ "description": "以下是关于中国驾驶规则的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_chinese_driving_rule"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_clinical_knowledge.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "clinical_knowledge"
+ "description": "以下是关于临床知识的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_clinical_knowledge"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_college_law.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "college_law"
+ "description": "以下是关于大学法律的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_college_law"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_electrical_engineering.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "electrical_engineering"
+ "description": "以下是关于电气工程的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_electrical_engineering"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_elementary_chinese.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "elementary_chinese"
+ "description": "以下是关于小学语文的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_elementary_chinese"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_ethnology.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "ethnology"
+ "description": "以下是关于民族学的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_ethnology"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_global_facts.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "global_facts"
+ "description": "以下是关于全球事实的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_global_facts"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_high_school_geography.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "high_school_geography"
+ "description": "以下是关于高中地理的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_high_school_geography"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_professional_psychology.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "professional_psychology"
+ "description": "以下是关于专业心理学的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_professional_psychology"
lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english.yaml ADDED
@@ -0,0 +1,23 @@
+ group:
+   - crows_pairs
+   - social_bias
+   - loglikelihood
+ task: crows_pairs_english
+ dataset_path: BigScienceBiasEval/crows_pairs_multilingual
+ dataset_name: english
+ test_split: test
+ output_type: multiple_choice
+ doc_to_text: ""
+ doc_to_target: 0
+ doc_to_choice: !function utils.doc_to_choice
+ target_delimiter: ""
+ process_results: !function utils.process_results
+ metric_list:
+   - metric: likelihood_diff
+     aggregation: mean
+     higher_is_better: false
+   - metric: pct_stereotype
+     aggregation: mean
+     higher_is_better: false
+ metadata:
+   version: 1.0
lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_autre.yaml ADDED
@@ -0,0 +1,4 @@
+ include: crows_pairs_english.yaml
+ task: crows_pairs_english_autre
+ dataset_name: english
+ process_docs: !function utils.filter_autre
lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_disability.yaml ADDED
@@ -0,0 +1,4 @@
+ include: crows_pairs_english.yaml
+ task: crows_pairs_english_disability
+ dataset_name: english
+ process_docs: !function utils.filter_disability
lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_gender.yaml ADDED
@@ -0,0 +1,4 @@
+ include: crows_pairs_english.yaml
+ task: crows_pairs_english_gender
+ dataset_name: english
+ process_docs: !function utils.filter_gender
lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_race_color.yaml ADDED
@@ -0,0 +1,4 @@
+ include: crows_pairs_english.yaml
+ task: crows_pairs_english_race_color
+ dataset_name: english
+ process_docs: !function utils.filter_race_color
lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_religion.yaml ADDED
@@ -0,0 +1,4 @@
+ include: crows_pairs_english.yaml
+ task: crows_pairs_english_religion
+ dataset_name: english
+ process_docs: !function utils.filter_religion
lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_english_sexual_orientation.yaml ADDED
@@ -0,0 +1,4 @@
+ include: crows_pairs_english.yaml
+ task: crows_pairs_english_sexual_orientation
+ dataset_name: english
+ process_docs: !function utils.filter_orientation
lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french.yaml ADDED
@@ -0,0 +1,3 @@
+ include: crows_pairs_english.yaml
+ task: crows_pairs_french
+ dataset_name: french
lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_autre.yaml ADDED
@@ -0,0 +1,4 @@
+ include: crows_pairs_english.yaml
+ task: crows_pairs_french_autre
+ dataset_name: french
+ process_docs: !function utils.filter_autre
lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_disability.yaml ADDED
@@ -0,0 +1,4 @@
+ include: crows_pairs_english.yaml
+ task: crows_pairs_french_disability
+ dataset_name: french
+ process_docs: !function utils.filter_disability
lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_gender.yaml ADDED
@@ -0,0 +1,4 @@
+ include: crows_pairs_english.yaml
+ task: crows_pairs_french_gender
+ dataset_name: french
+ process_docs: !function utils.filter_gender
lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_nationality.yaml ADDED
@@ -0,0 +1,4 @@
+ include: crows_pairs_english.yaml
+ task: crows_pairs_french_nationality
+ dataset_name: french
+ process_docs: !function utils.filter_nationality
lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_religion.yaml ADDED
@@ -0,0 +1,4 @@
+ include: crows_pairs_english.yaml
+ task: crows_pairs_french_religion
+ dataset_name: french
+ process_docs: !function utils.filter_religion
lm-evaluation-harness/lm_eval/tasks/crows_pairs/crows_pairs_french_sexual_orientation.yaml ADDED
@@ -0,0 +1,4 @@
+ include: crows_pairs_english.yaml
+ task: crows_pairs_french_sexual_orientation
+ dataset_name: french
+ process_docs: !function utils.filter_orientation
lm-evaluation-harness/lm_eval/tasks/crows_pairs/utils.py ADDED
@@ -0,0 +1,64 @@
+ import datasets
+
+
+ def process_results(doc, results):
+     lls, _ = zip(*results)
+
+     likelihood1, likelihood2 = lls
+
+     # Calculate the absolute difference in loglikelihoods
+     diff = abs(likelihood1 - likelihood2)
+
+     # if stereotypical sentence more likely (loglikelihood higher)
+     # then treat this as predicting stereotyped sentence
+     acc = 1.0 if likelihood1 > likelihood2 else 0.0
+
+     return {"likelihood_diff": diff, "pct_stereotype": acc}
+
+
+ def doc_to_choice(doc):
+     return [doc["sent_more"], doc["sent_less"]]
+
+
+ def filter_dataset(dataset: datasets.Dataset, bias_type: str) -> datasets.Dataset:
+     return dataset.filter(lambda example: example["bias_type"].startswith(bias_type))
+
+
+ def filter_race_color(dataset: datasets.Dataset) -> datasets.Dataset:
+     return filter_dataset(dataset, "race-color")
+
+
+ def filter_socio(dataset: datasets.Dataset) -> datasets.Dataset:
+     return filter_dataset(dataset, "socioeconomic")
+
+
+ def filter_gender(dataset: datasets.Dataset) -> datasets.Dataset:
+     return filter_dataset(dataset, "gender")
+
+
+ def filter_age(dataset: datasets.Dataset) -> datasets.Dataset:
+     return filter_dataset(dataset, "age")
+
+
+ def filter_religion(dataset: datasets.Dataset) -> datasets.Dataset:
+     return filter_dataset(dataset, "religion")
+
+
+ def filter_disability(dataset: datasets.Dataset) -> datasets.Dataset:
+     return filter_dataset(dataset, "disability")
+
+
+ def filter_orientation(dataset: datasets.Dataset) -> datasets.Dataset:
+     return filter_dataset(dataset, "sexual-orientation")
+
+
+ def filter_nationality(dataset: datasets.Dataset) -> datasets.Dataset:
+     return filter_dataset(dataset, "nationality")
+
+
+ def filter_appearance(dataset: datasets.Dataset) -> datasets.Dataset:
+     return filter_dataset(dataset, "physical-appearance")
+
+
+ def filter_autre(dataset: datasets.Dataset) -> datasets.Dataset:
+     return filter_dataset(dataset, "autre")
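A toy walkthrough of the metrics computed by `process_results` above, with made-up loglikelihoods for the `sent_more` / `sent_less` pair (the harness passes one `(loglikelihood, is_greedy)` tuple per choice):

```python
# Made-up per-choice loglikelihoods, in the order returned by doc_to_choice.
results = [(-12.3, False), (-14.1, False)]
(ll_more, _), (ll_less, _) = results

likelihood_diff = abs(ll_more - ll_less)            # ≈ 1.8
pct_stereotype = 1.0 if ll_more > ll_less else 0.0  # stereotyped sentence scored higher
print({"likelihood_diff": likelihood_diff, "pct_stereotype": pct_stereotype})
```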
lm-evaluation-harness/lm_eval/tasks/super_glue/README.md ADDED
@@ -0,0 +1,77 @@
+ # SuperGLUE
+
+ ### Paper
+
+ Title: `SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems`
+ Abstract: `https://w4ngatang.github.io/static/papers/superglue.pdf`
+
+ SuperGLUE is a benchmark styled after GLUE with a new set of more difficult language
+ understanding tasks.
+
+ Homepage: https://super.gluebenchmark.com/
+
+ ### Citation
+
+ ```
+ @inproceedings{NEURIPS2019_4496bf24,
+     author = {Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel},
+     booktitle = {Advances in Neural Information Processing Systems},
+     editor = {H. Wallach and H. Larochelle and A. Beygelzimer and F. d\textquotesingle Alch\'{e}-Buc and E. Fox and R. Garnett},
+     pages = {},
+     publisher = {Curran Associates, Inc.},
+     title = {SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
+     url = {https://proceedings.neurips.cc/paper/2019/file/4496bf24afe7fab6f046bf4923da8de6-Paper.pdf},
+     volume = {32},
+     year = {2019}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ * `super-glue-lm-eval-v1`: SuperGLUE eval adapted from LM Eval V1
+ * `super-glue-t5-prompt`: SuperGLUE prompt and evaluation that matches the T5 paper (if using accelerate, will error if record is included.)
+
+ #### Tasks
+
+ Comparison between validation split score on T5x and LM-Eval (T5x models converted to HF)
+ | T5V1.1 Base | SGLUE | BoolQ | CB | Copa | MultiRC | ReCoRD | RTE | WiC | WSC |
+ | ----------- | ------| ----- | --------- | ---- | ------- | ------ | --- | --- | --- |
+ | T5x | 69.47 | 78.47(acc) | 83.93(f1) 87.5(acc) | 50(acc) | 73.81(f1) 33.26(em) | 70.09(em) 71.34(f1) | 78.7(acc) | 63.64(acc) | 75(acc) |
+ | LM-Eval | 71.35 | 79.36(acc) | 83.63(f1) 87.5(acc) | 63(acc) | 73.45(f1) 33.26(em) | 69.85(em) 68.86(f1) | 78.34(acc) | 65.83(acc) | 75.96(acc) |
+
+ * `super-glue-lm-eval-v1`
+   - `boolq`
+   - `cb`
+   - `copa`
+   - `multirc`
+   - `record`
+   - `rte`
+   - `wic`
+   - `wsc`
+
+ * `super-glue-t5-prompt`
+   - `super_glue-boolq-t5-prompt`
+   - `super_glue-cb-t5-prompt`
+   - `super_glue-copa-t5-prompt`
+   - `super_glue-multirc-t5-prompt`
+   - `super_glue-record-t5-prompt`
+   - `super_glue-rte-t5-prompt`
+   - `super_glue-wic-t5-prompt`
+   - `super_glue-wsc-t5-prompt`
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [ ] Is the task an existing benchmark in the literature?
+ * [ ] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
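A hedged sketch of evaluating one of the groups listed in the README above through the harness's Python entry point; the exact `simple_evaluate` signature and the `model_args` format vary across harness versions, and the checkpoint name is only a placeholder:

```python
import lm_eval

# Evaluate the LM-Eval-v1-style SuperGLUE group on a Hugging Face model
# (placeholder checkpoint; adjust model_args for your own setup).
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=google/t5-v1_1-base",
    tasks=["super-glue-lm-eval-v1"],
)
print(results["results"])
```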
lm-evaluation-harness/lm_eval/tasks/super_glue/boolq/default.yaml ADDED
@@ -0,0 +1,17 @@
+ group:
+   - super-glue-lm-eval-v1
+ task: boolq
+ dataset_path: super_glue
+ dataset_name: boolq
+ output_type: multiple_choice
+ training_split: train
+ validation_split: validation
+ doc_to_text: "{{passage}}\nQuestion: {{question}}?\nAnswer:"
+ doc_to_target: label
+ doc_to_choice: ["no", "yes"]
+ should_decontaminate: true
+ doc_to_decontamination_query: passage
+ metric_list:
+   - metric: acc
+ metadata:
+   version: 2.0
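For reference, the `doc_to_text` field above is a Jinja-style template. A small sketch of how it renders for one BoolQ-like document (the harness does its own template handling; `jinja2` here is only for illustration and the example document is made up):

```python
from jinja2 import Template

doc = {
    "passage": "Persian cats are a long-haired breed of cat.",
    "question": "are persian cats long haired",
}
prompt = Template("{{passage}}\nQuestion: {{question}}?\nAnswer:").render(**doc)
print(prompt)
# Persian cats are a long-haired breed of cat.
# Question: are persian cats long haired?
# Answer:
```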
lm-evaluation-harness/lm_eval/tasks/super_glue/boolq/seq2seq.yaml ADDED
@@ -0,0 +1,26 @@
+ group:
+   - super-glue-lm-eval-v1-seq2seq
+ task: "boolq-seq2seq"
+ dataset_path: super_glue
+ dataset_name: boolq
+ output_type: generate_until
+ training_split: train
+ validation_split: validation
+ doc_to_text: "{{passage}}\nQuestion: {{question}}?\nAnswer:"
+ doc_to_target: label
+ doc_to_choice: [' no', ' yes']
+ target_delimiter: ""
+ generation_kwargs:
+   until:
+     - "\n\n"
+     - "\n"
+   do_sample: false
+   temperature: 0.0
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+     ignore_case: true
+     ignore_punctuation: true
+ metadata:
+   version: 0.0
lm-evaluation-harness/lm_eval/tasks/super_glue/boolq/t5-prompt.yaml ADDED
@@ -0,0 +1,22 @@
+ group:
+   - super-glue-t5-prompt
+ task: super_glue-boolq-t5-prompt
+ dataset_path: super_glue
+ dataset_name: boolq
+ training_split: train
+ validation_split: validation
+ output_type: generate_until
+ doc_to_text: "boolq passage: {{passage}} question: {{question}}"
+ doc_to_target: label
+ doc_to_choice: ['False', 'True']
+ generation_kwargs:
+   until:
+     - "</s>"
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+     ignore_case: true
+     ignore_punctuation: true
+ metadata:
+   version: 0.0
+ version: 0.0
lm-evaluation-harness/lm_eval/tasks/super_glue/cb/default.yaml ADDED
@@ -0,0 +1,17 @@
+ group:
+   - super-glue-lm-eval-v1
+ task: cb
+ dataset_path: super_glue
+ dataset_name: cb
+ output_type: multiple_choice
+ training_split: train
+ validation_split: validation
+ doc_to_text: "{{premise}}\nQuestion: {{hypothesis}}. True, False, or Neither?\nAnswer:"
+ doc_to_target: label
+ doc_to_choice: ['True', 'False', 'Neither']
+ metric_list:
+   - metric: acc
+   - metric: f1
+     aggregation: !function "aggregate.cb_multi_fi"
+ metadata:
+   version: 1.0
lm-evaluation-harness/lm_eval/tasks/super_glue/cb/t5-prompt.yaml ADDED
@@ -0,0 +1,25 @@
+ group:
+   - super-glue-t5-prompt
+ task: super_glue-cb-t5-prompt
+ dataset_path: super_glue
+ dataset_name: cb
+ training_split: train
+ validation_split: validation
+ output_type: generate_until
+ doc_to_text: "cb hypothesis: {{hypothesis}} premise: {{premise}}"
+ doc_to_target: label
+ doc_to_choice: ['entailment', 'contradiction', 'neutral']
+ generation_kwargs:
+   until:
+     - "</s>"
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+     ignore_case: true
+     ignore_punctuation: true
+   - metric: !function "t5_utils.mean_3class_f1"
+     aggregation: !function "t5_utils.agg_mean_3class_f1"
+     higher_is_better: true
+ metadata:
+   version: 0.0
lm-evaluation-harness/lm_eval/tasks/super_glue/cb/t5_utils.py ADDED
@@ -0,0 +1,30 @@
+ import sklearn.metrics
+
+
+ def mean_3class_f1(predictions, references):  # This is a passthrough function
+     string_label = ["entailment", "contradiction", "neutral"]
+     predictions = (
+         string_label.index(predictions[0]) if predictions[0] in string_label else 0
+     )
+     references = string_label.index(references[0])
+
+     return (predictions, references)
+
+
+ def agg_mean_3class_f1(items):
+     predictions, references = zip(*items)
+
+     """Computes the unweighted average of the F1 per class."""
+     metric_str = "fbeta_score"
+     metric_fn_kwargs = {
+         "beta": 1,
+         "labels": range(3),
+         "average": "macro",
+     }
+
+     def _fn(predictions, references):
+         metric_fn = getattr(sklearn.metrics, metric_str)
+         metric_val = metric_fn(references, predictions, **metric_fn_kwargs)
+         return metric_val
+
+     return _fn(predictions, references)
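A toy illustration of the passthrough/aggregate split above: `mean_3class_f1` maps each example's strings to class ids, and `agg_mean_3class_f1` computes macro-F1 over all the collected pairs. The class-id pairs below are made up:

```python
import sklearn.metrics

items = [(0, 0), (1, 2), (2, 2)]  # (prediction, reference) class-id pairs
preds, refs = zip(*items)
print(sklearn.metrics.fbeta_score(refs, preds, beta=1, labels=range(3), average="macro"))
```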
lm-evaluation-harness/lm_eval/tasks/super_glue/copa/__pycache__/utils.cpython-310.pyc ADDED
Binary file (924 Bytes).
lm-evaluation-harness/lm_eval/tasks/super_glue/copa/default.yaml ADDED
@@ -0,0 +1,15 @@
+ group:
+   - super-glue-lm-eval-v1
+ task: copa
+ dataset_path: super_glue
+ dataset_name: copa
+ output_type: multiple_choice
+ training_split: train
+ validation_split: validation
+ doc_to_text: !function utils.doc_to_text
+ doc_to_target: !function utils.doc_to_target
+ doc_to_choice: !function utils.doc_to_choice
+ metric_list:
+   - metric: acc
+ metadata:
+   version: 1.0
lm-evaluation-harness/lm_eval/tasks/super_glue/copa/t5-prompt.yaml ADDED
@@ -0,0 +1,22 @@
+ group:
+   - super-glue-t5-prompt
+ task: super_glue-copa-t5-prompt
+ dataset_path: super_glue
+ dataset_name: copa
+ training_split: train
+ validation_split: validation
+ output_type: generate_until
+ doc_to_text: "copa choice1: {{choice1}} choice2: {{choice2}} premise: {{premise}} question: {{question}}"
+ doc_to_target: label
+ doc_to_choice: ['choice1', 'choice2']
+ generation_kwargs:
+   until:
+     - "</s>"
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+     ignore_case: true
+     ignore_punctuation: true
+ metadata:
+   version: 0.0
lm-evaluation-harness/lm_eval/tasks/super_glue/copa/utils.py ADDED
@@ -0,0 +1,21 @@
+ def convert_choice(choice):
+     return choice[0].lower() + choice[1:]
+
+
+ def doc_to_text(doc):
+     # Drop the period
+     connector = {
+         "cause": "because",
+         "effect": "therefore",
+     }[doc["question"]]
+     return doc["premise"].strip()[:-1] + f" {connector}"
+
+
+ def doc_to_target(doc):
+     correct_choice = doc["choice1"] if doc["label"] == 0 else doc["choice2"]
+     # Connect the sentences
+     return " " + convert_choice(correct_choice)
+
+
+ def doc_to_choice(doc):
+     return [" " + convert_choice(doc["choice1"]), " " + convert_choice(doc["choice2"])]
lm-evaluation-harness/lm_eval/tasks/super_glue/wic/default.yaml ADDED
@@ -0,0 +1,15 @@
+ group:
+   - super-glue-lm-eval-v1
+ task: "wic"
+ dataset_path: super_glue
+ dataset_name: wic
+ output_type: multiple_choice
+ training_split: train
+ validation_split: validation
+ doc_to_text: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Is the word '{{sentence1[start1:end1]}}' used in the same way in the two sentences above?\nAnswer:"
+ doc_to_target: label
+ doc_to_choice: ['no', 'yes']
+ metric_list:
+   - metric: acc
+ metadata:
+   version: 1.0
lm-evaluation-harness/lm_eval/tasks/super_glue/wic/t5-prompt.yaml ADDED
@@ -0,0 +1,22 @@
+ group:
+   - super-glue-t5-prompt
+ task: super_glue-wic-t5-prompt
+ dataset_path: super_glue
+ dataset_name: wic
+ training_split: train
+ validation_split: validation
+ output_type: generate_until
+ doc_to_text: "wic sentence1: {{sentence1}} sentence2: {{sentence2}} word: {{word}}"
+ doc_to_target: label
+ doc_to_choice: ['False', 'True']
+ generation_kwargs:
+   until:
+     - "</s>"
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+     ignore_case: true
+     ignore_punctuation: true
+ metadata:
+   version: 0.0
lm-evaluation-harness/lm_eval/tasks/super_glue/wsc/default.yaml ADDED
@@ -0,0 +1,15 @@
+ group:
+   - super-glue-lm-eval-v1
+ task: wsc
+ dataset_path: super_glue
+ dataset_name: wsc.fixed
+ output_type: multiple_choice
+ training_split: train
+ validation_split: validation
+ doc_to_text: !function preprocess_wsc.default_doc_to_text
+ doc_to_target: label
+ doc_to_choice: ['no', 'yes']
+ metric_list:
+   - metric: acc
+ metadata:
+   version: 1.0
lm-evaluation-harness/lm_eval/tasks/super_glue/wsc/preprocess_wsc.py ADDED
@@ -0,0 +1,17 @@
+ from lm_eval.utils import general_detokenize
+
+
+ def default_doc_to_text(x):
+     raw_passage = x["text"]
+     # NOTE: HuggingFace span indices are word-based not character-based.
+     pre = " ".join(raw_passage.split()[: x["span2_index"]])
+     post = raw_passage[len(pre) + len(x["span2_text"]) + 1 :]
+     passage = general_detokenize(pre + " *{}*".format(x["span2_text"]) + post)
+     noun = x["span1_text"]
+     pronoun = x["span2_text"]
+     text = (
+         f"Passage: {passage}\n"
+         + f'Question: In the passage above, does the pronoun "*{pronoun}*" refer to "*{noun}*"?\n'
+         + "Answer:"
+     )
+     return text
lm-evaluation-harness/lm_eval/tasks/super_glue/wsc/t5-prompt.yaml ADDED
@@ -0,0 +1,20 @@
+ group:
+   - super-glue-t5-prompt
+ task: super_glue-wsc-t5-prompt
+ dataset_path: super_glue
+ dataset_name: wsc.fixed
+ training_split: train
+ validation_split: validation
+ output_type: generate_until
+ doc_to_text: !function "t5_utils.doc_to_text"
+ process_results: !function "t5_utils.process_results"
+ doc_to_target: label
+ generation_kwargs:
+   until:
+     - "</s>"
+ metric_list:
+   - metric: accuracy
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 1.0
lm-evaluation-harness/lm_eval/tasks/super_glue/wsc/t5_utils.py ADDED
@@ -0,0 +1,104 @@
+ import re
+ from typing import List
+
+
+ def doc_to_text(x):
+     text = re.sub(r" X ", " *" + x["span2_text"] + "* ", _wsc_inputs(x))
+     return "wsc: " + text
+
+
+ def _wsc_inputs(x):
+     words = x["text"].split(" ")
+
+     # We would need some special logic to handle the case where the pronoun is the
+     # first or last word in the text. None of the examples in WSC seem to have
+     # this, so we are ignoring these cases.
+     assert x["span2_index"] > 0
+     assert x["span2_index"] < len(words)
+     pronoun_index = x["span2_index"]
+
+     def create_input():
+         assert words[pronoun_index] == x["span2_text"]
+
+         return " ".join(
+             [
+                 " ".join(words[:pronoun_index]),
+                 "X",
+                 " ".join(words[pronoun_index + 1 :]),
+             ]
+         )
+
+     # Handle some special cases.
+     if (
+         x["text"]
+         == 'The boy continued to whip the pony , and eventually the pony threw him over. John laughed out quite loud. "Good for him," he said. '
+     ):
+         return (
+             "The boy continued to whip the pony , and eventually the pony threw "
+             'him over. John laughed out quite loud. "Good for X ," he said.'
+         )
+
+     # Using the span2_index, we get 'use' instead of 'it'.
+     if (
+         x["text"]
+         == "When they had eventually calmed down a bit , and had gotten home, Mr. Farley put the magic pebble in an iron safe . Some day they might want to use it , but really for now, what more could they wish for?"
+     ):
+         return (
+             "When they had eventually calmed down a bit , and had gotten home, "
+             "Mr. Farley put the magic pebble in an iron safe . Some day they might "
+             "want to use X , but really for now, what more could they wish for?"
+         )
+
+     return create_input()
+
+
+ DETERMINERS = {
+     "a",
+     "an",
+     "few",
+     "her",
+     "his",
+     "each",
+     "every",
+     "many",
+     "much",
+     "my",
+     "our",
+     "some",
+     "that",
+     "the",
+     "their",
+     "these",
+     "this",
+     "those",
+     "which",
+     "whose",
+     "your",
+ }
+
+
+ def clean(s: str) -> str:
+     """Ignore capitalization and determiners."""
+     s = s.strip().lower()
+     return " ".join([w for w in s.split(" ") if w not in DETERMINERS])
+
+
+ def process_results(docs: dict, resps: List):
+     prediction = clean(resps[0])
+     reference = clean(docs["span1_text"])
+
+     if ("'" in prediction) != ("'" in reference):
+         # Don't mark cases where the prediction is "Bob" and the referent is "Bob's hat" as predicting the referent.
+         predicted_referent = False
+     else:
+         prediction_words = set(prediction.split(" "))
+         referent_words = set(reference.split(" "))
+
+         # Handle cases where the prediction is "fuzzy bunny" and the referent is
+         # "bunny".
+         predicted_referent = prediction_words.issubset(
+             referent_words
+         ) or referent_words.issubset(prediction_words)
+
+     acc = 1.0 if predicted_referent == docs["label"] else 0.0
+     return {"accuracy": acc}