applied-ai-018 committed on
Commit 64dbae1 · verified · 1 Parent(s): 89c6ce0

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. lm-evaluation-harness/lm_eval/tasks/blimp/_template_yaml +14 -0
  2. lm-evaluation-harness/lm_eval/tasks/blimp/animate_subject_trans.yaml +4 -0
  3. lm-evaluation-harness/lm_eval/tasks/blimp/causative.yaml +4 -0
  4. lm-evaluation-harness/lm_eval/tasks/blimp/coordinate_structure_constraint_complex_left_branch.yaml +4 -0
  5. lm-evaluation-harness/lm_eval/tasks/blimp/determiner_noun_agreement_1.yaml +4 -0
  6. lm-evaluation-harness/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_irregular_2.yaml +4 -0
  7. lm-evaluation-harness/lm_eval/tasks/blimp/distractor_agreement_relational_noun.yaml +4 -0
  8. lm-evaluation-harness/lm_eval/tasks/blimp/drop_argument.yaml +4 -0
  9. lm-evaluation-harness/lm_eval/tasks/blimp/ellipsis_n_bar_1.yaml +4 -0
  10. lm-evaluation-harness/lm_eval/tasks/blimp/existential_there_subject_raising.yaml +4 -0
  11. lm-evaluation-harness/lm_eval/tasks/blimp/generate_configs.py +94 -0
  12. lm-evaluation-harness/lm_eval/tasks/blimp/left_branch_island_simple_question.yaml +4 -0
  13. lm-evaluation-harness/lm_eval/tasks/blimp/npi_present_1.yaml +4 -0
  14. lm-evaluation-harness/lm_eval/tasks/blimp/npi_present_2.yaml +4 -0
  15. lm-evaluation-harness/lm_eval/tasks/blimp/only_npi_licensor_present.yaml +4 -0
  16. lm-evaluation-harness/lm_eval/tasks/blimp/passive_2.yaml +4 -0
  17. lm-evaluation-harness/lm_eval/tasks/blimp/principle_A_c_command.yaml +4 -0
  18. lm-evaluation-harness/lm_eval/tasks/blimp/principle_A_case_1.yaml +4 -0
  19. lm-evaluation-harness/lm_eval/tasks/blimp/principle_A_domain_2.yaml +4 -0
  20. lm-evaluation-harness/lm_eval/tasks/blimp/principle_A_domain_3.yaml +4 -0
  21. lm-evaluation-harness/lm_eval/tasks/blimp/regular_plural_subject_verb_agreement_1.yaml +4 -0
  22. lm-evaluation-harness/lm_eval/tasks/blimp/regular_plural_subject_verb_agreement_2.yaml +4 -0
  23. lm-evaluation-harness/lm_eval/tasks/blimp/tough_vs_raising_1.yaml +4 -0
  24. lm-evaluation-harness/lm_eval/tasks/blimp/tough_vs_raising_2.yaml +4 -0
  25. lm-evaluation-harness/lm_eval/tasks/blimp/wh_island.yaml +4 -0
  26. lm-evaluation-harness/lm_eval/tasks/blimp/wh_vs_that_with_gap_long_distance.yaml +4 -0
  27. lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_bn.yaml +33 -0
  28. lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_mai.yaml +33 -0
  29. lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_mr.yaml +22 -0
  30. lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_ta.yaml +33 -0
  31. lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_te.yaml +33 -0
  32. lm-evaluation-harness/lm_eval/tasks/mathqa/README.md +50 -0
  33. lm-evaluation-harness/lm_eval/tasks/mathqa/mathqa.yaml +22 -0
  34. lm-evaluation-harness/lm_eval/tasks/mathqa/utils.py +9 -0
  35. lm-evaluation-harness/lm_eval/tasks/pile/README.md +68 -0
  36. lm-evaluation-harness/lm_eval/tasks/pile/pile_arxiv.yaml +23 -0
  37. lm-evaluation-harness/lm_eval/tasks/pile/pile_bookcorpus2.yaml +3 -0
  38. lm-evaluation-harness/lm_eval/tasks/pile/pile_books3.yaml +3 -0
  39. lm-evaluation-harness/lm_eval/tasks/pile/pile_dm-mathematics.yaml +3 -0
  40. lm-evaluation-harness/lm_eval/tasks/pile/pile_enron.yaml +3 -0
  41. lm-evaluation-harness/lm_eval/tasks/pile/pile_europarl.yaml +3 -0
  42. lm-evaluation-harness/lm_eval/tasks/pile/pile_freelaw.yaml +3 -0
  43. lm-evaluation-harness/lm_eval/tasks/pile/pile_github.yaml +3 -0
  44. lm-evaluation-harness/lm_eval/tasks/pile/pile_gutenberg.yaml +3 -0
  45. lm-evaluation-harness/lm_eval/tasks/pile/pile_hackernews.yaml +3 -0
  46. lm-evaluation-harness/lm_eval/tasks/pile/pile_nih-exporter.yaml +3 -0
  47. lm-evaluation-harness/lm_eval/tasks/pile/pile_opensubtitles.yaml +3 -0
  48. lm-evaluation-harness/lm_eval/tasks/pile/pile_openwebtext2.yaml +3 -0
  49. lm-evaluation-harness/lm_eval/tasks/pile/pile_philpapers.yaml +3 -0
  50. lm-evaluation-harness/lm_eval/tasks/pile/pile_pile-cc.yaml +3 -0
lm-evaluation-harness/lm_eval/tasks/blimp/_template_yaml ADDED
@@ -0,0 +1,14 @@
+ group: blimp
+ dataset_path: blimp
+ output_type: multiple_choice
+ validation_split: train
+ doc_to_text: ""
+ doc_to_target: 0
+ doc_to_choice: "{{[sentence_good, sentence_bad]}}"
+ num_fewshot: 0
+ should_decontaminate: true
+ doc_to_decontamination_query: "{{sentence_good}} {{sentence_bad}}"
+ metric_list:
+   - metric: acc
+ metadata:
+   version: 1.0
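Under this template every BLiMP item is scored as a two-way multiple-choice problem: `doc_to_choice` yields `[sentence_good, sentence_bad]` and `doc_to_target` is `0`, so accuracy reduces to whether the model assigns the higher loglikelihood to the grammatical sentence. A minimal sketch of that comparison (not harness code; `loglikelihood` is a stand-in for whatever model backend scores a context/continuation pair):

```
# Minimal sketch of the scoring this template implies; `loglikelihood` is a
# placeholder callable, not a real harness API.
def blimp_item_correct(doc, loglikelihood):
    choices = [doc["sentence_good"], doc["sentence_bad"]]  # doc_to_choice
    target = 0                                             # doc_to_target: the good sentence
    scores = [loglikelihood("", choice) for choice in choices]  # doc_to_text is empty
    return scores.index(max(scores)) == target
```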
lm-evaluation-harness/lm_eval/tasks/blimp/animate_subject_trans.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: animate_subject_trans
+ include: _template_yaml
+ task: blimp_animate_subject_trans
lm-evaluation-harness/lm_eval/tasks/blimp/causative.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: causative
+ include: _template_yaml
+ task: blimp_causative
lm-evaluation-harness/lm_eval/tasks/blimp/coordinate_structure_constraint_complex_left_branch.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: coordinate_structure_constraint_complex_left_branch
+ include: _template_yaml
+ task: blimp_coordinate_structure_constraint_complex_left_branch
lm-evaluation-harness/lm_eval/tasks/blimp/determiner_noun_agreement_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: determiner_noun_agreement_1
+ include: _template_yaml
+ task: blimp_determiner_noun_agreement_1
lm-evaluation-harness/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_irregular_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: determiner_noun_agreement_with_adj_irregular_2
+ include: _template_yaml
+ task: blimp_determiner_noun_agreement_with_adj_irregular_2
lm-evaluation-harness/lm_eval/tasks/blimp/distractor_agreement_relational_noun.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: distractor_agreement_relational_noun
+ include: _template_yaml
+ task: blimp_distractor_agreement_relational_noun
lm-evaluation-harness/lm_eval/tasks/blimp/drop_argument.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: drop_argument
+ include: _template_yaml
+ task: blimp_drop_argument
lm-evaluation-harness/lm_eval/tasks/blimp/ellipsis_n_bar_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: ellipsis_n_bar_1
+ include: _template_yaml
+ task: blimp_ellipsis_n_bar_1
lm-evaluation-harness/lm_eval/tasks/blimp/existential_there_subject_raising.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: existential_there_subject_raising
+ include: _template_yaml
+ task: blimp_existential_there_subject_raising
lm-evaluation-harness/lm_eval/tasks/blimp/generate_configs.py ADDED
@@ -0,0 +1,94 @@
+ import yaml
+
+
+ all_subtasks = [
+     "adjunct_island",
+     "anaphor_gender_agreement",
+     "anaphor_number_agreement",
+     "animate_subject_passive",
+     "animate_subject_trans",
+     "causative",
+     "complex_NP_island",
+     "coordinate_structure_constraint_complex_left_branch",
+     "coordinate_structure_constraint_object_extraction",
+     "determiner_noun_agreement_1",
+     "determiner_noun_agreement_2",
+     "determiner_noun_agreement_irregular_1",
+     "determiner_noun_agreement_irregular_2",
+     "determiner_noun_agreement_with_adj_2",
+     "determiner_noun_agreement_with_adj_irregular_1",
+     "determiner_noun_agreement_with_adj_irregular_2",
+     "determiner_noun_agreement_with_adjective_1",
+     "distractor_agreement_relational_noun",
+     "distractor_agreement_relative_clause",
+     "drop_argument",
+     "ellipsis_n_bar_1",
+     "ellipsis_n_bar_2",
+     "existential_there_object_raising",
+     "existential_there_quantifiers_1",
+     "existential_there_quantifiers_2",
+     "existential_there_subject_raising",
+     "expletive_it_object_raising",
+     "inchoative",
+     "intransitive",
+     "irregular_past_participle_adjectives",
+     "irregular_past_participle_verbs",
+     "irregular_plural_subject_verb_agreement_1",
+     "irregular_plural_subject_verb_agreement_2",
+     "left_branch_island_echo_question",
+     "left_branch_island_simple_question",
+     "matrix_question_npi_licensor_present",
+     "npi_present_1",
+     "npi_present_2",
+     "only_npi_licensor_present",
+     "only_npi_scope",
+     "passive_1",
+     "passive_2",
+     "principle_A_c_command",
+     "principle_A_case_1",
+     "principle_A_case_2",
+     "principle_A_domain_1",
+     "principle_A_domain_2",
+     "principle_A_domain_3",
+     "principle_A_reconstruction",
+     "regular_plural_subject_verb_agreement_1",
+     "regular_plural_subject_verb_agreement_2",
+     "sentential_negation_npi_licensor_present",
+     "sentential_negation_npi_scope",
+     "sentential_subject_island",
+     "superlative_quantifiers_1",
+     "superlative_quantifiers_2",
+     "tough_vs_raising_1",
+     "tough_vs_raising_2",
+     "transitive",
+     "wh_island",
+     "wh_questions_object_gap",
+     "wh_questions_subject_gap",
+     "wh_questions_subject_gap_long_distance",
+     "wh_vs_that_no_gap",
+     "wh_vs_that_no_gap_long_distance",
+     "wh_vs_that_with_gap",
+     "wh_vs_that_with_gap_long_distance",
+ ]
+
+
+ def main() -> None:
+     for task in all_subtasks:
+         file_name = f"{task}.yaml"
+         try:
+             with open(f"{file_name}", "w", encoding="utf-8") as f:
+                 f.write("# Generated by utils.py\n")
+                 yaml.dump(
+                     {
+                         "include": "_template_yaml",
+                         "task": "blimp_" + task,
+                         "dataset_name": task,
+                     },
+                     f,
+                 )
+         except FileExistsError:
+             pass
+
+
+ if __name__ == "__main__":
+     main()
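The per-phenomenon YAML files in this commit are the output of this script. As a rough illustration, the mapping it writes for one subtask can be rebuilt in memory (assuming PyYAML, whose `yaml.dump` sorts keys alphabetically by default, which is why the generated files list `dataset_name` first):

```
import yaml

# Rebuild one generated config in memory; generate_configs.py writes the same
# mapping (preceded by a "# Generated by utils.py" header line) to <task>.yaml
# for every entry in all_subtasks.
task = "causative"
print(yaml.dump({"include": "_template_yaml", "task": "blimp_" + task, "dataset_name": task}), end="")
# dataset_name: causative
# include: _template_yaml
# task: blimp_causative
```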
lm-evaluation-harness/lm_eval/tasks/blimp/left_branch_island_simple_question.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: left_branch_island_simple_question
+ include: _template_yaml
+ task: blimp_left_branch_island_simple_question
lm-evaluation-harness/lm_eval/tasks/blimp/npi_present_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: npi_present_1
+ include: _template_yaml
+ task: blimp_npi_present_1
lm-evaluation-harness/lm_eval/tasks/blimp/npi_present_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: npi_present_2
+ include: _template_yaml
+ task: blimp_npi_present_2
lm-evaluation-harness/lm_eval/tasks/blimp/only_npi_licensor_present.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: only_npi_licensor_present
+ include: _template_yaml
+ task: blimp_only_npi_licensor_present
lm-evaluation-harness/lm_eval/tasks/blimp/passive_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: passive_2
+ include: _template_yaml
+ task: blimp_passive_2
lm-evaluation-harness/lm_eval/tasks/blimp/principle_A_c_command.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: principle_A_c_command
+ include: _template_yaml
+ task: blimp_principle_A_c_command
lm-evaluation-harness/lm_eval/tasks/blimp/principle_A_case_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: principle_A_case_1
+ include: _template_yaml
+ task: blimp_principle_A_case_1
lm-evaluation-harness/lm_eval/tasks/blimp/principle_A_domain_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: principle_A_domain_2
+ include: _template_yaml
+ task: blimp_principle_A_domain_2
lm-evaluation-harness/lm_eval/tasks/blimp/principle_A_domain_3.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: principle_A_domain_3
+ include: _template_yaml
+ task: blimp_principle_A_domain_3
lm-evaluation-harness/lm_eval/tasks/blimp/regular_plural_subject_verb_agreement_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: regular_plural_subject_verb_agreement_1
+ include: _template_yaml
+ task: blimp_regular_plural_subject_verb_agreement_1
lm-evaluation-harness/lm_eval/tasks/blimp/regular_plural_subject_verb_agreement_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: regular_plural_subject_verb_agreement_2
+ include: _template_yaml
+ task: blimp_regular_plural_subject_verb_agreement_2
lm-evaluation-harness/lm_eval/tasks/blimp/tough_vs_raising_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: tough_vs_raising_1
+ include: _template_yaml
+ task: blimp_tough_vs_raising_1
lm-evaluation-harness/lm_eval/tasks/blimp/tough_vs_raising_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: tough_vs_raising_2
+ include: _template_yaml
+ task: blimp_tough_vs_raising_2
lm-evaluation-harness/lm_eval/tasks/blimp/wh_island.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: wh_island
+ include: _template_yaml
+ task: blimp_wh_island
lm-evaluation-harness/lm_eval/tasks/blimp/wh_vs_that_with_gap_long_distance.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: wh_vs_that_with_gap_long_distance
+ include: _template_yaml
+ task: blimp_wh_vs_that_with_gap_long_distance
lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_bn.yaml ADDED
@@ -0,0 +1,33 @@
+ # This file will be included in the generated language-specific task configs.
+ # It doesn't have a yaml file extension as it is not meant to be imported directly
+ # by the harness.
+ group: ai4bharat/IndicCOPA
+ dataset_path: ai4bharat/IndicCOPA
+ dataset_name: translation-bn
+ output_type: multiple_choice
+ # training_split: train
+ # validation_split: validation
+ test_split: test
+ # doc_to_text: "Premise: {{premise}}\nGiven the premise what is the {{question}}\nPlease Choose Among following 2 choices and label them as 0 for 1st choice and 1 for 2nd choice."
+ # doc_to_target: label
+ # doc_to_choice: "{{choice1}}{{choice2}}"
+ # metric_list:
+ # - metric: acc
+ # aggregation: mean
+ # higher_is_better: true
+ # metadata:
+ # version: 1.0
+
+ doc_to_text: !function utils.doc_to_text_bn
+ doc_to_target: label
+ doc_to_choice: !function utils.doc_to_choice
+ metric_list:
+   - metric: acc
+ metadata:
+   version: 1.0
+
+
+ # doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+",
+ # सही? नहीं, "+hypothesis]}}'
+ # doc_to_text: ''
+ task: indiccopa-bn
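These configs reference `utils.doc_to_text_bn` and `utils.doc_to_choice`, but the indiccopa `utils.py` is not among the 50 files shown in this view. A purely hypothetical sketch of functions with those names, assuming the COPA-style fields `premise`, `question`, `choice1`, `choice2`, and an integer `label`:

```
# Hypothetical sketch only -- the indiccopa utils.py is not part of this diff
# view, so the names and prompt wording below are assumptions.
def doc_to_text_bn(doc):
    # Assumed prompt shape; the actual (likely Bengali) wording lives in the real utils.py.
    relation = "cause" if doc["question"] == "cause" else "effect"
    return f"Premise: {doc['premise']}\nWhat was the {relation}?\nAnswer:"


def doc_to_choice(doc):
    # The integer `label` field indexes into this pair of candidate completions.
    return [doc["choice1"], doc["choice2"]]
```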
lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_mai.yaml ADDED
@@ -0,0 +1,33 @@
+ # This file will be included in the generated language-specific task configs.
+ # It doesn't have a yaml file extension as it is not meant to be imported directly
+ # by the harness.
+ group: ai4bharat/IndicCOPA
+ dataset_path: ai4bharat/IndicCOPA
+ dataset_name: translation-mai
+ output_type: multiple_choice
+ # training_split: train
+ # validation_split: validation
+ test_split: test
+ # doc_to_text: "Premise: {{premise}}\nGiven the premise what is the {{question}}\nPlease Choose Among following 2 choices and label them as 0 for 1st choice and 1 for 2nd choice."
+ # doc_to_target: label
+ # doc_to_choice: "{{choice1}}{{choice2}}"
+ # metric_list:
+ # - metric: acc
+ # aggregation: mean
+ # higher_is_better: true
+ # metadata:
+ # version: 1.0
+
+ doc_to_text: !function utils.doc_to_text_mai
+ doc_to_target: label
+ doc_to_choice: !function utils.doc_to_choice
+ metric_list:
+   - metric: acc
+ metadata:
+   version: 1.0
+
+
+ # doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+",
+ # सही? नहीं, "+hypothesis]}}'
+ # doc_to_text: ''
+ task: indiccopa-mai
lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_mr.yaml ADDED
@@ -0,0 +1,22 @@
+ # This file will be included in the generated language-specific task configs.
+ # It doesn't have a yaml file extension as it is not meant to be imported directly
+ # by the harness.
+ group: ai4bharat/IndicCOPA
+ dataset_path: ai4bharat/IndicCOPA
+ dataset_name: translation-mr
+ output_type: multiple_choice
+ # training_split: train
+ # validation_split: validation
+ test_split: test
+
+
+ doc_to_text: !function utils.doc_to_text_mr
+ doc_to_target: label
+ doc_to_choice: !function utils.doc_to_choice
+ metric_list:
+   - metric: acc
+ metadata:
+   version: 1.0
+
+
+ task: indiccopa-mr
lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_ta.yaml ADDED
@@ -0,0 +1,33 @@
+ # This file will be included in the generated language-specific task configs.
+ # It doesn't have a yaml file extension as it is not meant to be imported directly
+ # by the harness.
+ group: ai4bharat/IndicCOPA
+ dataset_path: ai4bharat/IndicCOPA
+ dataset_name: translation-ta
+ output_type: multiple_choice
+ # training_split: train
+ # validation_split: validation
+ test_split: test
+ # doc_to_text: "Premise: {{premise}}\nGiven the premise what is the {{question}}\nPlease Choose Among following 2 choices and label them as 0 for 1st choice and 1 for 2nd choice."
+ # doc_to_target: label
+ # doc_to_choice: "{{choice1}}{{choice2}}"
+ # metric_list:
+ # - metric: acc
+ # aggregation: mean
+ # higher_is_better: true
+ # metadata:
+ # version: 1.0
+
+ doc_to_text: !function utils.doc_to_text_ta
+ doc_to_target: label
+ doc_to_choice: !function utils.doc_to_choice
+ metric_list:
+   - metric: acc
+ metadata:
+   version: 1.0
+
+
+ # doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+",
+ # सही? नहीं, "+hypothesis]}}'
+ # doc_to_text: ''
+ task: indiccopa-ta
lm-evaluation-harness/lm_eval/tasks/indiccopa/indiccopa_te.yaml ADDED
@@ -0,0 +1,33 @@
+ # This file will be included in the generated language-specific task configs.
+ # It doesn't have a yaml file extension as it is not meant to be imported directly
+ # by the harness.
+ group: ai4bharat/IndicCOPA
+ dataset_path: ai4bharat/IndicCOPA
+ dataset_name: translation-te
+ output_type: multiple_choice
+ # training_split: train
+ # validation_split: validation
+ test_split: test
+ # doc_to_text: "Premise: {{premise}}\nGiven the premise what is the {{question}}\nPlease Choose Among following 2 choices and label them as 0 for 1st choice and 1 for 2nd choice."
+ # doc_to_target: label
+ # doc_to_choice: "{{choice1}}{{choice2}}"
+ # metric_list:
+ # - metric: acc
+ # aggregation: mean
+ # higher_is_better: true
+ # metadata:
+ # version: 1.0
+
+ doc_to_text: !function utils.doc_to_text_te
+ doc_to_target: label
+ doc_to_choice: !function utils.doc_to_choice
+ metric_list:
+   - metric: acc
+ metadata:
+   version: 1.0
+
+
+ # doc_to_choice: '{{[premise+", सही? हाँ, "+hypothesis,premise+", सही? इसलिए, "+hypothesis,premise+",
+ # सही? नहीं, "+hypothesis]}}'
+ # doc_to_text: ''
+ task: indiccopa-te
lm-evaluation-harness/lm_eval/tasks/mathqa/README.md ADDED
@@ -0,0 +1,50 @@
+ # MathQA
+
+ ### Paper
+
+ MathQA: Towards Interpretable Math Word Problem Solving with Operation-Based Formalisms
+ https://arxiv.org/pdf/1905.13319.pdf
+
+ MathQA is a large-scale dataset of 37k English multiple-choice math word problems
+ covering multiple math domain categories by modeling operation programs corresponding
+ to word problems in the AQuA dataset (Ling et al., 2017).
+
+ Homepage: https://math-qa.github.io/math-QA/
+
+
+ ### Citation
+
+ ```
+ @misc{amini2019mathqa,
+ title={MathQA: Towards Interpretable Math Word Problem Solving with Operation-Based Formalisms},
+ author={Aida Amini and Saadia Gabriel and Peter Lin and Rik Koncel-Kedziorski and Yejin Choi and Hannaneh Hajishirzi},
+ year={2019},
+ eprint={1905.13319},
+ archivePrefix={arXiv},
+ primaryClass={cs.CL}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ * `math_word_problems`
+
+ #### Tasks
+
+ * `mathqa`: The MathQA dataset, as a multiple choice dataset where the answer choices are not in context.
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [x] Is the task an existing benchmark in the literature?
+ * [x] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+ * The MathQA dataset predates transformer-based prompted LLMs. We should, however, return to this task to ensure equivalence to the non-CoT version of MathQA used in the Chain-of-Thought paper.
+
+ If other tasks on this dataset are already supported:
+ * [x] Is the "Main" variant of this task clearly denoted?
+ * [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [x] Have you noted which, if any, published evaluation setups are matched by this variant?
+ * [x] Checked for equivalence with v0.3.0 LM Evaluation Harness
lm-evaluation-harness/lm_eval/tasks/mathqa/mathqa.yaml ADDED
@@ -0,0 +1,22 @@
+ group:
+   - math_word_problems
+ task: mathqa
+ dataset_path: math_qa
+ output_type: multiple_choice
+ training_split: train
+ validation_split: validation
+ test_split: test
+ doc_to_text: "Question: {{Problem}}\nAnswer:"
+ doc_to_target: "{{['a', 'b', 'c', 'd', 'e'].index(correct)}}"
+ doc_to_choice: !function utils.doc_to_choice
+ should_decontaminate: true
+ doc_to_decontamination_query: "Question: {{Problem}}\nAnswer:"
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 1.0
lm-evaluation-harness/lm_eval/tasks/mathqa/utils.py ADDED
@@ -0,0 +1,9 @@
+ import re
+
+
+ def doc_to_choice(doc):
+     choices = [
+         c[4:].rstrip(" ,")
+         for c in re.findall(r"[abcd] \) .*?, |e \) .*?$", doc["options"])
+     ]
+     return choices
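This helper splits MathQA's flat `options` string into the five answer texts, in the same `a`–`e` order that `doc_to_target` in `mathqa.yaml` uses to index the `correct` letter. A self-contained example (the options string below is illustrative, not taken from the dataset):

```
import re


def doc_to_choice(doc):
    # Same parsing as the task's utils.py: strip the "x ) " prefix and the trailing " ," separator.
    return [
        c[4:].rstrip(" ,")
        for c in re.findall(r"[abcd] \) .*?, |e \) .*?$", doc["options"])
    ]


# Illustrative options string in the MathQA format (not a real dataset row).
doc = {"options": "a ) 38 , b ) 27.675 , c ) 30 , d ) data inadequate , e ) none of these"}
print(doc_to_choice(doc))
# ['38', '27.675', '30', 'data inadequate', 'none of these']
```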
lm-evaluation-harness/lm_eval/tasks/pile/README.md ADDED
@@ -0,0 +1,68 @@
+ # The Pile
+
+ ### Paper
+ Title: The Pile: An 800GB Dataset of Diverse Text for Language Modeling
+
+ Abstract: https://arxiv.org/abs/2101.00027
+
+ The Pile is an 825 GiB diverse, open source language modelling data set that consists
+ of 22 smaller, high-quality datasets combined together. To score well on Pile
+ BPB (bits per byte), a model must be able to understand many disparate domains
+ including books, github repositories, webpages, chat logs, and medical, physics,
+ math, computer science, and philosophy papers.
+
+ Homepage: https://pile.eleuther.ai/
+
+ ### Citation
+ ```
+ @article{pile,
+ title={The {P}ile: An 800GB Dataset of Diverse Text for Language Modeling},
+ author={Gao, Leo and Biderman, Stella and Black, Sid and Golding, Laurence and Hoppe, Travis and Foster, Charles and Phang, Jason and He, Horace and Thite, Anish and Nabeshima, Noa and Presser, Shawn and Leahy, Connor},
+ journal={arXiv preprint arXiv:2101.00027},
+ year={2020}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ * `pile`
+
+ #### Tasks
+
+ * `pile_arxiv`
+ * `pile_bookcorpus2`
+ * `pile_books3`
+ * `pile_dm-mathematics`
+ * `pile_enron`
+ * `pile_europarl`
+ * `pile_freelaw`
+ * `pile_github`
+ * `pile_gutenberg`
+ * `pile_hackernews`
+ * `pile_nih-exporter`
+ * `pile_opensubtitles`
+ * `pile_openwebtext2`
+ * `pile_philpapers`
+ * `pile_pile-cc`
+ * `pile_pubmed-abstracts`
+ * `pile_pubmed-central`
+ * `pile_stackexchange`
+ * `pile_ubuntu-irc`
+ * `pile_uspto`
+ * `pile_wikipedia`
+ * `pile_youtubesubtitles`
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [ ] Is the task an existing benchmark in the literature?
+ * [ ] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation-harness/lm_eval/tasks/pile/pile_arxiv.yaml ADDED
@@ -0,0 +1,23 @@
+ group:
+   - pile
+ task: pile_arxiv
+ dataset_path: EleutherAI/pile
+ dataset_name: pile_arxiv
+ output_type: loglikelihood_rolling
+ test_split: train
+ doc_to_text: ""
+ doc_to_target: "{{text}}"
+ should_decontaminate: true
+ doc_to_decontamination_query: "{{text}}"
+ metric_list:
+   - metric: word_perplexity
+     aggregation: weighted_perplexity
+     higher_is_better: false
+   - metric: byte_perplexity
+     aggregation: weighted_perplexity
+     higher_is_better: false
+   - metric: bits_per_byte
+     aggregation: bits_per_byte
+     higher_is_better: false
+ metadata:
+   version: 2.0
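The three metrics above are aggregations over per-document rolling loglikelihoods. A rough sketch of the arithmetic, assuming natural-log likelihoods and per-document word/byte counts as the weights (a simplification of the harness's internal bookkeeping, shown only to make the aggregation names concrete):

```
import math

# Illustrative aggregation over per-document rolling log-likelihoods (in nats).
def weighted_perplexity(loglikelihoods, weights):
    return math.exp(-sum(loglikelihoods) / sum(weights))


def bits_per_byte(loglikelihoods, byte_counts):
    return -sum(loglikelihoods) / (sum(byte_counts) * math.log(2))


lls = [-1200.0, -950.0]   # made-up per-document log-likelihoods
words = [400, 310]        # per-document word counts
nbytes = [2200, 1700]     # per-document UTF-8 byte counts
print(weighted_perplexity(lls, words))   # word_perplexity
print(weighted_perplexity(lls, nbytes))  # byte_perplexity
print(bits_per_byte(lls, nbytes))        # bits_per_byte
```

The remaining `pile_*` configs below only override `task` and `dataset_name`, inheriting everything else from `pile_arxiv.yaml`.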
lm-evaluation-harness/lm_eval/tasks/pile/pile_bookcorpus2.yaml ADDED
@@ -0,0 +1,3 @@
+ include: pile_arxiv.yaml
+ task: pile_bookcorpus2
+ dataset_name: pile_bookcorpus2
lm-evaluation-harness/lm_eval/tasks/pile/pile_books3.yaml ADDED
@@ -0,0 +1,3 @@
+ include: pile_arxiv.yaml
+ task: pile_books3
+ dataset_name: pile_books3
lm-evaluation-harness/lm_eval/tasks/pile/pile_dm-mathematics.yaml ADDED
@@ -0,0 +1,3 @@
+ include: pile_arxiv.yaml
+ task: pile_dm-mathematics
+ dataset_name: pile_dm-mathematics
lm-evaluation-harness/lm_eval/tasks/pile/pile_enron.yaml ADDED
@@ -0,0 +1,3 @@
+ include: pile_arxiv.yaml
+ task: pile_enron
+ dataset_name: pile_enron
lm-evaluation-harness/lm_eval/tasks/pile/pile_europarl.yaml ADDED
@@ -0,0 +1,3 @@
+ include: pile_arxiv.yaml
+ task: pile_europarl
+ dataset_name: pile_europarl
lm-evaluation-harness/lm_eval/tasks/pile/pile_freelaw.yaml ADDED
@@ -0,0 +1,3 @@
+ include: pile_arxiv.yaml
+ task: pile_freelaw
+ dataset_name: pile_freelaw
lm-evaluation-harness/lm_eval/tasks/pile/pile_github.yaml ADDED
@@ -0,0 +1,3 @@
+ include: pile_arxiv.yaml
+ task: pile_github
+ dataset_name: pile_github
lm-evaluation-harness/lm_eval/tasks/pile/pile_gutenberg.yaml ADDED
@@ -0,0 +1,3 @@
+ include: pile_arxiv.yaml
+ task: pile_gutenberg
+ dataset_name: pile_gutenberg
lm-evaluation-harness/lm_eval/tasks/pile/pile_hackernews.yaml ADDED
@@ -0,0 +1,3 @@
+ include: pile_arxiv.yaml
+ task: pile_hackernews
+ dataset_name: pile_hackernews
lm-evaluation-harness/lm_eval/tasks/pile/pile_nih-exporter.yaml ADDED
@@ -0,0 +1,3 @@
+ include: pile_arxiv.yaml
+ task: pile_nih-exporter
+ dataset_name: pile_nih-exporter
lm-evaluation-harness/lm_eval/tasks/pile/pile_opensubtitles.yaml ADDED
@@ -0,0 +1,3 @@
+ include: pile_arxiv.yaml
+ task: pile_opensubtitles
+ dataset_name: pile_opensubtitles
lm-evaluation-harness/lm_eval/tasks/pile/pile_openwebtext2.yaml ADDED
@@ -0,0 +1,3 @@
+ include: pile_arxiv.yaml
+ task: pile_openwebtext2
+ dataset_name: pile_openwebtext2
lm-evaluation-harness/lm_eval/tasks/pile/pile_philpapers.yaml ADDED
@@ -0,0 +1,3 @@
+ include: pile_arxiv.yaml
+ task: pile_philpapers
+ dataset_name: pile_philpapers
lm-evaluation-harness/lm_eval/tasks/pile/pile_pile-cc.yaml ADDED
@@ -0,0 +1,3 @@
+ include: pile_arxiv.yaml
+ task: pile_pile-cc
+ dataset_name: pile_pile-cc