applied-ai-018 committed
Commit 0917329 · verified · Parent: 78c1391

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. lm-evaluation/build/lib/lm_eval/tasks/aclue/README.md +50 -0
  2. lm-evaluation/build/lib/lm_eval/tasks/aclue/_default_template_yaml +19 -0
  3. lm-evaluation/build/lib/lm_eval/tasks/aclue/_generate_configs.py +81 -0
  4. lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_ancient_chinese_culture.yaml +4 -0
  5. lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_ancient_literature.yaml +4 -0
  6. lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_ancient_medical.yaml +4 -0
  7. lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_ancient_phonetics.yaml +4 -0
  8. lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_basic_ancient_chinese.yaml +4 -0
  9. lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_couplet_prediction.yaml +4 -0
  10. lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_homographic_character_resolution.yaml +4 -0
  11. lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_poetry_context_prediction.yaml +4 -0
  12. lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_poetry_sentiment_analysis.yaml +4 -0
  13. lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_polysemy_resolution.yaml +4 -0
  14. lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_sentence_segmentation.yaml +4 -0
  15. lm-evaluation/build/lib/lm_eval/tasks/blimp/README.md +52 -0
  16. lm-evaluation/build/lib/lm_eval/tasks/blimp/_template_yaml +14 -0
  17. lm-evaluation/build/lib/lm_eval/tasks/blimp/anaphor_number_agreement.yaml +4 -0
  18. lm-evaluation/build/lib/lm_eval/tasks/blimp/animate_subject_passive.yaml +4 -0
  19. lm-evaluation/build/lib/lm_eval/tasks/blimp/animate_subject_trans.yaml +4 -0
  20. lm-evaluation/build/lib/lm_eval/tasks/blimp/causative.yaml +4 -0
  21. lm-evaluation/build/lib/lm_eval/tasks/blimp/complex_NP_island.yaml +4 -0
  22. lm-evaluation/build/lib/lm_eval/tasks/blimp/coordinate_structure_constraint_complex_left_branch.yaml +4 -0
  23. lm-evaluation/build/lib/lm_eval/tasks/blimp/coordinate_structure_constraint_object_extraction.yaml +4 -0
  24. lm-evaluation/build/lib/lm_eval/tasks/blimp/determiner_noun_agreement_1.yaml +4 -0
  25. lm-evaluation/build/lib/lm_eval/tasks/blimp/determiner_noun_agreement_2.yaml +4 -0
  26. lm-evaluation/build/lib/lm_eval/tasks/blimp/determiner_noun_agreement_irregular_1.yaml +4 -0
  27. lm-evaluation/build/lib/lm_eval/tasks/blimp/determiner_noun_agreement_irregular_2.yaml +4 -0
  28. lm-evaluation/build/lib/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_2.yaml +4 -0
  29. lm-evaluation/build/lib/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_irregular_2.yaml +4 -0
  30. lm-evaluation/build/lib/lm_eval/tasks/blimp/distractor_agreement_relational_noun.yaml +4 -0
  31. lm-evaluation/build/lib/lm_eval/tasks/blimp/distractor_agreement_relative_clause.yaml +4 -0
  32. lm-evaluation/build/lib/lm_eval/tasks/blimp/ellipsis_n_bar_1.yaml +4 -0
  33. lm-evaluation/build/lib/lm_eval/tasks/blimp/ellipsis_n_bar_2.yaml +4 -0
  34. lm-evaluation/build/lib/lm_eval/tasks/blimp/existential_there_quantifiers_1.yaml +4 -0
  35. lm-evaluation/build/lib/lm_eval/tasks/blimp/existential_there_quantifiers_2.yaml +4 -0
  36. lm-evaluation/build/lib/lm_eval/tasks/blimp/existential_there_subject_raising.yaml +4 -0
  37. lm-evaluation/build/lib/lm_eval/tasks/blimp/expletive_it_object_raising.yaml +4 -0
  38. lm-evaluation/build/lib/lm_eval/tasks/blimp/generate_configs.py +94 -0
  39. lm-evaluation/build/lib/lm_eval/tasks/blimp/inchoative.yaml +4 -0
  40. lm-evaluation/build/lib/lm_eval/tasks/blimp/irregular_past_participle_adjectives.yaml +4 -0
  41. lm-evaluation/build/lib/lm_eval/tasks/blimp/irregular_past_participle_verbs.yaml +4 -0
  42. lm-evaluation/build/lib/lm_eval/tasks/blimp/irregular_plural_subject_verb_agreement_2.yaml +4 -0
  43. lm-evaluation/build/lib/lm_eval/tasks/blimp/left_branch_island_echo_question.yaml +4 -0
  44. lm-evaluation/build/lib/lm_eval/tasks/blimp/matrix_question_npi_licensor_present.yaml +4 -0
  45. lm-evaluation/build/lib/lm_eval/tasks/blimp/npi_present_1.yaml +4 -0
  46. lm-evaluation/build/lib/lm_eval/tasks/blimp/npi_present_2.yaml +4 -0
  47. lm-evaluation/build/lib/lm_eval/tasks/blimp/only_npi_licensor_present.yaml +4 -0
  48. lm-evaluation/build/lib/lm_eval/tasks/blimp/only_npi_scope.yaml +4 -0
  49. lm-evaluation/build/lib/lm_eval/tasks/blimp/passive_2.yaml +4 -0
  50. lm-evaluation/build/lib/lm_eval/tasks/blimp/principle_A_c_command.yaml +4 -0
lm-evaluation/build/lib/lm_eval/tasks/aclue/README.md ADDED
@@ -0,0 +1,50 @@
+ # ACLUE
+
+ ### Paper
+
+ Can Large Language Model Comprehend Ancient Chinese? A Preliminary Test on ACLUE
+ https://arxiv.org/abs/2310.09550
+
+ The Ancient Chinese Language Understanding Evaluation (ACLUE) is an evaluation benchmark focused on ancient Chinese language comprehension, assessing how well large language models understand ancient Chinese. The benchmark comprises 15 tasks spanning lexical, syntactic, semantic, inference, and knowledge domains. ACLUE's tasks combine manually curated questions from publicly available resources with automatically generated questions from classical Chinese corpora.
+ The questions span from the Xia dynasty (2070 BCE) to the Ming dynasty (1368 CE). ACLUE adopts a multiple-choice question format for all tasks.
+
+ Homepage: https://github.com/isen-zhang/ACLUE
+
+ ### Citation
+
+ ```bibtex
+ @inproceedings{zhang-li-2023-large,
+     title = "Can Large Language Model Comprehend {A}ncient {C}hinese? A Preliminary Test on {ACLUE}",
+     author = "Zhang, Yixuan and Li, Haonan",
+     booktitle = "Proceedings of the Ancient Language Processing Workshop",
+     month = sep,
+     year = "2023",
+     address = "Varna, Bulgaria",
+     publisher = "INCOMA Ltd., Shoumen, Bulgaria",
+     url = "https://aclanthology.org/2023.alp-1.9",
+     pages = "80--87"
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ - `aclue`: All 15 subjects of the ACLUE dataset, evaluated following the methodology in CMMLU's original implementation.
+
+ #### Tasks
+
+ The following tasks evaluate subjects in the ACLUE dataset using loglikelihood-based multiple-choice scoring:
+ - `aclue_{subject_english}`
+
+ ### Checklist
+
+ * [x] Is the task an existing benchmark in the literature?
+ * [x] Have you referenced the original paper that introduced the task?
+ * [x] If yes, does the original paper provide a reference implementation?
+   * [x] Yes, original implementation contributed by author of the benchmark
+
+ If other tasks on this dataset are already supported:
+ * [x] Is the "Main" variant of this task clearly denoted?
+ * [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [x] Have you noted which, if any, published evaluation setups are matched by this variant?
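As a usage illustration (not part of the commit), these configs can be driven end to end through the harness's Python API once the package is installed. The sketch below is minimal; the model checkpoint is an arbitrary assumption, and any causal LM supported by the `hf` backend should slot in the same way.

```python
# Minimal sketch: evaluate the `aclue` group with lm-evaluation-harness.
# The checkpoint below is an illustrative assumption, not prescribed by ACLUE.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=Qwen/Qwen1.5-0.5B",  # hypothetical model choice
    tasks=["aclue"],   # the group declared in _default_template_yaml
    num_fewshot=5,     # few-shot examples come from the `dev` split (first_n sampler)
    batch_size=8,
)
print(results["results"])  # per-subject acc / acc_norm
```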
lm-evaluation/build/lib/lm_eval/tasks/aclue/_default_template_yaml ADDED
@@ -0,0 +1,19 @@
+ group: aclue
+ dataset_path: tyouisen/aclue
+ test_split: test
+ fewshot_split: dev
+ fewshot_config:
+   sampler: first_n
+ output_type: multiple_choice
+ doc_to_text: "{{Question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\n答案:"
+ doc_to_choice: ["A", "B", "C", "D"]
+ doc_to_target: "{{['A', 'B', 'C', 'D'].index(Answer)}}"
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 0.0
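The `doc_to_text` and `doc_to_target` fields above are Jinja templates rendered against each dataset row. A quick sketch of what they produce for a single row (the row contents are invented for illustration; only the field names match the dataset schema):

```python
# Sketch: rendering the template fields from _default_template_yaml by hand.
# The harness performs equivalent Jinja rendering internally.
import jinja2

doc = {
    "Question": "「学而时习之」出自哪部典籍?",
    "A": "《论语》", "B": "《孟子》", "C": "《大学》", "D": "《中庸》",
    "Answer": "A",
}

env = jinja2.Environment()
prompt = env.from_string(
    "{{Question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\n答案:"
).render(**doc)
target = int(env.from_string("{{['A', 'B', 'C', 'D'].index(Answer)}}").render(**doc))

print(prompt)  # the question, four lettered options, then "答案:"
print(target)  # 0 -> choice "A" should receive the highest loglikelihood
```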
lm-evaluation/build/lib/lm_eval/tasks/aclue/_generate_configs.py ADDED
@@ -0,0 +1,81 @@
+ """
+ Take in a YAML, and output all other splits with this YAML
+ """
+ import argparse
+ import os
+
+ import yaml
+ from tqdm import tqdm
+
+ from lm_eval.utils import eval_logger
+
+
+ SUBJECTS = {
+     "古文单字多义": "polysemy_resolution",
+     "诗词情感分类": "poetry_sentiment_analysis",
+     "古汉语命名体识别": "named_entity_recognition",
+     "古汉语知识": "basic_ancient_chinese",
+     "古诗词上下句预测": "poetry_context_prediction",
+     "古文断句": "sentence_segmentation",
+     "对联": "couplet_prediction",
+     "古诗词曲鉴赏": "poetry_appreciate",
+     "国学常识": "ancient_chinese_culture",
+     "古音学": "ancient_phonetics",
+     "通假字": "homographic_character_resolution",
+     "古代文学知识": "ancient_literature",
+     "医古文": "ancient_medical",
+     "古诗词质量评估": "poetry_quality_assessment",
+     "古文阅读理解": "reading_comprehension",
+ }
+
+
+ def parse_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--base_yaml_path", required=True)
+     parser.add_argument("--save_prefix_path", default="aclue")
+     parser.add_argument("--cot_prompt_path", default=None)
+     parser.add_argument("--task_prefix", default="")
+     return parser.parse_args()
+
+
+ if __name__ == "__main__":
+     args = parse_args()
+
+     # get filename of base_yaml so we can `"include": ` it in our other YAMLs.
+     base_yaml_name = os.path.split(args.base_yaml_path)[-1]
+     with open(args.base_yaml_path, encoding="utf-8") as f:
+         base_yaml = yaml.full_load(f)
+
+     if args.cot_prompt_path is not None:
+         import json
+
+         with open(args.cot_prompt_path, encoding="utf-8") as f:
+             cot_file = json.load(f)
+
+     for subject_zh, subject_eng in tqdm(SUBJECTS.items()):
+         if args.cot_prompt_path is not None:
+             description = cot_file[subject_eng]
+         else:
+             description = (
+                 f"以下是关于{subject_zh}的单项选择题,请直接给出正确答案的选项。\n\n"
+             )
+
+         yaml_dict = {
+             "include": base_yaml_name,
+             "task": f"aclue_{args.task_prefix}_{subject_eng}"
+             if args.task_prefix != ""
+             else f"aclue_{subject_eng}",
+             "dataset_name": subject_eng,
+             "description": description,
+         }
+
+         file_save_path = args.save_prefix_path + f"_{subject_eng}.yaml"
+         eval_logger.info(f"Saving yaml for subset {subject_eng} to {file_save_path}")
+         with open(file_save_path, "w", encoding="utf-8") as yaml_file:
+             yaml.dump(
+                 yaml_dict,
+                 yaml_file,
+                 width=float("inf"),
+                 allow_unicode=True,
+                 default_style='"',
+             )
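Note the `default_style='"'` argument to `yaml.dump`: it forces every key and scalar in the emitted configs to be double-quoted, which is exactly the style visible in the `aclue_*.yaml` files below. A small sketch of the effect:

```python
# Sketch: how default_style='"' changes yaml.dump output (cf. the files below).
import yaml

entry = {"include": "_default_template_yaml", "task": "aclue_ancient_medical"}
print(yaml.dump(entry, allow_unicode=True, default_style='"'), end="")
# "include": "_default_template_yaml"
# "task": "aclue_ancient_medical"
```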
lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_ancient_chinese_culture.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "ancient_chinese_culture"
+ "description": "以下是关于国学常识的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_ancient_chinese_culture"
lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_ancient_literature.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "ancient_literature"
+ "description": "以下是关于古代文学知识的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_ancient_literature"
lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_ancient_medical.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "ancient_medical"
+ "description": "以下是关于医古文的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_ancient_medical"
lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_ancient_phonetics.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "ancient_phonetics"
+ "description": "以下是关于古音学的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_ancient_phonetics"
lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_basic_ancient_chinese.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "basic_ancient_chinese"
+ "description": "以下是关于古汉语知识的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_basic_ancient_chinese"
lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_couplet_prediction.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "couplet_prediction"
+ "description": "以下是关于对联的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_couplet_prediction"
lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_homographic_character_resolution.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "homographic_character_resolution"
+ "description": "以下是关于通假字的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_homographic_character_resolution"
lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_poetry_context_prediction.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "poetry_context_prediction"
+ "description": "以下是关于古诗词上下句预测的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_poetry_context_prediction"
lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_poetry_sentiment_analysis.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "poetry_sentiment_analysis"
+ "description": "以下是关于诗词情感分类的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_poetry_sentiment_analysis"
lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_polysemy_resolution.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "polysemy_resolution"
+ "description": "以下是关于古文单字多义的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_polysemy_resolution"
lm-evaluation/build/lib/lm_eval/tasks/aclue/aclue_sentence_segmentation.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "sentence_segmentation"
+ "description": "以下是关于古文断句的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "aclue_sentence_segmentation"
lm-evaluation/build/lib/lm_eval/tasks/blimp/README.md ADDED
@@ -0,0 +1,52 @@
+ # BLiMP
+
+ ### Paper
+
+ Title: `BLiMP: A Benchmark of Linguistic Minimal Pairs for English`
+ Paper: https://arxiv.org/abs/1912.00582
+
+ BLiMP is a challenge set for evaluating what language models (LMs) know about
+ major grammatical phenomena in English. BLiMP consists of 67 sub-datasets, each
+ containing 1000 minimal pairs isolating specific contrasts in syntax, morphology,
+ or semantics. The data is automatically generated according to expert-crafted
+ grammars.
+
+ Homepage: https://github.com/alexwarstadt/blimp
+
+
+ ### Citation
+
+ ```
+ @article{warstadt2019blimp,
+     author = {Warstadt, Alex and Parrish, Alicia and Liu, Haokun and Mohananey, Anhad and Peng, Wei and Wang, Sheng-Fu and Bowman, Samuel R.},
+     title = {BLiMP: The Benchmark of Linguistic Minimal Pairs for English},
+     journal = {Transactions of the Association for Computational Linguistics},
+     volume = {8},
+     pages = {377--392},
+     year = {2020},
+     doi = {10.1162/tacl_a_00321},
+     url = {https://doi.org/10.1162/tacl_a_00321}
+ }
+ ```
+
+ ### Subtasks
+
+ Each of the 67 BLiMP paradigms is exposed as its own task, named after its sub-dataset:
+ * `blimp_{paradigm}` (e.g. `blimp_anaphor_number_agreement`, `blimp_causative`): scores one paradigm by checking whether the model assigns a higher loglikelihood to the acceptable sentence of each minimal pair.
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [ ] Is the task an existing benchmark in the literature?
+ * [ ] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/build/lib/lm_eval/tasks/blimp/_template_yaml ADDED
@@ -0,0 +1,14 @@
+ group: blimp
+ dataset_path: blimp
+ output_type: multiple_choice
+ validation_split: train
+ doc_to_text: ""
+ doc_to_target: 0
+ doc_to_choice: "{{[sentence_good, sentence_bad]}}"
+ num_fewshot: 0
+ should_decontaminate: true
+ doc_to_decontamination_query: "{{sentence_good}} {{sentence_bad}}"
+ metric_list:
+   - metric: acc
+ metadata:
+   version: 1.0
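With an empty `doc_to_text`, a two-element `doc_to_choice`, and `doc_to_target: 0`, each BLiMP example reduces to comparing the loglikelihoods of the two full sentences; the model is correct when `sentence_good` wins. A minimal sketch of that comparison outside the harness (the model choice and example pair are illustrative assumptions):

```python
# Sketch: BLiMP-style minimal-pair scoring with a causal LM.
# gpt2 and the example pair are illustrative; the harness performs the same
# comparison via its loglikelihood requests.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

def sentence_logprob(text: str) -> float:
    ids = tok(text, return_tensors="pt").input_ids
    with torch.no_grad():
        logits = model(ids).logits
    # log P(token | prefix) for every position after the first
    logprobs = torch.log_softmax(logits[:, :-1], dim=-1)
    return logprobs.gather(2, ids[:, 1:].unsqueeze(-1)).sum().item()

pair = {
    "sentence_good": "Many teenagers were helping themselves.",
    "sentence_bad": "Many teenagers were helping himself.",
}
correct = sentence_logprob(pair["sentence_good"]) > sentence_logprob(pair["sentence_bad"])
print(correct)  # counted toward `acc`
```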
lm-evaluation/build/lib/lm_eval/tasks/blimp/anaphor_number_agreement.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: anaphor_number_agreement
+ include: _template_yaml
+ task: blimp_anaphor_number_agreement
lm-evaluation/build/lib/lm_eval/tasks/blimp/animate_subject_passive.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: animate_subject_passive
+ include: _template_yaml
+ task: blimp_animate_subject_passive
lm-evaluation/build/lib/lm_eval/tasks/blimp/animate_subject_trans.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: animate_subject_trans
+ include: _template_yaml
+ task: blimp_animate_subject_trans
lm-evaluation/build/lib/lm_eval/tasks/blimp/causative.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: causative
+ include: _template_yaml
+ task: blimp_causative
lm-evaluation/build/lib/lm_eval/tasks/blimp/complex_NP_island.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: complex_NP_island
+ include: _template_yaml
+ task: blimp_complex_NP_island
lm-evaluation/build/lib/lm_eval/tasks/blimp/coordinate_structure_constraint_complex_left_branch.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: coordinate_structure_constraint_complex_left_branch
+ include: _template_yaml
+ task: blimp_coordinate_structure_constraint_complex_left_branch
lm-evaluation/build/lib/lm_eval/tasks/blimp/coordinate_structure_constraint_object_extraction.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: coordinate_structure_constraint_object_extraction
+ include: _template_yaml
+ task: blimp_coordinate_structure_constraint_object_extraction
lm-evaluation/build/lib/lm_eval/tasks/blimp/determiner_noun_agreement_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: determiner_noun_agreement_1
+ include: _template_yaml
+ task: blimp_determiner_noun_agreement_1
lm-evaluation/build/lib/lm_eval/tasks/blimp/determiner_noun_agreement_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: determiner_noun_agreement_2
+ include: _template_yaml
+ task: blimp_determiner_noun_agreement_2
lm-evaluation/build/lib/lm_eval/tasks/blimp/determiner_noun_agreement_irregular_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: determiner_noun_agreement_irregular_1
+ include: _template_yaml
+ task: blimp_determiner_noun_agreement_irregular_1
lm-evaluation/build/lib/lm_eval/tasks/blimp/determiner_noun_agreement_irregular_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: determiner_noun_agreement_irregular_2
+ include: _template_yaml
+ task: blimp_determiner_noun_agreement_irregular_2
lm-evaluation/build/lib/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: determiner_noun_agreement_with_adj_2
+ include: _template_yaml
+ task: blimp_determiner_noun_agreement_with_adj_2
lm-evaluation/build/lib/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_irregular_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: determiner_noun_agreement_with_adj_irregular_2
+ include: _template_yaml
+ task: blimp_determiner_noun_agreement_with_adj_irregular_2
lm-evaluation/build/lib/lm_eval/tasks/blimp/distractor_agreement_relational_noun.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: distractor_agreement_relational_noun
+ include: _template_yaml
+ task: blimp_distractor_agreement_relational_noun
lm-evaluation/build/lib/lm_eval/tasks/blimp/distractor_agreement_relative_clause.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: distractor_agreement_relative_clause
+ include: _template_yaml
+ task: blimp_distractor_agreement_relative_clause
lm-evaluation/build/lib/lm_eval/tasks/blimp/ellipsis_n_bar_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: ellipsis_n_bar_1
+ include: _template_yaml
+ task: blimp_ellipsis_n_bar_1
lm-evaluation/build/lib/lm_eval/tasks/blimp/ellipsis_n_bar_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: ellipsis_n_bar_2
+ include: _template_yaml
+ task: blimp_ellipsis_n_bar_2
lm-evaluation/build/lib/lm_eval/tasks/blimp/existential_there_quantifiers_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: existential_there_quantifiers_1
+ include: _template_yaml
+ task: blimp_existential_there_quantifiers_1
lm-evaluation/build/lib/lm_eval/tasks/blimp/existential_there_quantifiers_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: existential_there_quantifiers_2
+ include: _template_yaml
+ task: blimp_existential_there_quantifiers_2
lm-evaluation/build/lib/lm_eval/tasks/blimp/existential_there_subject_raising.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: existential_there_subject_raising
+ include: _template_yaml
+ task: blimp_existential_there_subject_raising
lm-evaluation/build/lib/lm_eval/tasks/blimp/expletive_it_object_raising.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: expletive_it_object_raising
+ include: _template_yaml
+ task: blimp_expletive_it_object_raising
lm-evaluation/build/lib/lm_eval/tasks/blimp/generate_configs.py ADDED
@@ -0,0 +1,94 @@
+ import yaml
+
+
+ all_subtasks = [
+     "adjunct_island",
+     "anaphor_gender_agreement",
+     "anaphor_number_agreement",
+     "animate_subject_passive",
+     "animate_subject_trans",
+     "causative",
+     "complex_NP_island",
+     "coordinate_structure_constraint_complex_left_branch",
+     "coordinate_structure_constraint_object_extraction",
+     "determiner_noun_agreement_1",
+     "determiner_noun_agreement_2",
+     "determiner_noun_agreement_irregular_1",
+     "determiner_noun_agreement_irregular_2",
+     "determiner_noun_agreement_with_adj_2",
+     "determiner_noun_agreement_with_adj_irregular_1",
+     "determiner_noun_agreement_with_adj_irregular_2",
+     "determiner_noun_agreement_with_adjective_1",
+     "distractor_agreement_relational_noun",
+     "distractor_agreement_relative_clause",
+     "drop_argument",
+     "ellipsis_n_bar_1",
+     "ellipsis_n_bar_2",
+     "existential_there_object_raising",
+     "existential_there_quantifiers_1",
+     "existential_there_quantifiers_2",
+     "existential_there_subject_raising",
+     "expletive_it_object_raising",
+     "inchoative",
+     "intransitive",
+     "irregular_past_participle_adjectives",
+     "irregular_past_participle_verbs",
+     "irregular_plural_subject_verb_agreement_1",
+     "irregular_plural_subject_verb_agreement_2",
+     "left_branch_island_echo_question",
+     "left_branch_island_simple_question",
+     "matrix_question_npi_licensor_present",
+     "npi_present_1",
+     "npi_present_2",
+     "only_npi_licensor_present",
+     "only_npi_scope",
+     "passive_1",
+     "passive_2",
+     "principle_A_c_command",
+     "principle_A_case_1",
+     "principle_A_case_2",
+     "principle_A_domain_1",
+     "principle_A_domain_2",
+     "principle_A_domain_3",
+     "principle_A_reconstruction",
+     "regular_plural_subject_verb_agreement_1",
+     "regular_plural_subject_verb_agreement_2",
+     "sentential_negation_npi_licensor_present",
+     "sentential_negation_npi_scope",
+     "sentential_subject_island",
+     "superlative_quantifiers_1",
+     "superlative_quantifiers_2",
+     "tough_vs_raising_1",
+     "tough_vs_raising_2",
+     "transitive",
+     "wh_island",
+     "wh_questions_object_gap",
+     "wh_questions_subject_gap",
+     "wh_questions_subject_gap_long_distance",
+     "wh_vs_that_no_gap",
+     "wh_vs_that_no_gap_long_distance",
+     "wh_vs_that_with_gap",
+     "wh_vs_that_with_gap_long_distance",
+ ]
+
+
+ def main() -> None:
+     for task in all_subtasks:
+         file_name = f"{task}.yaml"
+         try:
+             # mode "x" creates the file, raising FileExistsError (caught below)
+             # so that previously generated configs are left untouched
+             with open(file_name, "x", encoding="utf-8") as f:
+                 f.write("# Generated by utils.py\n")
+                 yaml.dump(
+                     {
+                         "include": "_template_yaml",
+                         "task": "blimp_" + task,
+                         "dataset_name": task,
+                     },
+                     f,
+                 )
+         except FileExistsError:
+             pass
+
+
+ if __name__ == "__main__":
+     main()
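Running this script from the blimp task folder materializes one four-line config per paradigm. A sketch of the exact text it writes for a single entry (`yaml.dump` sorts keys alphabetically by default, which is why the generated files below read `dataset_name`, `include`, `task` in that order):

```python
# Sketch: the output the loop above writes for one subtask.
import yaml

print("# Generated by utils.py")
print(yaml.dump({"include": "_template_yaml",
                 "task": "blimp_causative",
                 "dataset_name": "causative"}), end="")
# dataset_name: causative
# include: _template_yaml
# task: blimp_causative
```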
lm-evaluation/build/lib/lm_eval/tasks/blimp/inchoative.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: inchoative
+ include: _template_yaml
+ task: blimp_inchoative
lm-evaluation/build/lib/lm_eval/tasks/blimp/irregular_past_participle_adjectives.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: irregular_past_participle_adjectives
+ include: _template_yaml
+ task: blimp_irregular_past_participle_adjectives
lm-evaluation/build/lib/lm_eval/tasks/blimp/irregular_past_participle_verbs.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: irregular_past_participle_verbs
+ include: _template_yaml
+ task: blimp_irregular_past_participle_verbs
lm-evaluation/build/lib/lm_eval/tasks/blimp/irregular_plural_subject_verb_agreement_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: irregular_plural_subject_verb_agreement_2
+ include: _template_yaml
+ task: blimp_irregular_plural_subject_verb_agreement_2
lm-evaluation/build/lib/lm_eval/tasks/blimp/left_branch_island_echo_question.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: left_branch_island_echo_question
+ include: _template_yaml
+ task: blimp_left_branch_island_echo_question
lm-evaluation/build/lib/lm_eval/tasks/blimp/matrix_question_npi_licensor_present.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: matrix_question_npi_licensor_present
+ include: _template_yaml
+ task: blimp_matrix_question_npi_licensor_present
lm-evaluation/build/lib/lm_eval/tasks/blimp/npi_present_1.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: npi_present_1
+ include: _template_yaml
+ task: blimp_npi_present_1
lm-evaluation/build/lib/lm_eval/tasks/blimp/npi_present_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: npi_present_2
+ include: _template_yaml
+ task: blimp_npi_present_2
lm-evaluation/build/lib/lm_eval/tasks/blimp/only_npi_licensor_present.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: only_npi_licensor_present
+ include: _template_yaml
+ task: blimp_only_npi_licensor_present
lm-evaluation/build/lib/lm_eval/tasks/blimp/only_npi_scope.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: only_npi_scope
+ include: _template_yaml
+ task: blimp_only_npi_scope
lm-evaluation/build/lib/lm_eval/tasks/blimp/passive_2.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: passive_2
+ include: _template_yaml
+ task: blimp_passive_2
lm-evaluation/build/lib/lm_eval/tasks/blimp/principle_A_c_command.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: principle_A_c_command
+ include: _template_yaml
+ task: blimp_principle_A_c_command