Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- .gitattributes +1 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/README.md +52 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/adjunct_island.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/anaphor_gender_agreement.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/anaphor_number_agreement.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/animate_subject_passive.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/complex_NP_island.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/coordinate_structure_constraint_object_extraction.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/determiner_noun_agreement_2.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/determiner_noun_agreement_irregular_1.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/determiner_noun_agreement_irregular_2.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_2.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_irregular_1.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/determiner_noun_agreement_with_adjective_1.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/distractor_agreement_relative_clause.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/ellipsis_n_bar_2.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/existential_there_object_raising.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/existential_there_quantifiers_1.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/existential_there_quantifiers_2.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/expletive_it_object_raising.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/inchoative.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/intransitive.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/irregular_past_participle_adjectives.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/irregular_past_participle_verbs.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/irregular_plural_subject_verb_agreement_1.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/irregular_plural_subject_verb_agreement_2.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/left_branch_island_echo_question.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/matrix_question_npi_licensor_present.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/only_npi_scope.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/passive_1.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/principle_A_case_2.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/principle_A_domain_1.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/principle_A_reconstruction.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/sentential_negation_npi_licensor_present.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/sentential_negation_npi_scope.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/sentential_subject_island.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/superlative_quantifiers_1.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/superlative_quantifiers_2.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/transitive.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/wh_questions_object_gap.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/wh_questions_subject_gap.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/wh_questions_subject_gap_long_distance.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/wh_vs_that_no_gap.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/wh_vs_that_no_gap_long_distance.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/blimp/wh_vs_that_with_gap.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/eus_reading/README.md +48 -0
- lm-evaluation-harness/lm_eval/tasks/eus_reading/eus_reading.yaml +16 -0
- lm-evaluation-harness/lm_eval/tasks/eus_reading/utils.py +41 -0
- lm-evaluation-harness/lm_eval/tasks/okapi/arc_multilingual/arc_de.yaml +7 -0
- lm-evaluation-harness/lm_eval/tasks/okapi/arc_multilingual/arc_eu.yaml +7 -0
.gitattributes
CHANGED
@@ -127,3 +127,4 @@ lm-evaluation-harness/wandb/run-20240605_110038-lwgnduuo/run-lwgnduuo.wandb filt
 lm-evaluation-harness/wandb/run-20240605_140919-mkdnls2x/run-mkdnls2x.wandb filter=lfs diff=lfs merge=lfs -text
 lm-evaluation-harness/wandb/run-20240605_093020-laxetjfu/run-laxetjfu.wandb filter=lfs diff=lfs merge=lfs -text
 lm-evaluation-harness/wandb/run-20240606_045127-vb760voe/run-vb760voe.wandb filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.12 filter=lfs diff=lfs merge=lfs -text
lm-evaluation-harness/lm_eval/tasks/blimp/README.md
ADDED
@@ -0,0 +1,52 @@
+# Task-name
+
+### Paper
+
+Title: `BLiMP: A Benchmark of Linguistic Minimal Pairs for English`
+Abstract: `https://arxiv.org/abs/1912.00582`
+
+BLiMP is a challenge set for evaluating what language models (LMs) know about
+major grammatical phenomena in English. BLiMP consists of 67 sub-datasets, each
+containing 1000 minimal pairs isolating specific contrasts in syntax, morphology,
+or semantics. The data is automatically generated according to expert-crafted
+grammars.
+
+Homepage: https://github.com/alexwarstadt/blimp
+
+
+### Citation
+
+```
+@article{warstadt2019blimp,
+    author = {Warstadt, Alex and Parrish, Alicia and Liu, Haokun and Mohananey, Anhad and Peng, Wei and Wang, Sheng-Fu and Bowman, Samuel R.},
+    title = {BLiMP: The Benchmark of Linguistic Minimal Pairs for English},
+    journal = {Transactions of the Association for Computational Linguistics},
+    volume = {8},
+    number = {},
+    pages = {377-392},
+    year = {2020},
+    doi = {10.1162/tacl\_a\_00321},
+    URL = {https://doi.org/10.1162/tacl_a_00321},
+    eprint = {https://doi.org/10.1162/tacl_a_00321},
+    abstract = { We introduce The Benchmark of Linguistic Minimal Pairs (BLiMP),1 a challenge set for evaluating the linguistic knowledge of language models (LMs) on major grammatical phenomena in English. BLiMP consists of 67 individual datasets, each containing 1,000 minimal pairs—that is, pairs of minimally different sentences that contrast in grammatical acceptability and isolate specific phenomenon in syntax, morphology, or semantics. We generate the data according to linguist-crafted grammar templates, and human aggregate agreement with the labels is 96.4\%. We evaluate n-gram, LSTM, and Transformer (GPT-2 and Transformer-XL) LMs by observing whether they assign a higher probability to the acceptable sentence in each minimal pair. We find that state-of-the-art models identify morphological contrasts related to agreement reliably, but they struggle with some subtle semantic and syntactic phenomena, such as negative polarity items and extraction islands. }
+}
+```
+
+### Subtasks
+
+List or describe tasks defined in this folder, and their names here:
+* `task_name`: `1-sentence description of what this particular task does`
+* `task_name2`: .....
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [ ] Is the task an existing benchmark in the literature?
+* [ ] Have you referenced the original paper that introduced the task?
+* [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation-harness/lm_eval/tasks/blimp/adjunct_island.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: adjunct_island
+include: _template_yaml
+task: blimp_adjunct_island
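Every per-phenomenon file added below repeats this same four-line pattern and, per its header comment, is generated by a script rather than written by hand. A hypothetical sketch of such a generator follows; the folder's actual utils.py is not part of this diff, so the subtask list and output path are assumptions that only mirror the files shown here.

```python
# Hypothetical sketch of how the per-phenomenon YAML files could be generated.
# The folder's real utils.py is not included in this diff; the subtask list
# and output path below are assumptions that mirror the files shown here.
from pathlib import Path

SUBTASKS = ["adjunct_island", "anaphor_gender_agreement"]  # ...67 phenomena in total

for name in SUBTASKS:
    Path(f"{name}.yaml").write_text(
        "# Generated by utils.py\n"
        f"dataset_name: {name}\n"
        "include: _template_yaml\n"
        f"task: blimp_{name}\n"
    )
```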
lm-evaluation-harness/lm_eval/tasks/blimp/anaphor_gender_agreement.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: anaphor_gender_agreement
+include: _template_yaml
+task: blimp_anaphor_gender_agreement
lm-evaluation-harness/lm_eval/tasks/blimp/anaphor_number_agreement.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: anaphor_number_agreement
+include: _template_yaml
+task: blimp_anaphor_number_agreement
lm-evaluation-harness/lm_eval/tasks/blimp/animate_subject_passive.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: animate_subject_passive
+include: _template_yaml
+task: blimp_animate_subject_passive
lm-evaluation-harness/lm_eval/tasks/blimp/complex_NP_island.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: complex_NP_island
+include: _template_yaml
+task: blimp_complex_NP_island
lm-evaluation-harness/lm_eval/tasks/blimp/coordinate_structure_constraint_object_extraction.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: coordinate_structure_constraint_object_extraction
+include: _template_yaml
+task: blimp_coordinate_structure_constraint_object_extraction
lm-evaluation-harness/lm_eval/tasks/blimp/determiner_noun_agreement_2.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: determiner_noun_agreement_2
+include: _template_yaml
+task: blimp_determiner_noun_agreement_2
lm-evaluation-harness/lm_eval/tasks/blimp/determiner_noun_agreement_irregular_1.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: determiner_noun_agreement_irregular_1
+include: _template_yaml
+task: blimp_determiner_noun_agreement_irregular_1
lm-evaluation-harness/lm_eval/tasks/blimp/determiner_noun_agreement_irregular_2.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: determiner_noun_agreement_irregular_2
+include: _template_yaml
+task: blimp_determiner_noun_agreement_irregular_2
lm-evaluation-harness/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_2.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: determiner_noun_agreement_with_adj_2
+include: _template_yaml
+task: blimp_determiner_noun_agreement_with_adj_2
lm-evaluation-harness/lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_irregular_1.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: determiner_noun_agreement_with_adj_irregular_1
+include: _template_yaml
+task: blimp_determiner_noun_agreement_with_adj_irregular_1
lm-evaluation-harness/lm_eval/tasks/blimp/determiner_noun_agreement_with_adjective_1.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: determiner_noun_agreement_with_adjective_1
+include: _template_yaml
+task: blimp_determiner_noun_agreement_with_adjective_1
lm-evaluation-harness/lm_eval/tasks/blimp/distractor_agreement_relative_clause.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: distractor_agreement_relative_clause
+include: _template_yaml
+task: blimp_distractor_agreement_relative_clause
lm-evaluation-harness/lm_eval/tasks/blimp/ellipsis_n_bar_2.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: ellipsis_n_bar_2
+include: _template_yaml
+task: blimp_ellipsis_n_bar_2
lm-evaluation-harness/lm_eval/tasks/blimp/existential_there_object_raising.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: existential_there_object_raising
+include: _template_yaml
+task: blimp_existential_there_object_raising
lm-evaluation-harness/lm_eval/tasks/blimp/existential_there_quantifiers_1.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: existential_there_quantifiers_1
+include: _template_yaml
+task: blimp_existential_there_quantifiers_1
lm-evaluation-harness/lm_eval/tasks/blimp/existential_there_quantifiers_2.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: existential_there_quantifiers_2
+include: _template_yaml
+task: blimp_existential_there_quantifiers_2
lm-evaluation-harness/lm_eval/tasks/blimp/expletive_it_object_raising.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: expletive_it_object_raising
+include: _template_yaml
+task: blimp_expletive_it_object_raising
lm-evaluation-harness/lm_eval/tasks/blimp/inchoative.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: inchoative
+include: _template_yaml
+task: blimp_inchoative
lm-evaluation-harness/lm_eval/tasks/blimp/intransitive.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: intransitive
+include: _template_yaml
+task: blimp_intransitive
lm-evaluation-harness/lm_eval/tasks/blimp/irregular_past_participle_adjectives.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: irregular_past_participle_adjectives
+include: _template_yaml
+task: blimp_irregular_past_participle_adjectives
lm-evaluation-harness/lm_eval/tasks/blimp/irregular_past_participle_verbs.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: irregular_past_participle_verbs
+include: _template_yaml
+task: blimp_irregular_past_participle_verbs
lm-evaluation-harness/lm_eval/tasks/blimp/irregular_plural_subject_verb_agreement_1.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: irregular_plural_subject_verb_agreement_1
+include: _template_yaml
+task: blimp_irregular_plural_subject_verb_agreement_1
lm-evaluation-harness/lm_eval/tasks/blimp/irregular_plural_subject_verb_agreement_2.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: irregular_plural_subject_verb_agreement_2
+include: _template_yaml
+task: blimp_irregular_plural_subject_verb_agreement_2
lm-evaluation-harness/lm_eval/tasks/blimp/left_branch_island_echo_question.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: left_branch_island_echo_question
+include: _template_yaml
+task: blimp_left_branch_island_echo_question
lm-evaluation-harness/lm_eval/tasks/blimp/matrix_question_npi_licensor_present.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: matrix_question_npi_licensor_present
+include: _template_yaml
+task: blimp_matrix_question_npi_licensor_present
lm-evaluation-harness/lm_eval/tasks/blimp/only_npi_scope.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: only_npi_scope
+include: _template_yaml
+task: blimp_only_npi_scope
lm-evaluation-harness/lm_eval/tasks/blimp/passive_1.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: passive_1
+include: _template_yaml
+task: blimp_passive_1
lm-evaluation-harness/lm_eval/tasks/blimp/principle_A_case_2.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: principle_A_case_2
+include: _template_yaml
+task: blimp_principle_A_case_2
lm-evaluation-harness/lm_eval/tasks/blimp/principle_A_domain_1.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: principle_A_domain_1
+include: _template_yaml
+task: blimp_principle_A_domain_1
lm-evaluation-harness/lm_eval/tasks/blimp/principle_A_reconstruction.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: principle_A_reconstruction
+include: _template_yaml
+task: blimp_principle_A_reconstruction
lm-evaluation-harness/lm_eval/tasks/blimp/sentential_negation_npi_licensor_present.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: sentential_negation_npi_licensor_present
+include: _template_yaml
+task: blimp_sentential_negation_npi_licensor_present
lm-evaluation-harness/lm_eval/tasks/blimp/sentential_negation_npi_scope.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: sentential_negation_npi_scope
+include: _template_yaml
+task: blimp_sentential_negation_npi_scope
lm-evaluation-harness/lm_eval/tasks/blimp/sentential_subject_island.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: sentential_subject_island
+include: _template_yaml
+task: blimp_sentential_subject_island
lm-evaluation-harness/lm_eval/tasks/blimp/superlative_quantifiers_1.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: superlative_quantifiers_1
+include: _template_yaml
+task: blimp_superlative_quantifiers_1
lm-evaluation-harness/lm_eval/tasks/blimp/superlative_quantifiers_2.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: superlative_quantifiers_2
+include: _template_yaml
+task: blimp_superlative_quantifiers_2
lm-evaluation-harness/lm_eval/tasks/blimp/transitive.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: transitive
+include: _template_yaml
+task: blimp_transitive
lm-evaluation-harness/lm_eval/tasks/blimp/wh_questions_object_gap.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: wh_questions_object_gap
+include: _template_yaml
+task: blimp_wh_questions_object_gap
lm-evaluation-harness/lm_eval/tasks/blimp/wh_questions_subject_gap.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: wh_questions_subject_gap
+include: _template_yaml
+task: blimp_wh_questions_subject_gap
lm-evaluation-harness/lm_eval/tasks/blimp/wh_questions_subject_gap_long_distance.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: wh_questions_subject_gap_long_distance
+include: _template_yaml
+task: blimp_wh_questions_subject_gap_long_distance
lm-evaluation-harness/lm_eval/tasks/blimp/wh_vs_that_no_gap.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: wh_vs_that_no_gap
+include: _template_yaml
+task: blimp_wh_vs_that_no_gap
lm-evaluation-harness/lm_eval/tasks/blimp/wh_vs_that_no_gap_long_distance.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: wh_vs_that_no_gap_long_distance
+include: _template_yaml
+task: blimp_wh_vs_that_no_gap_long_distance
lm-evaluation-harness/lm_eval/tasks/blimp/wh_vs_that_with_gap.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: wh_vs_that_with_gap
+include: _template_yaml
+task: blimp_wh_vs_that_with_gap
lm-evaluation-harness/lm_eval/tasks/eus_reading/README.md
ADDED
@@ -0,0 +1,48 @@
+# EusReading
+
+### Paper
+
+Title: Latxa: An Open Language Model and Evaluation Suite for Basque
+
+Abstract: https://arxiv.org/abs/2403.20266
+
+EusReading consists of 352 reading comprehension exercises (irakurmena) sourced from the set of past EGA exams from 1998 to 2008. Each test generally has 10 multiple-choice questions, with 4 choices and a single correct answer. These exercises are more challenging than Belebele due to the complexity and length of the input texts. As a result, EusReading is useful to measure long context understanding of models.
+
+Homepage: https://github.com/hitz-zentroa/latxa
+
+
+### Citation
+
+```
+@misc{etxaniz2024latxa,
+      title={Latxa: An Open Language Model and Evaluation Suite for Basque},
+      author={Julen Etxaniz and Oscar Sainz and Naiara Perez and Itziar Aldabe and German Rigau and Eneko Agirre and Aitor Ormazabal and Mikel Artetxe and Aitor Soroa},
+      year={2024},
+      eprint={2403.20266},
+      archivePrefix={arXiv},
+      primaryClass={cs.CL}
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+There are no groups.
+
+#### Tasks
+
+* `eus_reading`: EusReading consists of 352 reading comprehension exercises (irakurmena) sourced from the set of past EGA exams from 1998 to 2008.
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [ ] Is the task an existing benchmark in the literature?
+* [ ] Have you referenced the original paper that introduced the task?
+* [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation-harness/lm_eval/tasks/eus_reading/eus_reading.yaml
ADDED
@@ -0,0 +1,16 @@
+dataset_path: HiTZ/EusReading
+dataset_name: default
+task: eus_reading
+doc_to_text: !function utils.doc_to_text_context
+doc_to_choice: !function utils.doc_to_choice
+validation_split: null
+test_split: test
+fewshot_split: test
+output_type: multiple_choice
+doc_to_target: answer
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
+metadata:
+  version: 0.0
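For an `output_type: multiple_choice` task like this one, the harness renders each document with `doc_to_text`, scores every string returned by `doc_to_choice` as a continuation, and counts the document correct when the best-scoring choice matches the `answer` index. A rough sketch of that flow is below; the `loglikelihood` callable stands in for the harness's model call, and the single-space delimiter is an assumption based on the harness's default.

```python
# Rough sketch of how a multiple_choice task such as eus_reading is scored.
# `loglikelihood` stands in for the harness's model call; the document fields
# (context, question, candidates, answer) follow the HiTZ/EusReading schema
# consumed by utils.py below.
from typing import Callable, List


def score_doc(
    doc: dict,
    doc_to_text: Callable[[dict], str],
    doc_to_choice: Callable[[dict], List[str]],
    loglikelihood: Callable[[str, str], float],
) -> bool:
    prompt = doc_to_text(doc)          # "Pasartea: ...\nGaldera: ...\nErantzuna:"
    choices = doc_to_choice(doc)       # e.g. ["A", "B", "C", "D"]
    # Score each letter as a continuation of the prompt (delimiter assumed " ").
    scores = [loglikelihood(prompt, " " + c) for c in choices]
    predicted = max(range(len(scores)), key=scores.__getitem__)
    return predicted == doc["answer"]  # doc_to_target: answer (gold index)
```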
lm-evaluation-harness/lm_eval/tasks/eus_reading/utils.py
ADDED
@@ -0,0 +1,41 @@
+from typing import List
+
+
+letters = ["A", "B", "C", "D"]
+
+
+def doc_to_text_context(doc) -> str:
+    """
+    Converts a document to a formatted string.
+
+    Args:
+        doc (dict): A dictionary containing the document information.
+
+    Returns:
+        str: A formatted string containing the question and answer choices.
+    """
+    candidates = doc["candidates"]
+    num_choices = len(candidates)
+    if num_choices < 2:
+        raise ValueError("Invalid number of candidates")
+    choices = letters[:num_choices]
+    formatted_choices = "\n".join(
+        [f"{choice}: {candidates[i]}" for i, choice in enumerate(choices)]
+    )
+    return f"Pasartea: {doc['context']}\n\nGaldera: {doc['question']}\n{formatted_choices}\nErantzuna:"
+
+
+def doc_to_choice(doc) -> List[str]:
+    """
+    Returns the answer choices for a document.
+
+    Args:
+        doc (dict): A dictionary containing the document information.
+
+    Returns:
+        list: A list of strings containing the answer choices.
+    """
+    num_choices = len(doc["candidates"])
+    if num_choices < 2:
+        raise ValueError("Invalid number of candidates")
+    return letters[:num_choices]
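A quick check of what these helpers produce on a made-up document; all field values are invented, and only the shape follows the HiTZ/EusReading schema used above.

```python
# Example invocation with an invented document; expected output in comments.
# Assumes this runs inside the task folder so utils.py is importable.
from utils import doc_to_choice, doc_to_text_context

example_doc = {
    "context": "Testu luze bat ...",
    "question": "Zein da erantzun zuzena?",
    "candidates": ["lehena", "bigarrena", "hirugarrena"],
    "answer": 1,
}

print(doc_to_text_context(example_doc))
# Pasartea: Testu luze bat ...
#
# Galdera: Zein da erantzun zuzena?
# A: lehena
# B: bigarrena
# C: hirugarrena
# Erantzuna:

print(doc_to_choice(example_doc))  # ['A', 'B', 'C']
```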
lm-evaluation-harness/lm_eval/tasks/okapi/arc_multilingual/arc_de.yaml
ADDED
@@ -0,0 +1,7 @@
+include: _arc_yaml
+task: arc_de
+dataset_path: alexandrainst/m_arc
+dataset_name: de
+training_split: train
+validation_split: validation
+test_split: test
lm-evaluation-harness/lm_eval/tasks/okapi/arc_multilingual/arc_eu.yaml
ADDED
@@ -0,0 +1,7 @@
+include: _arc_yaml
+task: arc_eu
+dataset_path: alexandrainst/m_arc
+dataset_name: eu
+training_split: train
+validation_split: validation
+test_split: test