Add files using upload-large-folder tool
- lm-evaluation/lm_eval/tasks/anli/README.md +56 -0
- lm-evaluation/lm_eval/tasks/anli/anli_r1.yaml +26 -0
- lm-evaluation/lm_eval/tasks/anli/anli_r2.yaml +5 -0
- lm-evaluation/lm_eval/tasks/anli/anli_r3.yaml +5 -0
- lm-evaluation/lm_eval/tasks/eus_exams/README.md +49 -0
- lm-evaluation/lm_eval/tasks/eus_exams/configs.py +67 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams +18 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_ejadministrativo.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_ejtecnico.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeayuntamientovitoria.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opebilbao.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehuadmin.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehubiblio.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehuderecho.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehueconomicas.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehuempresariales.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehusubalterno.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehutecnico.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehutecnicob.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiadmin.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiaux.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiauxenf.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakicelador.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakijuridico.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakioperario.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakitecnico.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakivarios.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza1c.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza2c.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza3c.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza6c.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza7c.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza8c.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza9c.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_ejlaguntza.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_ejlaguntzaile.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_ejteknikari.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opebilbaoeu.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuadmineu.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuauxeu.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehubiblioeu.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehueconomicaseu.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuempresarialeseu.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehusubalternoeu.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehutecnicoeu.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuteknikarib.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiadmineu.yaml +4 -0
- lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiauxenfeu.yaml +4 -0
lm-evaluation/lm_eval/tasks/anli/README.md
ADDED
@@ -0,0 +1,56 @@
+# ANLI
+
+### Paper
+
+Title: `Adversarial NLI: A New Benchmark for Natural Language Understanding`
+
+Paper Link: https://arxiv.org/abs/1910.14599
+
+Adversarial NLI (ANLI) is a dataset collected via an iterative, adversarial
+human-and-model-in-the-loop procedure. It consists of three rounds that progressively
+increase in difficulty and complexity, and each question-answer pair includes annotator-
+provided explanations.
+
+Homepage: https://github.com/facebookresearch/anli
+
+### Citation
+
+```
+@inproceedings{nie-etal-2020-adversarial,
+    title = "Adversarial {NLI}: A New Benchmark for Natural Language Understanding",
+    author = "Nie, Yixin and
+      Williams, Adina and
+      Dinan, Emily and
+      Bansal, Mohit and
+      Weston, Jason and
+      Kiela, Douwe",
+    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
+    year = "2020",
+    publisher = "Association for Computational Linguistics",
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+* `anli`: Evaluates `anli_r1`, `anli_r2`, and `anli_r3`
+
+#### Tasks
+
+* `anli_r1`: The data collected adversarially in the first round.
+* `anli_r2`: The data collected adversarially in the second round, after training on data from the previous round.
+* `anli_r3`: The data collected adversarially in the third round, after training on data from the previous rounds.
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [x] Is the task an existing benchmark in the literature?
+  * [x] Have you referenced the original paper that introduced the task?
+  * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/lm_eval/tasks/anli/anli_r1.yaml
ADDED
@@ -0,0 +1,26 @@
+group:
+  - anli
+task: anli_r1
+dataset_path: anli
+dataset_name: null
+output_type: multiple_choice
+training_split: train_r1
+validation_split: dev_r1
+test_split: test_r1
+doc_to_text: "{{premise}}\nQuestion: {{hypothesis}} True, False, or Neither?\nAnswer:"
+# True = entailment
+# False = contradiction
+# Neither = neutral
+doc_to_target: "{{['True', 'Neither', 'False'][label]}}"
+doc_to_choice:
+  - "True"
+  - "Neither"
+  - "False"
+should_decontaminate: true
+doc_to_decontamination_query: premise
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
+metadata:
+  version: 1.0
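The `doc_to_target` expression in `anli_r1.yaml` indexes a verbalizer list by the dataset's integer label, so 0 (entailment) renders as "True", 1 (neutral) as "Neither", and 2 (contradiction) as "False". A minimal sketch of the same rendering against the Hugging Face `anli` dataset, assuming its standard `premise`/`hypothesis`/`label` features:

```python
# Minimal sketch: render one ANLI round-1 test doc the way anli_r1.yaml does.
# Assumes the Hugging Face "anli" dataset, whose `label` feature encodes
# 0 = entailment, 1 = neutral, 2 = contradiction.
from datasets import load_dataset

doc = load_dataset("anli", split="test_r1")[0]
choices = ["True", "Neither", "False"]  # mirrors doc_to_choice

prompt = f"{doc['premise']}\nQuestion: {doc['hypothesis']} True, False, or Neither?\nAnswer:"
target = choices[doc["label"]]  # mirrors doc_to_target
print(prompt, target)
```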
lm-evaluation/lm_eval/tasks/anli/anli_r2.yaml
ADDED
@@ -0,0 +1,5 @@
+include: anli_r1.yaml
+task: anli_r2
+training_split: train_r2
+validation_split: dev_r2
+test_split: test_r2
lm-evaluation/lm_eval/tasks/anli/anli_r3.yaml
ADDED
@@ -0,0 +1,5 @@
+include: anli_r1.yaml
+task: anli_r3
+training_split: train_r3
+validation_split: dev_r3
+test_split: test_r3
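Note that `anli_r2.yaml` and `anli_r3.yaml` inherit the prompt template, answer choices, decontamination settings, and metrics from `anli_r1.yaml` via `include`, overriding only the task name and the split names.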
lm-evaluation/lm_eval/tasks/eus_exams/README.md
ADDED
@@ -0,0 +1,49 @@
+# EusExams
+
+### Paper
+
+Title: Latxa: An Open Language Model and Evaluation Suite for Basque
+
+Abstract: https://arxiv.org/abs/2403.20266
+
+EusExams is a collection of tests designed to prepare individuals for Public Service examinations conducted by several Basque institutions, including the public health system Osakidetza, the Basque Government, the City Councils of Bilbao and Gasteiz, and the University of the Basque Country (UPV/EHU). Within each of these groups, there are different exams for public positions, such as administrative and assistant roles. Each multiple-choice question contains 2 to 4 choices (3.90 on average) and one correct answer. The dataset is mostly parallel, with 16k questions in Basque and 18k in Spanish.
+
+Homepage: https://github.com/hitz-zentroa/latxa
+
+
+### Citation
+
+```
+@misc{etxaniz2024latxa,
+      title={Latxa: An Open Language Model and Evaluation Suite for Basque},
+      author={Julen Etxaniz and Oscar Sainz and Naiara Perez and Itziar Aldabe and German Rigau and Eneko Agirre and Aitor Ormazabal and Mikel Artetxe and Aitor Soroa},
+      year={2024},
+      eprint={2403.20266},
+      archivePrefix={arXiv},
+      primaryClass={cs.CL}
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+* `eus_exams_eu`: The Basque version of the exams.
+* `eus_exams_es`: The Spanish version of the exams.
+
+#### Tasks
+
+Basque and Spanish versions of the exams are available as separate tasks prefixed with `eus_exams_eu` and `eus_exams_es`, respectively.
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [ ] Is the task an existing benchmark in the literature?
+  * [ ] Have you referenced the original paper that introduced the task?
+  * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
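The two groups defined in this README map onto the per-exam tasks generated below. As a minimal sketch (not a reference setup), they can be run through the harness's Python entry point, assuming lm-evaluation-harness v0.4+ and any Hugging Face model; the model id here is only an example:

```python
# Minimal sketch: evaluate both EusExams groups via lm-evaluation-harness.
# Assumes v0.4+ of the harness; the pretrained model id is an example only.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=HiTZ/latxa-7b-v1",  # example model id
    tasks=["eus_exams_eu", "eus_exams_es"],
    num_fewshot=5,
)
print(results["results"])
```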
lm-evaluation/lm_eval/tasks/eus_exams/configs.py
ADDED
@@ -0,0 +1,67 @@
+import argparse
+import json
+
+import requests
+import yaml
+
+
+# Get the list of dataset configs from the Hugging Face datasets server.
+response = requests.get(
+    "https://datasets-server.huggingface.co/splits?dataset=HiTZ%2FEusExams", timeout=5
+)
+response_json = json.loads(response.text)
+CONFIGS = [split["config"] for split in response_json["splits"]]
+
+
+def gen_config_yamls(output_dir: str, overwrite: bool) -> None:
+    """
+    Generate a yaml file for each config.
+
+    :param output_dir: The directory to output the files to.
+    :param overwrite: Whether to overwrite files if they already exist.
+    """
+    err = []
+    for config in CONFIGS:
+        file_name = f"eus_exams_{config}.yaml"
+        try:
+            with open(f"{output_dir}/{file_name}", "w" if overwrite else "x") as f:
+                f.write("# Generated by utils.py\n")
+                yaml.dump(
+                    {
+                        # Spanish configs are named "es_*"; Basque configs "eu_*".
+                        "include": "eus_exams_es"
+                        if config.startswith("es")
+                        else "eus_exams_eu",
+                        "dataset_name": config,
+                        "task": f"eus_exams_{config}",
+                    },
+                    f,
+                )
+        except FileExistsError:
+            err.append(file_name)
+
+    if len(err) > 0:
+        raise FileExistsError(
+            "Files were not created because they already exist (use --overwrite flag):"
+            f" {', '.join(err)}"
+        )
+
+
+def main() -> None:
+    """Parse CLI args and generate config-specific yaml files."""
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--overwrite",
+        default=False,
+        action="store_true",
+        help="Overwrite files if they already exist",
+    )
+    parser.add_argument(
+        "--output-dir", default=".", help="Directory to write yaml files to"
+    )
+    args = parser.parse_args()
+
+    gen_config_yamls(output_dir=args.output_dir, overwrite=args.overwrite)
+
+
+if __name__ == "__main__":
+    main()
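Judging from the argparse setup above, the per-exam yaml stubs that follow can be regenerated from the task directory with `python configs.py --output-dir . --overwrite`.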
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams
ADDED
@@ -0,0 +1,18 @@
+dataset_path: HiTZ/EusExams
+dataset_name: null
+validation_split: null
+test_split: test
+fewshot_split: test
+process_docs: !function utils.process_docs
+output_type: multiple_choice
+doc_to_choice: ["A", "B", "C", "D"]
+doc_to_target: answer
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
+  - metric: acc_norm
+    aggregation: mean
+    higher_is_better: true
+metadata:
+  version: 0.0
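The base config references `utils.process_docs`, but `utils.py` is not among the 50 files shown in this view. As a purely hypothetical sketch of what such a hook could look like (in the harness, `process_docs` maps a `datasets.Dataset` to a processed `datasets.Dataset`; the filtering and padding logic below is an assumption, not the actual implementation):

```python
# Hypothetical sketch of a process_docs hook; the real utils.py is not in
# this view. Filters/pads candidate lists so every doc fits the fixed
# doc_to_choice set ["A", "B", "C", "D"] used by the base config above.
import datasets


def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    def _keep(doc) -> bool:
        # Drop malformed docs; questions have 2 to 4 candidates per the README.
        return 1 <= len(doc["candidates"]) <= 4

    def _pad(doc):
        # Pad short candidate lists so the prompt template can index 0..3.
        doc["candidates"] = doc["candidates"] + [""] * (4 - len(doc["candidates"]))
        return doc

    return dataset.filter(_keep).map(_pad)
```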
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es
ADDED
@@ -0,0 +1,4 @@
+include: eus_exams
+group:
+  - eus_exams_es
+doc_to_text: "Pregunta: {{question}}\nA: {{candidates[0]}}\nB: {{candidates[1]}}\nC: {{candidates[2]}}\nD: {{candidates[3]}}\nRespuesta:"
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_ejadministrativo.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_ejadministrativo
+include: eus_exams_es
+task: eus_exams_es_ejadministrativo
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_ejtecnico.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_ejtecnico
+include: eus_exams_es
+task: eus_exams_es_ejtecnico
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeayuntamientovitoria.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_opeayuntamientovitoria
+include: eus_exams_es
+task: eus_exams_es_opeayuntamientovitoria
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opebilbao.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_opebilbao
+include: eus_exams_es
+task: eus_exams_es_opebilbao
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehuadmin.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_opeehuadmin
+include: eus_exams_es
+task: eus_exams_es_opeehuadmin
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehubiblio.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_opeehubiblio
+include: eus_exams_es
+task: eus_exams_es_opeehubiblio
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehuderecho.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_opeehuderecho
+include: eus_exams_es
+task: eus_exams_es_opeehuderecho
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehueconomicas.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_opeehueconomicas
+include: eus_exams_es
+task: eus_exams_es_opeehueconomicas
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehuempresariales.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_opeehuempresariales
+include: eus_exams_es
+task: eus_exams_es_opeehuempresariales
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehusubalterno.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_opeehusubalterno
+include: eus_exams_es
+task: eus_exams_es_opeehusubalterno
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehutecnico.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_opeehutecnico
+include: eus_exams_es
+task: eus_exams_es_opeehutecnico
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeehutecnicob.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_opeehutecnicob
+include: eus_exams_es
+task: eus_exams_es_opeehutecnicob
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiadmin.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_opeosakiadmin
+include: eus_exams_es
+task: eus_exams_es_opeosakiadmin
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiaux.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_opeosakiaux
+include: eus_exams_es
+task: eus_exams_es_opeosakiaux
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiauxenf.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_opeosakiauxenf
+include: eus_exams_es
+task: eus_exams_es_opeosakiauxenf
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakicelador.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_opeosakicelador
+include: eus_exams_es
+task: eus_exams_es_opeosakicelador
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakijuridico.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_opeosakijuridico
+include: eus_exams_es
+task: eus_exams_es_opeosakijuridico
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakioperario.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_opeosakioperario
+include: eus_exams_es
+task: eus_exams_es_opeosakioperario
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakitecnico.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_opeosakitecnico
+include: eus_exams_es
+task: eus_exams_es_opeosakitecnico
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_opeosakivarios.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_opeosakivarios
+include: eus_exams_es
+task: eus_exams_es_opeosakivarios
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza1c.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_osakidetza1c
+include: eus_exams_es
+task: eus_exams_es_osakidetza1c
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza2c.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_osakidetza2c
+include: eus_exams_es
+task: eus_exams_es_osakidetza2c
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza3c.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_osakidetza3c
+include: eus_exams_es
+task: eus_exams_es_osakidetza3c
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza6c.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_osakidetza6c
+include: eus_exams_es
+task: eus_exams_es_osakidetza6c
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza7c.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_osakidetza7c
+include: eus_exams_es
+task: eus_exams_es_osakidetza7c
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza8c.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_osakidetza8c
+include: eus_exams_es
+task: eus_exams_es_osakidetza8c
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza9c.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: es_osakidetza9c
+include: eus_exams_es
+task: eus_exams_es_osakidetza9c
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu
ADDED
@@ -0,0 +1,4 @@
+include: eus_exams
+group:
+  - eus_exams_eu
+doc_to_text: "Galdera: {{question}}\nA: {{candidates[0]}}\nB: {{candidates[1]}}\nC: {{candidates[2]}}\nD: {{candidates[3]}}\nErantzuna:"
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_ejlaguntza.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: eu_ejlaguntza
+include: eus_exams_eu
+task: eus_exams_eu_ejlaguntza
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_ejlaguntzaile.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: eu_ejlaguntzaile
+include: eus_exams_eu
+task: eus_exams_eu_ejlaguntzaile
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_ejteknikari.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: eu_ejteknikari
+include: eus_exams_eu
+task: eus_exams_eu_ejteknikari
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opebilbaoeu.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: eu_opebilbaoeu
+include: eus_exams_eu
+task: eus_exams_eu_opebilbaoeu
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuadmineu.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: eu_opeehuadmineu
+include: eus_exams_eu
+task: eus_exams_eu_opeehuadmineu
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuauxeu.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: eu_opeehuauxeu
+include: eus_exams_eu
+task: eus_exams_eu_opeehuauxeu
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehubiblioeu.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: eu_opeehubiblioeu
+include: eus_exams_eu
+task: eus_exams_eu_opeehubiblioeu
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehueconomicaseu.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: eu_opeehueconomicaseu
+include: eus_exams_eu
+task: eus_exams_eu_opeehueconomicaseu
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuempresarialeseu.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: eu_opeehuempresarialeseu
+include: eus_exams_eu
+task: eus_exams_eu_opeehuempresarialeseu
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehusubalternoeu.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: eu_opeehusubalternoeu
+include: eus_exams_eu
+task: eus_exams_eu_opeehusubalternoeu
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehutecnicoeu.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: eu_opeehutecnicoeu
+include: eus_exams_eu
+task: eus_exams_eu_opeehutecnicoeu
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuteknikarib.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: eu_opeehuteknikarib
+include: eus_exams_eu
+task: eus_exams_eu_opeehuteknikarib
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiadmineu.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: eu_opeosakiadmineu
+include: eus_exams_eu
+task: eus_exams_eu_opeosakiadmineu
lm-evaluation/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiauxenfeu.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: eu_opeosakiauxenfeu
+include: eus_exams_eu
+task: eus_exams_eu_opeosakiauxenfeu