applied-ai-018 committed
Commit 96a9cb3 · verified · 1 Parent(s): 4bbd3bb

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/README.md +49 -0
  2. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/configs.py +67 -0
  3. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams +18 -0
  4. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_ejadministrativo.yaml +4 -0
  5. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_ejauxiliar.yaml +4 -0
  6. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_ejsubalterno.yaml +4 -0
  7. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_ejtecnico.yaml +4 -0
  8. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeayuntamientovitoria.yaml +4 -0
  9. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opebilbao.yaml +4 -0
  10. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeehuadmin.yaml +4 -0
  11. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeehuaux.yaml +4 -0
  12. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeehubiblio.yaml +4 -0
  13. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeehuderecho.yaml +4 -0
  14. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeehueconomicas.yaml +4 -0
  15. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeehuempresariales.yaml +4 -0
  16. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeehusubalterno.yaml +4 -0
  17. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeehutecnico.yaml +4 -0
  18. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeehutecnicob.yaml +4 -0
  19. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiadmin.yaml +4 -0
  20. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakicelador.yaml +4 -0
  21. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakienf.yaml +4 -0
  22. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakijuridico.yaml +4 -0
  23. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakioperario.yaml +4 -0
  24. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakitecnico.yaml +4 -0
  25. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakivarios.yaml +4 -0
  26. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza1c.yaml +4 -0
  27. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza2c.yaml +4 -0
  28. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza3c.yaml +4 -0
  29. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza4c.yaml +4 -0
  30. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza5c.yaml +4 -0
  31. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza6c.yaml +4 -0
  32. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza7c.yaml +4 -0
  33. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza8c.yaml +4 -0
  34. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza9c.yaml +4 -0
  35. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_ejadministrari.yaml +4 -0
  36. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_ejlaguntza.yaml +4 -0
  37. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_ejlaguntzaile.yaml +4 -0
  38. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opebilbaoeu.yaml +4 -0
  39. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuadmineu.yaml +4 -0
  40. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeehubiblioeu.yaml +4 -0
  41. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuderechoeu.yaml +4 -0
  42. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeehueconomicaseu.yaml +4 -0
  43. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuempresarialeseu.yaml +4 -0
  44. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeehusubalternoeu.yaml +4 -0
  45. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeehutecnicoeu.yaml +4 -0
  46. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuteknikarib.yaml +4 -0
  47. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opegasteizkoudala.yaml +4 -0
  48. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiadmineu.yaml +4 -0
  49. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiauxenfeu.yaml +4 -0
  50. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiauxeu.yaml +4 -0
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/README.md ADDED
@@ -0,0 +1,49 @@
+ # EusExams
+
+ ### Paper
+
+ Title: Latxa: An Open Language Model and Evaluation Suite for Basque
+
+ Abstract: https://arxiv.org/abs/2403.20266
+
+ EusExams is a collection of tests designed to prepare individuals for Public Service examinations conducted by several Basque institutions, including the public health system Osakidetza, the Basque Government, the City Councils of Bilbao and Gasteiz, and the University of the Basque Country (UPV/EHU). Within each of these groups, there are different exams for public positions, such as administrative and assistant roles. Each multiple-choice question contains 2 to 4 choices (3.90 on average) and one correct answer. The dataset is mostly parallel, with 16k questions in Basque and 18k in Spanish.
+
+ Homepage: https://github.com/hitz-zentroa/latxa
+
+
+ ### Citation
+
+ ```
+ @misc{etxaniz2024latxa,
+       title={Latxa: An Open Language Model and Evaluation Suite for Basque},
+       author={Julen Etxaniz and Oscar Sainz and Naiara Perez and Itziar Aldabe and German Rigau and Eneko Agirre and Aitor Ormazabal and Mikel Artetxe and Aitor Soroa},
+       year={2024},
+       eprint={2403.20266},
+       archivePrefix={arXiv},
+       primaryClass={cs.CL}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ * `eus_exams_eu`: The Basque version of the exams.
+ * `eus_exams_es`: The Spanish version of the exams.
+
+ #### Tasks
+
+ Basque and Spanish versions of the exams are available as separate tasks starting with `eus_exams_eu` and `eus_exams_es`, respectively.
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [ ] Is the task an existing benchmark in the literature?
+   * [ ] Have you referenced the original paper that introduced the task?
+   * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
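
For reference (not part of the diff), a minimal sketch of how these groups can be run through the harness' Python API. It is illustrative only: the exact `simple_evaluate` arguments vary between lm-evaluation-harness versions, and the checkpoint name is just an example, not something this commit prescribes.

```python
# Illustrative sketch: evaluate the Basque exam group with lm-evaluation-harness.
# The model id below is only an example; any causal LM served by the "hf" backend works.
from lm_eval import evaluator

results = evaluator.simple_evaluate(
    model="hf",                                  # Hugging Face transformers backend
    model_args="pretrained=HiTZ/latxa-7b-v1.2",  # example checkpoint
    tasks=["eus_exams_eu"],                      # group name; single tasks such as
                                                 #   "eus_exams_eu_opebilbaoeu" also work
    num_fewshot=5,
)
print(results["results"])                        # per-task acc / acc_norm
```

A roughly equivalent CLI call is `lm_eval --model hf --model_args pretrained=<model> --tasks eus_exams_eu --num_fewshot 5` (flag spellings can differ between harness versions).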
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/configs.py ADDED
@@ -0,0 +1,67 @@
+ import argparse
+ import json
+
+ import requests
+ import yaml
+
+
+ # get the list of configs from the Hugging Face datasets server
+ response = requests.get(
+     "https://datasets-server.huggingface.co/splits?dataset=HiTZ%2FEusExams", timeout=5
+ )
+ response_json = json.loads(response.text)
+ CONFIGS = [split["config"] for split in response_json["splits"]]
+
+
+ def gen_config_yamls(output_dir: str, overwrite: bool) -> None:
+     """
+     Generate a yaml file for each config.
+
+     :param output_dir: The directory to output the files to.
+     :param overwrite: Whether to overwrite files if they already exist.
+     """
+     err = []
+     for config in CONFIGS:
+         file_name = f"eus_exams_{config}.yaml"
+         try:
+             with open(f"{output_dir}/{file_name}", "w" if overwrite else "x") as f:
+                 f.write("# Generated by utils.py\n")
+                 yaml.dump(
+                     {
+                         "include": "eus_exams_es"
+                         if "eus_exams_es" in config
+                         else "eus_exams_eu",
+                         "dataset_name": config,
+                         "task": f"eus_exams_{config}",
+                     },
+                     f,
+                 )
+         except FileExistsError:
+             err.append(file_name)
+
+     if len(err) > 0:
+         raise FileExistsError(
+             "Files were not created because they already exist (use --overwrite flag):"
+             f" {', '.join(err)}"
+         )
+
+
+ def main() -> None:
+     """Parse CLI args and generate config-specific yaml files."""
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "--overwrite",
+         default=False,
+         action="store_true",
+         help="Overwrite files if they already exist",
+     )
+     parser.add_argument(
+         "--output-dir", default=".", help="Directory to write yaml files to"
+     )
+     args = parser.parse_args()
+
+     gen_config_yamls(output_dir=args.output_dir, overwrite=args.overwrite)
+
+
+ if __name__ == "__main__":
+     main()
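
A quick usage sketch for the generator above (again, not part of the diff). Importing the module already performs the request to the datasets server, so network access is required; the call below mirrors what `main()` does when run as `python configs.py --output-dir . --overwrite`.

```python
# Regenerate every eus_exams_<config>.yaml in the current directory.
# Equivalent to: python configs.py --output-dir . --overwrite
from configs import CONFIGS, gen_config_yamls

print(f"datasets server reports {len(CONFIGS)} configs")
gen_config_yamls(output_dir=".", overwrite=True)
```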
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams ADDED
@@ -0,0 +1,18 @@
+ dataset_path: HiTZ/EusExams
+ dataset_name: null
+ validation_split: null
+ test_split: test
+ fewshot_split: test
+ process_docs: !function utils.process_docs
+ output_type: multiple_choice
+ doc_to_choice: ["A", "B", "C", "D"]
+ doc_to_target: answer
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 0.0
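
The shared config above delegates preprocessing to `utils.process_docs`, which is not among the 50 files shown in this view. Purely as a hypothetical illustration of what that hook can look like (the `candidates` field name is an assumption about the HiTZ/EusExams schema, not something taken from this diff), such a function could filter each split down to questions whose option count matches the fixed A-D `doc_to_choice`:

```python
import datasets


# Hypothetical sketch only; the real utils.process_docs is not part of this view.
# Assumes each row carries a "candidates" list of option strings (an assumption).
def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    def has_four_options(doc) -> bool:
        # keep questions whose options line up with the fixed A/B/C/D choices
        return len(doc["candidates"]) == 4

    return dataset.filter(has_four_options)
```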
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_ejadministrativo.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_ejadministrativo
+ include: eus_exams_es
+ task: eus_exams_es_ejadministrativo
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_ejauxiliar.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_ejauxiliar
+ include: eus_exams_es
+ task: eus_exams_es_ejauxiliar
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_ejsubalterno.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_ejsubalterno
+ include: eus_exams_es
+ task: eus_exams_es_ejsubalterno
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_ejtecnico.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_ejtecnico
+ include: eus_exams_es
+ task: eus_exams_es_ejtecnico
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeayuntamientovitoria.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_opeayuntamientovitoria
+ include: eus_exams_es
+ task: eus_exams_es_opeayuntamientovitoria
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opebilbao.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_opebilbao
+ include: eus_exams_es
+ task: eus_exams_es_opebilbao
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeehuadmin.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_opeehuadmin
+ include: eus_exams_es
+ task: eus_exams_es_opeehuadmin
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeehuaux.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_opeehuaux
+ include: eus_exams_es
+ task: eus_exams_es_opeehuaux
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeehubiblio.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_opeehubiblio
+ include: eus_exams_es
+ task: eus_exams_es_opeehubiblio
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeehuderecho.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_opeehuderecho
+ include: eus_exams_es
+ task: eus_exams_es_opeehuderecho
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeehueconomicas.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_opeehueconomicas
+ include: eus_exams_es
+ task: eus_exams_es_opeehueconomicas
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeehuempresariales.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_opeehuempresariales
+ include: eus_exams_es
+ task: eus_exams_es_opeehuempresariales
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeehusubalterno.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_opeehusubalterno
+ include: eus_exams_es
+ task: eus_exams_es_opeehusubalterno
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeehutecnico.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_opeehutecnico
+ include: eus_exams_es
+ task: eus_exams_es_opeehutecnico
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeehutecnicob.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_opeehutecnicob
+ include: eus_exams_es
+ task: eus_exams_es_opeehutecnicob
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiadmin.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_opeosakiadmin
+ include: eus_exams_es
+ task: eus_exams_es_opeosakiadmin
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakicelador.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_opeosakicelador
+ include: eus_exams_es
+ task: eus_exams_es_opeosakicelador
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakienf.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_opeosakienf
+ include: eus_exams_es
+ task: eus_exams_es_opeosakienf
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakijuridico.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_opeosakijuridico
+ include: eus_exams_es
+ task: eus_exams_es_opeosakijuridico
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakioperario.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_opeosakioperario
+ include: eus_exams_es
+ task: eus_exams_es_opeosakioperario
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakitecnico.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_opeosakitecnico
+ include: eus_exams_es
+ task: eus_exams_es_opeosakitecnico
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakivarios.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_opeosakivarios
+ include: eus_exams_es
+ task: eus_exams_es_opeosakivarios
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza1c.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_osakidetza1c
+ include: eus_exams_es
+ task: eus_exams_es_osakidetza1c
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza2c.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_osakidetza2c
+ include: eus_exams_es
+ task: eus_exams_es_osakidetza2c
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza3c.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_osakidetza3c
+ include: eus_exams_es
+ task: eus_exams_es_osakidetza3c
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza4c.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_osakidetza4c
+ include: eus_exams_es
+ task: eus_exams_es_osakidetza4c
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza5c.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_osakidetza5c
+ include: eus_exams_es
+ task: eus_exams_es_osakidetza5c
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza6c.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_osakidetza6c
+ include: eus_exams_es
+ task: eus_exams_es_osakidetza6c
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza7c.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_osakidetza7c
+ include: eus_exams_es
+ task: eus_exams_es_osakidetza7c
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza8c.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_osakidetza8c
+ include: eus_exams_es
+ task: eus_exams_es_osakidetza8c
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza9c.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_osakidetza9c
+ include: eus_exams_es
+ task: eus_exams_es_osakidetza9c
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_ejadministrari.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: eu_ejadministrari
+ include: eus_exams_eu
+ task: eus_exams_eu_ejadministrari
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_ejlaguntza.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: eu_ejlaguntza
+ include: eus_exams_eu
+ task: eus_exams_eu_ejlaguntza
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_ejlaguntzaile.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: eu_ejlaguntzaile
+ include: eus_exams_eu
+ task: eus_exams_eu_ejlaguntzaile
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opebilbaoeu.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: eu_opebilbaoeu
+ include: eus_exams_eu
+ task: eus_exams_eu_opebilbaoeu
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuadmineu.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: eu_opeehuadmineu
+ include: eus_exams_eu
+ task: eus_exams_eu_opeehuadmineu
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeehubiblioeu.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: eu_opeehubiblioeu
+ include: eus_exams_eu
+ task: eus_exams_eu_opeehubiblioeu
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuderechoeu.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: eu_opeehuderechoeu
+ include: eus_exams_eu
+ task: eus_exams_eu_opeehuderechoeu
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeehueconomicaseu.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: eu_opeehueconomicaseu
+ include: eus_exams_eu
+ task: eus_exams_eu_opeehueconomicaseu
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuempresarialeseu.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: eu_opeehuempresarialeseu
+ include: eus_exams_eu
+ task: eus_exams_eu_opeehuempresarialeseu
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeehusubalternoeu.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: eu_opeehusubalternoeu
+ include: eus_exams_eu
+ task: eus_exams_eu_opeehusubalternoeu
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeehutecnicoeu.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: eu_opeehutecnicoeu
+ include: eus_exams_eu
+ task: eus_exams_eu_opeehutecnicoeu
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuteknikarib.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: eu_opeehuteknikarib
+ include: eus_exams_eu
+ task: eus_exams_eu_opeehuteknikarib
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opegasteizkoudala.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: eu_opegasteizkoudala
+ include: eus_exams_eu
+ task: eus_exams_eu_opegasteizkoudala
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiadmineu.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: eu_opeosakiadmineu
+ include: eus_exams_eu
+ task: eus_exams_eu_opeosakiadmineu
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiauxenfeu.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: eu_opeosakiauxenfeu
+ include: eus_exams_eu
+ task: eus_exams_eu_opeosakiauxenfeu
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiauxeu.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: eu_opeosakiauxeu
+ include: eus_exams_eu
+ task: eus_exams_eu_opeosakiauxeu