applied-ai-018 committed
Commit 7218152 (verified) · Parent: 053f734

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.

Files changed (50):
  1. lm-evaluation/lm_eval/tasks/arc/README.md +54 -0
  2. lm-evaluation/lm_eval/tasks/arc/arc_challenge.yaml +3 -0
  3. lm-evaluation/lm_eval/tasks/arc/arc_easy.yaml +23 -0
  4. lm-evaluation/lm_eval/tasks/belebele/README.md +49 -0
  5. lm-evaluation/lm_eval/tasks/belebele/belebele_afr_Latn.yaml +4 -0
  6. lm-evaluation/lm_eval/tasks/belebele/belebele_als_Latn.yaml +4 -0
  7. lm-evaluation/lm_eval/tasks/belebele/belebele_apc_Arab.yaml +4 -0
  8. lm-evaluation/lm_eval/tasks/belebele/belebele_ars_Arab.yaml +4 -0
  9. lm-evaluation/lm_eval/tasks/belebele/belebele_ary_Arab.yaml +4 -0
  10. lm-evaluation/lm_eval/tasks/belebele/belebele_azj_Latn.yaml +4 -0
  11. lm-evaluation/lm_eval/tasks/belebele/belebele_bam_Latn.yaml +4 -0
  12. lm-evaluation/lm_eval/tasks/belebele/belebele_ben_Beng.yaml +4 -0
  13. lm-evaluation/lm_eval/tasks/belebele/belebele_bul_Cyrl.yaml +4 -0
  14. lm-evaluation/lm_eval/tasks/belebele/belebele_cat_Latn.yaml +4 -0
  15. lm-evaluation/lm_eval/tasks/belebele/belebele_ceb_Latn.yaml +4 -0
  16. lm-evaluation/lm_eval/tasks/belebele/belebele_dan_Latn.yaml +4 -0
  17. lm-evaluation/lm_eval/tasks/belebele/belebele_deu_Latn.yaml +4 -0
  18. lm-evaluation/lm_eval/tasks/belebele/belebele_ell_Grek.yaml +4 -0
  19. lm-evaluation/lm_eval/tasks/belebele/belebele_eng_Latn.yaml +4 -0
  20. lm-evaluation/lm_eval/tasks/belebele/belebele_eus_Latn.yaml +4 -0
  21. lm-evaluation/lm_eval/tasks/belebele/belebele_fin_Latn.yaml +4 -0
  22. lm-evaluation/lm_eval/tasks/belebele/belebele_fra_Latn.yaml +4 -0
  23. lm-evaluation/lm_eval/tasks/belebele/belebele_gaz_Latn.yaml +4 -0
  24. lm-evaluation/lm_eval/tasks/belebele/belebele_grn_Latn.yaml +4 -0
  25. lm-evaluation/lm_eval/tasks/belebele/belebele_guj_Gujr.yaml +4 -0
  26. lm-evaluation/lm_eval/tasks/belebele/belebele_hat_Latn.yaml +4 -0
  27. lm-evaluation/lm_eval/tasks/belebele/belebele_hau_Latn.yaml +4 -0
  28. lm-evaluation/lm_eval/tasks/belebele/belebele_heb_Hebr.yaml +4 -0
  29. lm-evaluation/lm_eval/tasks/belebele/belebele_hrv_Latn.yaml +4 -0
  30. lm-evaluation/lm_eval/tasks/belebele/belebele_hye_Armn.yaml +4 -0
  31. lm-evaluation/lm_eval/tasks/belebele/belebele_ibo_Latn.yaml +4 -0
  32. lm-evaluation/lm_eval/tasks/belebele/belebele_jav_Latn.yaml +4 -0
  33. lm-evaluation/lm_eval/tasks/belebele/belebele_kan_Knda.yaml +4 -0
  34. lm-evaluation/lm_eval/tasks/belebele/belebele_kaz_Cyrl.yaml +4 -0
  35. lm-evaluation/lm_eval/tasks/belebele/belebele_kea_Latn.yaml +4 -0
  36. lm-evaluation/lm_eval/tasks/belebele/belebele_kir_Cyrl.yaml +4 -0
  37. lm-evaluation/lm_eval/tasks/belebele/belebele_lao_Laoo.yaml +4 -0
  38. lm-evaluation/lm_eval/tasks/belebele/belebele_lin_Latn.yaml +4 -0
  39. lm-evaluation/lm_eval/tasks/belebele/belebele_lit_Latn.yaml +4 -0
  40. lm-evaluation/lm_eval/tasks/belebele/belebele_luo_Latn.yaml +4 -0
  41. lm-evaluation/lm_eval/tasks/belebele/belebele_mar_Deva.yaml +4 -0
  42. lm-evaluation/lm_eval/tasks/belebele/belebele_mya_Mymr.yaml +4 -0
  43. lm-evaluation/lm_eval/tasks/belebele/belebele_nya_Latn.yaml +4 -0
  44. lm-evaluation/lm_eval/tasks/belebele/belebele_ory_Orya.yaml +4 -0
  45. lm-evaluation/lm_eval/tasks/belebele/belebele_pan_Guru.yaml +4 -0
  46. lm-evaluation/lm_eval/tasks/belebele/belebele_sin_Sinh.yaml +4 -0
  47. lm-evaluation/lm_eval/tasks/belebele/belebele_sna_Latn.yaml +4 -0
  48. lm-evaluation/lm_eval/tasks/belebele/belebele_snd_Arab.yaml +4 -0
  49. lm-evaluation/lm_eval/tasks/belebele/belebele_spa_Latn.yaml +4 -0
  50. lm-evaluation/lm_eval/tasks/belebele/belebele_srp_Cyrl.yaml +4 -0
lm-evaluation/lm_eval/tasks/arc/README.md ADDED
@@ -0,0 +1,54 @@
+ # ARC
+
+ ### Paper
+
+ Title: Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge
+
+ Abstract: https://arxiv.org/abs/1803.05457
+
+ The ARC dataset consists of 7,787 science exam questions drawn from a variety
+ of sources, including science questions provided under license by a research
+ partner affiliated with AI2. These are text-only, English-language exam questions
+ that span several grade levels, as indicated in the files. Each question has a
+ multiple-choice structure (typically 4 answer options). The questions are sorted
+ into a Challenge Set of 2,590 “hard” questions (those that both a retrieval and
+ a co-occurrence method fail to answer correctly) and an Easy Set of 5,197 questions.
+
+ Homepage: https://allenai.org/data/arc
+
+
+ ### Citation
+
+ ```
+ @article{Clark2018ThinkYH,
+   title={Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge},
+   author={Peter Clark and Isaac Cowhey and Oren Etzioni and Tushar Khot and Ashish Sabharwal and Carissa Schoenick and Oyvind Tafjord},
+   journal={ArXiv},
+   year={2018},
+   volume={abs/1803.05457}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ * `ai2_arc`: Evaluates `arc_easy` and `arc_challenge`
+
+ #### Tasks
+
+ * `arc_easy`
+ * `arc_challenge`
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [ ] Is the task an existing benchmark in the literature?
+ * [ ] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
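For context (not part of the commit): once these configs are installed, the two tasks can be run through the harness's Python API. A minimal sketch, assuming a recent lm-evaluation-harness release where `simple_evaluate` is exported from the `lm_eval` package; the exact signature and the model name are assumptions to check against your installed version.

```python
# Sketch: run the ARC tasks added in this commit via the lm_eval Python API.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",                                      # HuggingFace transformers backend
    model_args="pretrained=EleutherAI/pythia-160m",  # any small causal LM works for a smoke test
    tasks=["arc_easy", "arc_challenge"],             # the two tasks grouped under ai2_arc
    num_fewshot=0,
)
print(results["results"])  # per-task acc / acc_norm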
lm-evaluation/lm_eval/tasks/arc/arc_challenge.yaml ADDED
@@ -0,0 +1,3 @@
+ include: arc_easy.yaml
+ task: arc_challenge
+ dataset_name: ARC-Challenge
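For context on the `include` directive: the file above inherits every key from `arc_easy.yaml` and overrides `task` and `dataset_name`. A toy loader illustrating that override behavior; this is a sketch only, not the harness's actual loader, which among other things resolves the include path relative to the including file.

```python
# Illustrative include-and-override semantics for these task YAMLs.
import os
import yaml  # pip install pyyaml

def load_task_config(path: str) -> dict:
    with open(path) as f:
        cfg = yaml.safe_load(f) or {}
    base = cfg.pop("include", None)
    if base:
        base_path = os.path.join(os.path.dirname(path), base)
        merged = load_task_config(base_path)  # resolve the included base first
        merged.update(cfg)                    # keys in the including file win
        return merged
    return cfg

# load_task_config("arc_challenge.yaml") yields arc_easy.yaml's config with
# task=arc_challenge and dataset_name=ARC-Challenge swapped in.
```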
lm-evaluation/lm_eval/tasks/arc/arc_easy.yaml ADDED
@@ -0,0 +1,23 @@
+ group:
+   - ai2_arc
+ task: arc_easy
+ dataset_path: allenai/ai2_arc
+ dataset_name: ARC-Easy
+ output_type: multiple_choice
+ training_split: train
+ validation_split: validation
+ test_split: test
+ doc_to_text: "Question: {{question}}\nAnswer:"
+ doc_to_target: "{{choices.label.index(answerKey)}}"
+ doc_to_choice: "{{choices.text}}"
+ should_decontaminate: true
+ doc_to_decontamination_query: "Question: {{question}}\nAnswer:"
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 1.0
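A sketch of how the templated `doc_to_*` fields above resolve for one dataset record. The field layout (question, choices.text, choices.label, answerKey) matches the allenai/ai2_arc schema; the field values here are hypothetical.

```python
# How the doc_to_* templates resolve on a sample ai2_arc record.
doc = {
    "question": "Which gas do green plants absorb from the air?",
    "choices": {
        "text": ["Oxygen", "Carbon dioxide", "Nitrogen", "Helium"],
        "label": ["A", "B", "C", "D"],
    },
    "answerKey": "B",
}

prompt = f"Question: {doc['question']}\nAnswer:"        # doc_to_text
choices = doc["choices"]["text"]                        # doc_to_choice
gold = doc["choices"]["label"].index(doc["answerKey"])  # doc_to_target -> 1

# The harness scores each choice string's loglikelihood after `prompt`;
# `acc` takes the raw argmax, `acc_norm` length-normalizes scores first.
```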
lm-evaluation/lm_eval/tasks/belebele/README.md ADDED
@@ -0,0 +1,49 @@
+ # Belebele
+
+ ### Paper
+
+ The Belebele Benchmark for Massively Multilingual NLU Evaluation
+ https://arxiv.org/abs/2308.16884
+
+ Belebele is a multiple-choice machine reading comprehension (MRC) dataset spanning 122 language variants. This dataset enables the evaluation of mono- and multi-lingual models in high-, medium-, and low-resource languages. Each question has four multiple-choice answers and is linked to a short passage from the FLORES-200 dataset. The human annotation procedure was carefully curated to create questions that discriminate between different levels of generalizable language comprehension and is reinforced by extensive quality checks. While all questions directly relate to the passage, the English dataset on its own proves difficult enough to challenge state-of-the-art language models. Being fully parallel, this dataset enables direct comparison of model performance across all languages. Belebele opens up new avenues for evaluating and analyzing the multilingual abilities of language models and NLP systems.
+
+ Homepage: https://github.com/facebookresearch/belebele
+
+ ### Citation
+
+ ```bibtex
+ @misc{bandarkar2023belebele,
+   title={The Belebele Benchmark: a Parallel Reading Comprehension Dataset in 122 Language Variants},
+   author={Lucas Bandarkar and Davis Liang and Benjamin Muller and Mikel Artetxe and Satya Narayan Shukla and Donald Husa and Naman Goyal and Abhinandan Krishnan and Luke Zettlemoyer and Madian Khabsa},
+   year={2023},
+   eprint={2308.16884},
+   archivePrefix={arXiv},
+   primaryClass={cs.CL}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ - `belebele`: All 122 languages of the Belebele dataset, evaluated following the methodology of MMLU's original implementation.
+
+ #### Tasks
+
+
+ The following tasks evaluate languages in the Belebele dataset using loglikelihood-based multiple-choice scoring:
+ - `belebele_{language}`
+
+ The variant evaluated here is the zero-shot or few-shot evaluation with English instructions.
+
+ ### Checklist
+
+ * [x] Is the task an existing benchmark in the literature?
+ * [x] Have you referenced the original paper that introduced the task?
+ * [x] If yes, does the original paper provide a reference implementation?
+   * [ ] Yes, the original implementation was contributed by the author of the benchmark
+
+ If other tasks on this dataset are already supported:
+ * [x] Is the "Main" variant of this task clearly denoted?
+ * [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
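As with the ARC example above, a minimal usage sketch under the same `simple_evaluate` assumption; `num_fewshot=5` draws in-language examples via each task's `fewshot_split`.

```python
# Sketch: evaluate a couple of Belebele languages (model name is an example).
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",
    tasks=["belebele_eng_Latn", "belebele_deu_Latn"],  # or "belebele" for all 122
    num_fewshot=5,
)
print(results["results"])
```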
lm-evaluation/lm_eval/tasks/belebele/belebele_afr_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "afr_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_afr_Latn"
+ "test_split": "afr_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_als_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "als_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_als_Latn"
+ "test_split": "als_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_apc_Arab.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "apc_Arab"
+ "include": "_default_template_yaml"
+ "task": "belebele_apc_Arab"
+ "test_split": "apc_Arab"
lm-evaluation/lm_eval/tasks/belebele/belebele_ars_Arab.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "ars_Arab"
+ "include": "_default_template_yaml"
+ "task": "belebele_ars_Arab"
+ "test_split": "ars_Arab"
lm-evaluation/lm_eval/tasks/belebele/belebele_ary_Arab.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "ary_Arab"
+ "include": "_default_template_yaml"
+ "task": "belebele_ary_Arab"
+ "test_split": "ary_Arab"
lm-evaluation/lm_eval/tasks/belebele/belebele_azj_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "azj_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_azj_Latn"
+ "test_split": "azj_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_bam_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "bam_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_bam_Latn"
+ "test_split": "bam_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_ben_Beng.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "ben_Beng"
+ "include": "_default_template_yaml"
+ "task": "belebele_ben_Beng"
+ "test_split": "ben_Beng"
lm-evaluation/lm_eval/tasks/belebele/belebele_bul_Cyrl.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "bul_Cyrl"
+ "include": "_default_template_yaml"
+ "task": "belebele_bul_Cyrl"
+ "test_split": "bul_Cyrl"
lm-evaluation/lm_eval/tasks/belebele/belebele_cat_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "cat_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_cat_Latn"
+ "test_split": "cat_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_ceb_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "ceb_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_ceb_Latn"
+ "test_split": "ceb_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_dan_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "dan_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_dan_Latn"
+ "test_split": "dan_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_deu_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "deu_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_deu_Latn"
+ "test_split": "deu_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_ell_Grek.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "ell_Grek"
+ "include": "_default_template_yaml"
+ "task": "belebele_ell_Grek"
+ "test_split": "ell_Grek"
lm-evaluation/lm_eval/tasks/belebele/belebele_eng_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "eng_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_eng_Latn"
+ "test_split": "eng_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_eus_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "eus_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_eus_Latn"
+ "test_split": "eus_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_fin_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "fin_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_fin_Latn"
+ "test_split": "fin_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_fra_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "fra_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_fra_Latn"
+ "test_split": "fra_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_gaz_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "gaz_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_gaz_Latn"
+ "test_split": "gaz_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_grn_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "grn_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_grn_Latn"
+ "test_split": "grn_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_guj_Gujr.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "guj_Gujr"
+ "include": "_default_template_yaml"
+ "task": "belebele_guj_Gujr"
+ "test_split": "guj_Gujr"
lm-evaluation/lm_eval/tasks/belebele/belebele_hat_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "hat_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_hat_Latn"
+ "test_split": "hat_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_hau_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "hau_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_hau_Latn"
+ "test_split": "hau_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_heb_Hebr.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "heb_Hebr"
+ "include": "_default_template_yaml"
+ "task": "belebele_heb_Hebr"
+ "test_split": "heb_Hebr"
lm-evaluation/lm_eval/tasks/belebele/belebele_hrv_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "hrv_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_hrv_Latn"
+ "test_split": "hrv_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_hye_Armn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "hye_Armn"
+ "include": "_default_template_yaml"
+ "task": "belebele_hye_Armn"
+ "test_split": "hye_Armn"
lm-evaluation/lm_eval/tasks/belebele/belebele_ibo_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "ibo_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_ibo_Latn"
+ "test_split": "ibo_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_jav_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "jav_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_jav_Latn"
+ "test_split": "jav_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_kan_Knda.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "kan_Knda"
+ "include": "_default_template_yaml"
+ "task": "belebele_kan_Knda"
+ "test_split": "kan_Knda"
lm-evaluation/lm_eval/tasks/belebele/belebele_kaz_Cyrl.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "kaz_Cyrl"
+ "include": "_default_template_yaml"
+ "task": "belebele_kaz_Cyrl"
+ "test_split": "kaz_Cyrl"
lm-evaluation/lm_eval/tasks/belebele/belebele_kea_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "kea_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_kea_Latn"
+ "test_split": "kea_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_kir_Cyrl.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "kir_Cyrl"
+ "include": "_default_template_yaml"
+ "task": "belebele_kir_Cyrl"
+ "test_split": "kir_Cyrl"
lm-evaluation/lm_eval/tasks/belebele/belebele_lao_Laoo.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "lao_Laoo"
+ "include": "_default_template_yaml"
+ "task": "belebele_lao_Laoo"
+ "test_split": "lao_Laoo"
lm-evaluation/lm_eval/tasks/belebele/belebele_lin_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "lin_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_lin_Latn"
+ "test_split": "lin_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_lit_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "lit_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_lit_Latn"
+ "test_split": "lit_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_luo_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "luo_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_luo_Latn"
+ "test_split": "luo_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_mar_Deva.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "mar_Deva"
+ "include": "_default_template_yaml"
+ "task": "belebele_mar_Deva"
+ "test_split": "mar_Deva"
lm-evaluation/lm_eval/tasks/belebele/belebele_mya_Mymr.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "mya_Mymr"
+ "include": "_default_template_yaml"
+ "task": "belebele_mya_Mymr"
+ "test_split": "mya_Mymr"
lm-evaluation/lm_eval/tasks/belebele/belebele_nya_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "nya_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_nya_Latn"
+ "test_split": "nya_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_ory_Orya.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "ory_Orya"
+ "include": "_default_template_yaml"
+ "task": "belebele_ory_Orya"
+ "test_split": "ory_Orya"
lm-evaluation/lm_eval/tasks/belebele/belebele_pan_Guru.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "pan_Guru"
+ "include": "_default_template_yaml"
+ "task": "belebele_pan_Guru"
+ "test_split": "pan_Guru"
lm-evaluation/lm_eval/tasks/belebele/belebele_sin_Sinh.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "sin_Sinh"
+ "include": "_default_template_yaml"
+ "task": "belebele_sin_Sinh"
+ "test_split": "sin_Sinh"
lm-evaluation/lm_eval/tasks/belebele/belebele_sna_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "sna_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_sna_Latn"
+ "test_split": "sna_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_snd_Arab.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "snd_Arab"
+ "include": "_default_template_yaml"
+ "task": "belebele_snd_Arab"
+ "test_split": "snd_Arab"
lm-evaluation/lm_eval/tasks/belebele/belebele_spa_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "spa_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_spa_Latn"
+ "test_split": "spa_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_srp_Cyrl.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "srp_Cyrl"
+ "include": "_default_template_yaml"
+ "task": "belebele_srp_Cyrl"
+ "test_split": "srp_Cyrl"