applied-ai-018 committed on
Commit d9e27f8 · verified · 1 Parent(s): a329543

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes; see the raw diff for the complete set.

Files changed (50):
  1. lm-evaluation/build/lib/lm_eval/tasks/cmmlu/README.md +48 -0
  2. lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_agronomy.yaml +4 -0
  3. lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_chinese_driving_rule.yaml +4 -0
  4. lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_chinese_food_culture.yaml +4 -0
  5. lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_college_actuarial_science.yaml +4 -0
  6. lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_elementary_commonsense.yaml +4 -0
  7. lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_elementary_mathematics.yaml +4 -0
  8. lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_ethnology.yaml +4 -0
  9. lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_high_school_physics.yaml +4 -0
  10. lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_human_sexuality.yaml +4 -0
  11. lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_management.yaml +4 -0
  12. lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_philosophy.yaml +4 -0
  13. lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_professional_medicine.yaml +4 -0
  14. lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_security_study.yaml +4 -0
  15. lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_sociology.yaml +4 -0
  16. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/README.md +47 -0
  17. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/_truthfulqa_mc1_yaml +20 -0
  18. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/_truthfulqa_mc2_yaml +12 -0
  19. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_ar_mc1.yaml +7 -0
  20. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_ar_mc2.yaml +7 -0
  21. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_bn_mc1.yaml +7 -0
  22. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_bn_mc2.yaml +7 -0
  23. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_ca_mc2.yaml +7 -0
  24. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_da_mc1.yaml +7 -0
  25. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_da_mc2.yaml +7 -0
  26. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_de_mc1.yaml +7 -0
  27. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_es_mc1.yaml +7 -0
  28. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_es_mc2.yaml +7 -0
  29. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_eu_mc1.yaml +7 -0
  30. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_eu_mc2.yaml +7 -0
  31. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_fr_mc1.yaml +7 -0
  32. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_fr_mc2.yaml +7 -0
  33. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_gu_mc1.yaml +7 -0
  34. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_gu_mc2.yaml +7 -0
  35. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hi_mc1.yaml +7 -0
  36. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hi_mc2.yaml +7 -0
  37. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hr_mc1.yaml +7 -0
  38. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hr_mc2.yaml +7 -0
  39. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hu_mc1.yaml +7 -0
  40. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hu_mc2.yaml +7 -0
  41. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hy_mc1.yaml +7 -0
  42. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hy_mc2.yaml +7 -0
  43. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_id_mc1.yaml +7 -0
  44. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_id_mc2.yaml +7 -0
  45. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_it_mc1.yaml +7 -0
  46. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_it_mc2.yaml +7 -0
  47. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_kn_mc1.yaml +7 -0
  48. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_kn_mc2.yaml +7 -0
  49. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_ml_mc2.yaml +7 -0
  50. lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_mr_mc2.yaml +7 -0
lm-evaluation/build/lib/lm_eval/tasks/cmmlu/README.md ADDED
@@ -0,0 +1,48 @@
+ # CMMLU
+
+ ### Paper
+
+ CMMLU: Measuring massive multitask language understanding in Chinese
+ https://arxiv.org/abs/2306.09212
+
+ CMMLU is a comprehensive evaluation benchmark specifically designed to evaluate the knowledge and reasoning abilities of LLMs within the context of Chinese language and culture.
+ CMMLU covers a wide range of subjects, comprising 67 topics that span from elementary to advanced professional levels.
+
+ Homepage: https://github.com/haonan-li/CMMLU
+
+ ### Citation
+
+ ```bibtex
+ @misc{li2023cmmlu,
+     title={CMMLU: Measuring massive multitask language understanding in Chinese},
+     author={Haonan Li and Yixuan Zhang and Fajri Koto and Yifei Yang and Hai Zhao and Yeyun Gong and Nan Duan and Timothy Baldwin},
+     year={2023},
+     eprint={2306.09212},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ - `cmmlu`: All 67 subjects of the CMMLU dataset, evaluated following the methodology in MMLU's original implementation.
+
+ #### Tasks
+
+
+ The following tasks evaluate subjects in the CMMLU dataset using loglikelihood-based multiple-choice scoring:
+ - `cmmlu_{subject_english}`
+
+ ### Checklist
+
+ * [x] Is the task an existing benchmark in the literature?
+ * [x] Have you referenced the original paper that introduced the task?
+ * [x] If yes, does the original paper provide a reference implementation?
+   * [x] Yes, original implementation contributed by author of the benchmark
+
+ If other tasks on this dataset are already supported:
+ * [x] Is the "Main" variant of this task clearly denoted?
+ * [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [x] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_agronomy.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "agronomy"
+ "description": "以下是关于农学的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_agronomy"
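Each of these per-subject files is only a thin override: the `_default_template_yaml` it includes (not among the 50 files shown in this view) supplies the dataset path, splits, prompt format, and metrics. Purely as an illustration of how the include mechanism composes, and reusing the config keys that appear in the truthfulqa YAMLs later in this commit, a shared template could look roughly like the sketch below; the dataset path, split names, and prompt fields are assumptions, not the contents of the actual file.

```yaml
# Hypothetical sketch of a shared template that a per-subject file could include.
# Keys mirror those used elsewhere in this commit; the specific values
# (dataset_path, split names, column names) are assumptions for illustration only.
dataset_path: haonan-li/cmmlu        # assumed Hugging Face dataset id
output_type: multiple_choice
test_split: test                     # assumed split names
fewshot_split: dev
doc_to_text: "{{Question}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\n答案:"   # assumed column names
doc_to_choice: ["A", "B", "C", "D"]
doc_to_target: "{{['A', 'B', 'C', 'D'].index(Answer)}}"                      # assumed answer column
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
```

At load time the harness merges the included template with the four overriding keys, so `cmmlu_agronomy` differs from the other subjects only in its `dataset_name`, its prepended Chinese `description`, and its `task` name.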
lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_chinese_driving_rule.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "chinese_driving_rule"
+ "description": "以下是关于中国驾驶规则的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_chinese_driving_rule"
lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_chinese_food_culture.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "chinese_food_culture"
+ "description": "以下是关于中国饮食文化的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_chinese_food_culture"
lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_college_actuarial_science.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "college_actuarial_science"
+ "description": "以下是关于大学精算学的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_college_actuarial_science"
lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_elementary_commonsense.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "elementary_commonsense"
+ "description": "以下是关于小学常识的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_elementary_commonsense"
lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_elementary_mathematics.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "elementary_mathematics"
+ "description": "以下是关于初等数学的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_elementary_mathematics"
lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_ethnology.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "ethnology"
+ "description": "以下是关于民族学的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_ethnology"
lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_high_school_physics.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "high_school_physics"
+ "description": "以下是关于高中物理学的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_high_school_physics"
lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_human_sexuality.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "human_sexuality"
+ "description": "以下是关于人类性行为的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_human_sexuality"
lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_management.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "management"
+ "description": "以下是关于管理学的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_management"
lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_philosophy.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "philosophy"
+ "description": "以下是关于哲学的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_philosophy"
lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_professional_medicine.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "professional_medicine"
+ "description": "以下是关于专业医学的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_professional_medicine"
lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_security_study.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "security_study"
+ "description": "以下是关于安全研究的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_security_study"
lm-evaluation/build/lib/lm_eval/tasks/cmmlu/cmmlu_default_sociology.yaml ADDED
@@ -0,0 +1,4 @@
+ "dataset_name": "sociology"
+ "description": "以下是关于社会学的单项选择题,请直接给出正确答案的选项。\n\n"
+ "include": "_default_template_yaml"
+ "task": "cmmlu_sociology"
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/README.md ADDED
@@ -0,0 +1,47 @@
+ # Multilingual TruthfulQA
+
+ ### Paper
+
+ Title: `Okapi: Instruction-tuned Large Language Models in Multiple Languages with Reinforcement Learning from Human Feedback`
+
+ Abstract: https://arxiv.org/abs/2307.16039
+
+ A key technology for the development of large language models (LLMs) involves instruction tuning that helps align the models' responses with human expectations to realize impressive learning abilities. Two major approaches for instruction tuning characterize supervised fine-tuning (SFT) and reinforcement learning from human feedback (RLHF), which are currently applied to produce the best commercial LLMs (e.g., ChatGPT). To improve the accessibility of LLMs for research and development efforts, various instruction-tuned open-source LLMs have also been introduced recently, e.g., Alpaca, Vicuna, to name a few. However, existing open-source LLMs have only been instruction-tuned for English and a few popular languages, thus hindering their impacts and accessibility to many other languages in the world. Among a few very recent work to explore instruction tuning for LLMs in multiple languages, SFT has been used as the only approach to instruction-tune LLMs for multiple languages. This has left a significant gap for fine-tuned LLMs based on RLHF in diverse languages and raised important questions on how RLHF can boost the performance of multilingual instruction tuning. To overcome this issue, we present Okapi, the first system with instruction-tuned LLMs based on RLHF for multiple languages. Okapi introduces instruction and response-ranked data in 26 diverse languages to facilitate the experiments and development of future multilingual LLM research. We also present benchmark datasets to enable the evaluation of generative LLMs in multiple languages. Our experiments demonstrate the advantages of RLHF for multilingual instruction over SFT for different base models and datasets. Our framework and resources are released at this https URL.
+
+ Homepage: `https://github.com/nlp-uoregon/Okapi`
+
+
+ ### Citation
+
+ ```
+ @article{dac2023okapi,
+     title={Okapi: Instruction-tuned Large Language Models in Multiple Languages with Reinforcement Learning from Human Feedback},
+     author={Dac Lai, Viet and Van Nguyen, Chien and Ngo, Nghia Trung and Nguyen, Thuat and Dernoncourt, Franck and Rossi, Ryan A and Nguyen, Thien Huu},
+     journal={arXiv e-prints},
+     pages={arXiv--2307},
+     year={2023}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ - truthfulqa_multilingual
+
+ #### Tasks
+
+ - `truthfulqa_{ar,bn,ca,da,de,es,eu,fr,gu,hi,hr,hu,hy,id,it,kn,ml,mr,ne,nl,pt,ro,ru,sk,sr,sv,ta,te,uk,vi,zh}`
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [x] Is the task an existing benchmark in the literature?
+ * [x] Have you referenced the original paper that introduced the task?
+ * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/_truthfulqa_mc1_yaml ADDED
@@ -0,0 +1,20 @@
+ group:
+   - truthfulqa_multilingual
+ dataset_path: null
+ dataset_name: null
+ output_type: multiple_choice
+ training_split: null
+ validation_split: val
+ test_split: null
+ process_docs: !function utils.process_docs
+ doc_to_text: "query"
+ doc_to_target: 0
+ doc_to_choice: "mc1_choices"
+ should_decontaminate: True
+ doc_to_decontamination_query: "question"
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/_truthfulqa_mc2_yaml ADDED
@@ -0,0 +1,12 @@
+ include: _truthfulqa_mc1_yaml
+ doc_to_target: 0
+ doc_to_choice: "mc2_choices"
+ process_results: !function utils.process_results_mc2
+ should_decontaminate: True
+ doc_to_decontamination_query: "question"
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_ar_mc1.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc1_yaml
+ task: truthfulqa_ar_mc1
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: ar
+ training_split: null
+ validation_split: val
+ test_split: null
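The per-language files below all follow the same include-and-override pattern. Resolving the `include` by hand for the Arabic MC1 task above — merging `_truthfulqa_mc1_yaml` with the seven keys in `truthfulqa_ar_mc1.yaml` — gives an effective configuration along these lines (a sketch of the merge for illustration, not a file in the commit):

```yaml
# Effective configuration for truthfulqa_ar_mc1 once the include is resolved:
# base keys from _truthfulqa_mc1_yaml, with the per-language file overriding
# task, dataset_path, dataset_name, and the split settings.
task: truthfulqa_ar_mc1
group:
  - truthfulqa_multilingual
dataset_path: alexandrainst/m_truthfulqa
dataset_name: ar
output_type: multiple_choice
training_split: null
validation_split: val
test_split: null
process_docs: !function utils.process_docs
doc_to_text: "query"
doc_to_target: 0
doc_to_choice: "mc1_choices"
should_decontaminate: True
doc_to_decontamination_query: "question"
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
```

The `_mc2` variants differ only in the keys overridden by `_truthfulqa_mc2_yaml`: `doc_to_choice: "mc2_choices"` and a custom `process_results: !function utils.process_results_mc2`.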
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_ar_mc2.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc2_yaml
+ task: truthfulqa_ar_mc2
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: ar
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_bn_mc1.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc1_yaml
+ task: truthfulqa_bn_mc1
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: bn
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_bn_mc2.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc2_yaml
+ task: truthfulqa_bn_mc2
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: bn
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_ca_mc2.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc2_yaml
+ task: truthfulqa_ca_mc2
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: ca
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_da_mc1.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc1_yaml
+ task: truthfulqa_da_mc1
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: da
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_da_mc2.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc2_yaml
+ task: truthfulqa_da_mc2
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: da
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_de_mc1.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc1_yaml
+ task: truthfulqa_de_mc1
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: de
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_es_mc1.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc1_yaml
+ task: truthfulqa_es_mc1
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: es
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_es_mc2.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc2_yaml
+ task: truthfulqa_es_mc2
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: es
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_eu_mc1.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc1_yaml
+ task: truthfulqa_eu_mc1
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: eu
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_eu_mc2.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc2_yaml
+ task: truthfulqa_eu_mc2
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: eu
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_fr_mc1.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc1_yaml
+ task: truthfulqa_fr_mc1
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: fr
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_fr_mc2.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc2_yaml
+ task: truthfulqa_fr_mc2
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: fr
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_gu_mc1.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc1_yaml
+ task: truthfulqa_gu_mc1
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: gu
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_gu_mc2.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc2_yaml
+ task: truthfulqa_gu_mc2
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: gu
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hi_mc1.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc1_yaml
+ task: truthfulqa_hi_mc1
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: hi
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hi_mc2.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc2_yaml
+ task: truthfulqa_hi_mc2
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: hi
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hr_mc1.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc1_yaml
+ task: truthfulqa_hr_mc1
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: hr
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hr_mc2.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc2_yaml
+ task: truthfulqa_hr_mc2
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: hr
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hu_mc1.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc1_yaml
+ task: truthfulqa_hu_mc1
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: hu
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hu_mc2.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc2_yaml
+ task: truthfulqa_hu_mc2
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: hu
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hy_mc1.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc1_yaml
+ task: truthfulqa_hy_mc1
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: hy
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hy_mc2.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc2_yaml
+ task: truthfulqa_hy_mc2
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: hy
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_id_mc1.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc1_yaml
+ task: truthfulqa_id_mc1
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: id
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_id_mc2.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc2_yaml
+ task: truthfulqa_id_mc2
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: id
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_it_mc1.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc1_yaml
+ task: truthfulqa_it_mc1
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: it
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_it_mc2.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc2_yaml
+ task: truthfulqa_it_mc2
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: it
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_kn_mc1.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc1_yaml
+ task: truthfulqa_kn_mc1
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: kn
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_kn_mc2.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc2_yaml
+ task: truthfulqa_kn_mc2
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: kn
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_ml_mc2.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc2_yaml
+ task: truthfulqa_ml_mc2
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: ml
+ training_split: null
+ validation_split: val
+ test_split: null
lm-evaluation/build/lib/lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_mr_mc2.yaml ADDED
@@ -0,0 +1,7 @@
+ include: _truthfulqa_mc2_yaml
+ task: truthfulqa_mr_mc2
+ dataset_path: alexandrainst/m_truthfulqa
+ dataset_name: mr
+ training_split: null
+ validation_split: val
+ test_split: null