applied-ai-018 committed on
Commit 4bbd3bb · verified · 1 Parent(s): 7bcece7

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es +4 -0
  2. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiaux.yaml +4 -0
  3. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiauxenf.yaml +4 -0
  4. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu +4 -0
  5. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_ejteknikari.yaml +4 -0
  6. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuauxeu.yaml +4 -0
  7. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza6e.yaml +4 -0
  8. lm-evaluation/build/lib/lm_eval/tasks/eus_exams/utils.py +15 -0
  9. lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/README.md +54 -0
  10. lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/commonsense.yaml +15 -0
  11. lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/deontology.yaml +9 -0
  12. lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/justice.yaml +9 -0
  13. lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/utilitarianism.yaml +12 -0
  14. lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/utilitarianism_original_yaml +16 -0
  15. lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/utils.py +25 -0
  16. lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/virtue.yaml +10 -0
  17. lm-evaluation/build/lib/lm_eval/tasks/kormedmcqa/README.md +47 -0
  18. lm-evaluation/build/lib/lm_eval/tasks/kormedmcqa/kormedmcqa_doctor.yaml +27 -0
  19. lm-evaluation/build/lib/lm_eval/tasks/kormedmcqa/kormedmcqa_nurse.yaml +27 -0
  20. lm-evaluation/build/lib/lm_eval/tasks/kormedmcqa/kormedmcqa_pharm.yaml +27 -0
  21. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/README.md +48 -0
  22. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/_hellaswag_yaml +21 -0
  23. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ca.yaml +6 -0
  24. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_da.yaml +6 -0
  25. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_de.yaml +6 -0
  26. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_eu.yaml +6 -0
  27. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_gu.yaml +6 -0
  28. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_hi.yaml +6 -0
  29. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_hr.yaml +6 -0
  30. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_hy.yaml +6 -0
  31. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_id.yaml +6 -0
  32. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_it.yaml +6 -0
  33. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_kn.yaml +6 -0
  34. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ml.yaml +6 -0
  35. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_mr.yaml +6 -0
  36. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ne.yaml +6 -0
  37. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_nl.yaml +6 -0
  38. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_pt.yaml +6 -0
  39. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ro.yaml +6 -0
  40. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ru.yaml +6 -0
  41. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_sk.yaml +6 -0
  42. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_sr.yaml +6 -0
  43. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_sv.yaml +6 -0
  44. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ta.yaml +6 -0
  45. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_vi.yaml +6 -0
  46. lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/utils.py +25 -0
  47. lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_ar.yaml +4 -0
  48. lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_da.yaml +4 -0
  49. lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_hr.yaml +4 -0
  50. lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_is.yaml +4 -0
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es ADDED
@@ -0,0 +1,4 @@
+ include: eus_exams
+ group:
+   - eus_exams_es
+ doc_to_text: "Pregunta: {{question}}\nA: {{candidates[0]}}\nB: {{candidates[1]}}\nC: {{candidates[2]}}\nD: {{candidates[3]}}\nRespuesta:"
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiaux.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_opeosakiaux
+ include: eus_exams_es
+ task: eus_exams_es_opeosakiaux
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiauxenf.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: es_opeosakiauxenf
+ include: eus_exams_es
+ task: eus_exams_es_opeosakiauxenf
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu ADDED
@@ -0,0 +1,4 @@
+ include: eus_exams
+ group:
+   - eus_exams_eu
+ doc_to_text: "Galdera: {{question}}\nA: {{candidates[0]}}\nB: {{candidates[1]}}\nC: {{candidates[2]}}\nD: {{candidates[3]}}\nErantzuna:"
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_ejteknikari.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: eu_ejteknikari
+ include: eus_exams_eu
+ task: eus_exams_eu_ejteknikari
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_opeehuauxeu.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: eu_opeehuauxeu
+ include: eus_exams_eu
+ task: eus_exams_eu_opeehuauxeu
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza6e.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: eu_osakidetza6e
+ include: eus_exams_eu
+ task: eus_exams_eu_osakidetza6e
lm-evaluation/build/lib/lm_eval/tasks/eus_exams/utils.py ADDED
@@ -0,0 +1,15 @@
+ import datasets
+
+
+ def process_docs(dataset: datasets.Dataset):
+     """Filter out examples with no answer."""
+
+     def valid_example(example: dict) -> bool:
+         """Check if an example is valid."""
+         if example["answer"] not in [0, 1, 2, 3]:
+             return False
+         if example["candidates"] == ["", "", "", ""]:
+             return False
+         return True
+
+     return dataset.filter(valid_example)
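For context (not part of the diff), here is a small, self-contained sketch of what this filter does: it repeats `process_docs` verbatim and runs it on a toy in-memory dataset with hypothetical rows, assuming the `datasets` library is installed:

```python
import datasets


def process_docs(dataset: datasets.Dataset):
    """Filter out examples with no answer (copied from the utils.py above)."""

    def valid_example(example: dict) -> bool:
        # Keep only rows with an in-range answer index and non-empty candidates.
        if example["answer"] not in [0, 1, 2, 3]:
            return False
        if example["candidates"] == ["", "", "", ""]:
            return False
        return True

    return dataset.filter(valid_example)


toy = datasets.Dataset.from_dict({
    "question": ["q1", "q2"],  # hypothetical rows
    "candidates": [["a", "b", "c", "d"], ["", "", "", ""]],
    "answer": [1, 9],
})
print(len(process_docs(toy)))  # 1 -- the second row fails both checks and is dropped
```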
lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/README.md ADDED
@@ -0,0 +1,54 @@
+ # ETHICS Dataset
+
+ ### Paper
+
+ Aligning AI With Shared Human Values
+ https://arxiv.org/abs/2008.02275
+
+ The ETHICS dataset is a benchmark that spans concepts in justice, well-being,
+ duties, virtues, and commonsense morality. Models predict widespread moral
+ judgments about diverse text scenarios. This requires connecting physical and
+ social world knowledge to value judgements, a capability that may enable us
+ to steer chatbot outputs or eventually regularize open-ended reinforcement
+ learning agents.
+
+ Homepage: https://github.com/hendrycks/ethics
+
+ ### Citation
+
+ ```
+ @article{hendrycks2021ethics,
+     title={Aligning AI With Shared Human Values},
+     author={Dan Hendrycks and Collin Burns and Steven Basart and Andrew Critch and Jerry Li and Dawn Song and Jacob Steinhardt},
+     journal={Proceedings of the International Conference on Learning Representations (ICLR)},
+     year={2021}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ - `hendrycks_ethics`
+
+ #### Tasks
+
+ * `ethics_cm`
+ * `ethics_deontology`
+ * `ethics_justice`
+ * `ethics_utilitarianism`
+ * (MISSING) `ethics_utilitarianism_original`
+ * `ethics_virtue`
+
+ ### Checklist
+
+ * [x] Is the task an existing benchmark in the literature?
+ * [ ] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [x] Is the "Main" variant of this task clearly denoted?
+ * [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
+ * [ ] Matches v0.3.0 of Eval Harness
lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/commonsense.yaml ADDED
@@ -0,0 +1,15 @@
+ group:
+   - hendrycks_ethics
+ task: ethics_cm
+ dataset_path: EleutherAI/hendrycks_ethics
+ dataset_name: commonsense
+ output_type: multiple_choice
+ training_split: train
+ test_split: test
+ doc_to_text: "{{input}}\nQuestion: Is this wrong?\nAnswer:"
+ doc_to_target: label
+ doc_to_choice: ['no', 'yes']
+ metric_list:
+   - metric: acc
+ metadata:
+   version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/deontology.yaml ADDED
@@ -0,0 +1,9 @@
+ include: commonsense.yaml
+ task: ethics_deontology
+ dataset_name: deontology
+ doc_to_text: "Question: Would most people believe this reasonable or unreasonable to say? \"{{scenario}} {{excuse.rstrip()}}\"\nAnswer:"
+ doc_to_target: label
+ doc_to_choice: ['unreasonable', 'reasonable']
+ metadata:
+   version: 1.0
+ # TODO: implement exact-match metric for this subset
lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/justice.yaml ADDED
@@ -0,0 +1,9 @@
+ include: deontology.yaml
+ group:
+   - hendrycks_ethics
+ task: ethics_justice
+ dataset_name: justice
+ doc_to_text: "Question: Would most people believe this reasonable or unreasonable to say? \"{{scenario}}\"\nAnswer:"
+ # TODO: impl. exact match for this and deontology
+ metadata:
+   version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/utilitarianism.yaml ADDED
@@ -0,0 +1,12 @@
+ include: commonsense.yaml
+ group:
+   - hendrycks_ethics
+ task: ethics_utilitarianism
+ dataset_name: utilitarianism
+ doc_to_text: !function utils.doc_to_text
+ doc_to_target: !function utils.doc_to_target
+ doc_to_choice: ['no', 'yes']
+ metric_list:
+   - metric: acc
+ metadata:
+   version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/utilitarianism_original_yaml ADDED
@@ -0,0 +1,16 @@
+ # group:
+ #   - hendrycks_ethics
+ # task: ethics_utilitarianism_original
+ # dataset_path: hails/hendrycks_ethics
+ # dataset_name: utilitarianism
+ # output_type: winograd_schema
+ # fewshot_split: null # TODO: implement a special fewshot split for this dataset subsets
+ # test_split: test
+ # template_aliases: #"{% set answer_choices = range(1, 11)|list %}"
+ # doc_to_text: 'Activity: "{{activity}}"\nRating:'
+ # doc_to_target: "{{answer_choices[label]}}"
+ # metric_list:
+ #   - metric: acc
+ # TODO: we want this to be implemented as a winograd_schema task type, actually
+ # metadata:
+ #   version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/utils.py ADDED
@@ -0,0 +1,25 @@
+ import random
+
+
+ ### Utils for `ethics_utilitarianism` task below
+ def _preproc_doc(doc):
+     rnd = random.Random(doc["activity"])
+     scenarios = [doc["activity"], doc["baseline"]]
+     ordering = [0, 1]
+     rnd.shuffle(ordering)
+     doc = {
+         "scenarios": [scenarios[ordering[0]], scenarios[ordering[1]]],
+         # The correct scenario is always first
+         "label": int(ordering.index(0) == 0),
+     }
+     return doc
+
+
+ def doc_to_text(doc) -> str:
+     doc = _preproc_doc(doc)
+     return f"Scenario 1: {doc['scenarios'][0]}\nScenario 2: {doc['scenarios'][1]}\nQuestion: Is Scenario 1 preferable?\nAnswer:"
+
+
+ def doc_to_target(doc):
+     doc = _preproc_doc(doc)
+     return doc["label"]
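A small usage sketch (hypothetical document, with the `_preproc_doc` logic repeated inline): because the RNG is seeded with the activity text, the shuffle is deterministic per document, so `doc_to_text` and `doc_to_target` always agree on the same ordering, and the target is 1 exactly when the original `activity` ends up as Scenario 1:

```python
import random

doc = {  # hypothetical utilitarianism-style document
    "activity": "I helped my neighbour carry the groceries.",
    "baseline": "I ignored my neighbour struggling with the groceries.",
}

rnd = random.Random(doc["activity"])          # seeded by the activity text
scenarios = [doc["activity"], doc["baseline"]]
ordering = [0, 1]
rnd.shuffle(ordering)                          # same shuffle on every call for this doc

prompt = (
    f"Scenario 1: {scenarios[ordering[0]]}\n"
    f"Scenario 2: {scenarios[ordering[1]]}\n"
    "Question: Is Scenario 1 preferable?\nAnswer:"
)
label = int(ordering.index(0) == 0)            # 1 iff the original activity is Scenario 1
print(prompt)
print("target:", label)
```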
lm-evaluation/build/lib/lm_eval/tasks/hendrycks_ethics/virtue.yaml ADDED
@@ -0,0 +1,10 @@
+ include: commonsense.yaml
+ group:
+   - hendrycks_ethics
+ task: ethics_virtue
+ dataset_name: virtue
+ doc_to_text: "Sentence: {{scenario}}\nQuestion: Does the character in this sentence exhibit the trait \"{{trait}}\"?\nAnswer:"
+ doc_to_target: label
+ doc_to_choice: ['no', 'yes']
+ metadata:
+   version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/kormedmcqa/README.md ADDED
@@ -0,0 +1,47 @@
+ # KorMedMCQA
+
+ ### Paper
+
+ Title: `KorMedMCQA: Multi-Choice Question Answering Benchmark for Korean Healthcare Professional Licensing Examinations`
+
+ Abstract: `We introduce KorMedMCQA, the first Korean multiple-choice question answering (MCQA) benchmark derived from Korean healthcare professional licensing examinations, covering from the year 2012 to year 2023. This dataset consists of a selection of questions from the license examinations for doctors, nurses, and pharmacists, featuring a diverse array of subjects. We conduct baseline experiments on various large language models, including proprietary/open-source, multilingual/Korean-additional pretrained, and clinical context pretrained models, highlighting the potential for further enhancements. We make our data publicly available on HuggingFace and provide a evaluation script via LM-Harness, inviting further exploration and advancement in Korean healthcare environments.`
+
+
+ Paper: https://arxiv.org/abs/2403.01469
+
+ Homepage: https://huggingface.co/datasets/sean0042/KorMedMCQA
+
+
+ ### Citation
+
+ ```
+ @article{kweon2024kormedmcqa,
+     title={KorMedMCQA: Multi-Choice Question Answering Benchmark for Korean Healthcare Professional Licensing Examinations},
+     author={Sunjun Kweon and Byungjin Choi and Minkyu Kim and Rae Woong Park and Edward Choi},
+     journal={arXiv preprint arXiv:2403.01469},
+     year={2024}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ * `kormedmcqa`: Runs `kormedmcqa_doctor`, `kormedmcqa_nurse`, and `kormedmcqa_pharm`.
+
+ #### Tasks
+
+ * `kormedmcqa_doctor`: `Official Korean Doctor Examination`
+ * `kormedmcqa_nurse`: `Official Korean Nurse Examination`
+ * `kormedmcqa_pharm`: `Official Korean Pharmacist Examination`
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [x] Is the task an existing benchmark in the literature?
+ * [x] Have you referenced the original paper that introduced the task?
+ * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
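As a usage note (not part of the commit): once these configs are installed, the group can be run from Python roughly as below. This is a sketch assuming a recent lm-evaluation-harness (v0.4-style API) and a hypothetical placeholder model; the exact entry point may differ by version.

```python
import lm_eval

# Runs the kormedmcqa group, which expands to kormedmcqa_doctor,
# kormedmcqa_nurse, and kormedmcqa_pharm; few-shot examples come from
# the dev split via the first_n sampler, per the task configs.
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=gpt2",  # hypothetical small model, for illustration only
    tasks=["kormedmcqa"],
    num_fewshot=5,
)
print(results["results"])
```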
lm-evaluation/build/lib/lm_eval/tasks/kormedmcqa/kormedmcqa_doctor.yaml ADDED
@@ -0,0 +1,27 @@
+ group: kormedmcqa
+ task : kormedmcqa_doctor
+ dataset_path : sean0042/KorMedMCQA
+ dataset_name : doctor
+ test_split : test
+ fewshot_split : dev
+ fewshot_config:
+   sampler: first_n
+ output_type: generate_until
+ doc_to_text: "{{question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nE. {{E}}\n정답:"
+ doc_to_target: "{{['A', 'B', 'C', 'D', 'E'][answer-1]}}"
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+     ignore_case: true
+     ignore_punctuation: true
+     regexes_to_ignore:
+       - " "
+ generation_kwargs:
+   until:
+     - "Q:"
+     - "\n\n"
+     - "</s>"
+     - "."
+   do_sample: false
+   temperature: 0.0
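For clarity (not part of the diff): `doc_to_target` above is a Jinja2 expression that maps the dataset's 1-indexed `answer` column to a letter, which `exact_match` then compares against the generated text with the normalizations listed (case- and punctuation-insensitive, spaces ignored). A minimal sketch of the mapping:

```python
from jinja2 import Template

doc_to_target = "{{['A', 'B', 'C', 'D', 'E'][answer-1]}}"
for answer in range(1, 6):
    # jinja2 evaluates the list literal and index, so answer=1 -> "A", ..., 5 -> "E"
    print(answer, "->", Template(doc_to_target).render(answer=answer))
```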
lm-evaluation/build/lib/lm_eval/tasks/kormedmcqa/kormedmcqa_nurse.yaml ADDED
@@ -0,0 +1,27 @@
+ group: kormedmcqa
+ task : kormedmcqa_nurse
+ dataset_path : sean0042/KorMedMCQA
+ dataset_name : nurse
+ test_split : test
+ fewshot_split : dev
+ fewshot_config:
+   sampler: first_n
+ output_type: generate_until
+ doc_to_text: "{{question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nE. {{E}}\n정답:"
+ doc_to_target: "{{['A', 'B', 'C', 'D', 'E'][answer-1]}}"
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+     ignore_case: true
+     ignore_punctuation: true
+     regexes_to_ignore:
+       - " "
+ generation_kwargs:
+   until:
+     - "Q:"
+     - "\n\n"
+     - "</s>"
+     - "."
+   do_sample: false
+   temperature: 0.0
lm-evaluation/build/lib/lm_eval/tasks/kormedmcqa/kormedmcqa_pharm.yaml ADDED
@@ -0,0 +1,27 @@
+ group: kormedmcqa
+ task : kormedmcqa_pharm
+ dataset_path : sean0042/KorMedMCQA
+ dataset_name : pharm
+ test_split : test
+ fewshot_split : dev
+ fewshot_config:
+   sampler: first_n
+ output_type: generate_until
+ doc_to_text: "{{question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\nE. {{E}}\n정답:"
+ doc_to_target: "{{['A', 'B', 'C', 'D', 'E'][answer-1]}}"
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+     ignore_case: true
+     ignore_punctuation: true
+     regexes_to_ignore:
+       - " "
+ generation_kwargs:
+   until:
+     - "Q:"
+     - "\n\n"
+     - "</s>"
+     - "."
+   do_sample: false
+   temperature: 0.0
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/README.md ADDED
@@ -0,0 +1,48 @@
+ # Multilingual HellaSwag
+
+ ### Paper
+
+ Title: `Okapi: Instruction-tuned Large Language Models in Multiple Languages with Reinforcement Learning from Human Feedback`
+
+ Abstract: https://arxiv.org/abs/2307.16039
+
+ A key technology for the development of large language models (LLMs) involves instruction tuning that helps align the models' responses with human expectations to realize impressive learning abilities. Two major approaches for instruction tuning characterize supervised fine-tuning (SFT) and reinforcement learning from human feedback (RLHF), which are currently applied to produce the best commercial LLMs (e.g., ChatGPT). To improve the accessibility of LLMs for research and development efforts, various instruction-tuned open-source LLMs have also been introduced recently, e.g., Alpaca, Vicuna, to name a few. However, existing open-source LLMs have only been instruction-tuned for English and a few popular languages, thus hindering their impacts and accessibility to many other languages in the world. Among a few very recent work to explore instruction tuning for LLMs in multiple languages, SFT has been used as the only approach to instruction-tune LLMs for multiple languages. This has left a significant gap for fine-tuned LLMs based on RLHF in diverse languages and raised important questions on how RLHF can boost the performance of multilingual instruction tuning. To overcome this issue, we present Okapi, the first system with instruction-tuned LLMs based on RLHF for multiple languages. Okapi introduces instruction and response-ranked data in 26 diverse languages to facilitate the experiments and development of future multilingual LLM research. We also present benchmark datasets to enable the evaluation of generative LLMs in multiple languages. Our experiments demonstrate the advantages of RLHF for multilingual instruction over SFT for different base models and datasets. Our framework and resources are released at this https URL.
+
+ Homepage: `https://github.com/nlp-uoregon/Okapi`
+
+
+ ### Citation
+
+ ```
+ @article{dac2023okapi,
+     title={Okapi: Instruction-tuned Large Language Models in Multiple Languages with Reinforcement Learning from Human Feedback},
+     author={Dac Lai, Viet and Van Nguyen, Chien and Ngo, Nghia Trung and Nguyen, Thuat and Dernoncourt, Franck and Rossi, Ryan A and Nguyen, Thien Huu},
+     journal={arXiv e-prints},
+     pages={arXiv--2307},
+     year={2023}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ - hellaswag_multilingual
+
+ #### Tasks
+
+ - `hellaswag_{ar,bn,ca,da,de,es,eu,fr,gu,hi,hr,hu,hy,id,it,kn,ml,mr,ne,nl,pt,ro,ru,sk,sr,sv,ta,te,uk,vi}`
+
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [x] Is the task an existing benchmark in the literature?
+ * [x] Have you referenced the original paper that introduced the task?
+ * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/_hellaswag_yaml ADDED
@@ -0,0 +1,21 @@
+ group:
+   - hellaswag_multilingual
+ dataset_path: null
+ dataset_name: null
+ output_type: multiple_choice
+ training_split: null
+ validation_split: validation
+ test_split: null
+ process_docs: !function utils.process_docs
+ doc_to_text: "query"
+ doc_to_target: "{{label.lstrip()}}"
+ doc_to_choice: "choices"
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 1.0
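The per-language files below set only `task`, `dataset_path`, `dataset_name`, and the splits; everything else (prompt fields, `process_docs`, metrics) is inherited from this base via `include`. Conceptually, the child's keys are layered on top of the included base. The sketch below is a rough illustration of that layering, not the harness's actual config loader, and assumes PyYAML is installed:

```python
import yaml

# Keys copied from the base config above (the !function entry is omitted here,
# since it needs the harness's custom YAML loader).
base = yaml.safe_load("""
output_type: multiple_choice
validation_split: validation
doc_to_text: query
doc_to_choice: choices
""")
# Keys from a per-language child config such as hellaswag_ca.yaml.
child = yaml.safe_load("""
task: hellaswag_ca
dataset_path: alexandrainst/m_hellaswag
dataset_name: ca
validation_split: val
""")
merged = {**base, **child}          # child keys override the included base
print(merged["validation_split"])   # val
```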
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ca.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_ca
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: ca
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_da.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_da
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: da
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_de.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_de
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: de
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_eu.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_eu
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: eu
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_gu.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_gu
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: gu
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_hi.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_hi
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: hi
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_hr.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_hr
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: hr
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_hy.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_hy
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: hy
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_id.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_id
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: id
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_it.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_it
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: it
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_kn.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_kn
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: kn
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ml.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_ml
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: ml
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_mr.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_mr
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: mr
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ne.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_ne
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: ne
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_nl.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_nl
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: nl
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_pt.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_pt
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: pt
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ro.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_ro
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: ro
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ru.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_ru
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: ru
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_sk.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_sk
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: sk
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_sr.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_sr
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: sr
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_sv.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_sv
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: sv
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ta.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_ta
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: ta
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_vi.yaml ADDED
@@ -0,0 +1,6 @@
+ include: _hellaswag_yaml
+ task: hellaswag_vi
+ dataset_path: alexandrainst/m_hellaswag
+ dataset_name: vi
+ training_split: null
+ validation_split: val
lm-evaluation/build/lib/lm_eval/tasks/okapi/hellaswag_multilingual/utils.py ADDED
@@ -0,0 +1,25 @@
+ import re
+
+ import datasets
+
+
+ def preprocess(text):
+     text = text.strip()
+     # NOTE: Brackets are artifacts of the WikiHow dataset portion of HellaSwag.
+     text = text.replace(" [title]", ". ")
+     text = re.sub("\\[.*?\\]", "", text)
+     text = text.replace("  ", " ")
+     return text
+
+
+ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
+     def _process_doc(doc):
+         ctx = doc["ctx_a"] + " " + doc["ctx_b"].capitalize()
+         out_doc = {
+             "query": preprocess(doc["activity_label"] + ": " + ctx),
+             "choices": [preprocess(ending) for ending in doc["endings"]],
+             "gold": int(doc["label"]),
+         }
+         return out_doc
+
+     return dataset.map(_process_doc)
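To make the mapping concrete (hypothetical row, with `preprocess` repeated from above): `_process_doc` flattens each HellaSwag-style record into a cleaned `query`, the four candidate `choices`, and an integer `gold` index.

```python
import re


def preprocess(text):  # copied from the utils.py above
    text = text.strip()
    text = text.replace(" [title]", ". ")
    text = re.sub("\\[.*?\\]", "", text)
    text = text.replace("  ", " ")
    return text


raw = {  # hypothetical m_hellaswag-style row
    "activity_label": "Removing ice from car",
    "ctx_a": "A man is standing next to a car covered in snow.",
    "ctx_b": "he",
    "endings": ["[step] Scrapes the ice off the windshield.", "walks away.",
                "opens the hood.", "waves at the camera."],
    "label": "0",
}

ctx = raw["ctx_a"] + " " + raw["ctx_b"].capitalize()
query = preprocess(raw["activity_label"] + ": " + ctx)
choices = [preprocess(e) for e in raw["endings"]]
gold = int(raw["label"])
print(query)    # "Removing ice from car: A man is standing next to a car covered in snow. He"
print(choices)  # bracketed "[step]"-style markers are stripped by preprocess
print(gold)     # 0
```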
lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_ar.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: ar
+ include: _default_yaml
+ task: m_mmlu_ar
lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_da.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: da
+ include: _default_yaml
+ task: m_mmlu_da
lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_hr.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: hr
+ include: _default_yaml
+ task: m_mmlu_hr
lm-evaluation/build/lib/lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_is.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: is
+ include: _default_yaml
+ task: m_mmlu_is