applied-ai-018 committed
Commit 3e54ddc · verified · 1 Parent(s): 40cafe9

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. lm-evaluation/lm_eval/tasks/arithmetic/README.md +60 -0
  2. lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_1dc.yaml +18 -0
  3. lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_2da.yaml +5 -0
  4. lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_2dm.yaml +5 -0
  5. lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_2ds.yaml +5 -0
  6. lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_3da.yaml +5 -0
  7. lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_3ds.yaml +5 -0
  8. lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_4da.yaml +5 -0
  9. lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_4ds.yaml +5 -0
  10. lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_5da.yaml +5 -0
  11. lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_5ds.yaml +5 -0
  12. lm-evaluation/lm_eval/tasks/belebele/belebele_arb_Latn.yaml +4 -0
  13. lm-evaluation/lm_eval/tasks/belebele/belebele_ckb_Arab.yaml +4 -0
  14. lm-evaluation/lm_eval/tasks/belebele/belebele_ita_Latn.yaml +4 -0
  15. lm-evaluation/lm_eval/tasks/belebele/belebele_kin_Latn.yaml +4 -0
  16. lm-evaluation/lm_eval/tasks/belebele/belebele_mlt_Latn.yaml +4 -0
  17. lm-evaluation/lm_eval/tasks/belebele/belebele_npi_Deva.yaml +4 -0
  18. lm-evaluation/lm_eval/tasks/belebele/belebele_ssw_Latn.yaml +4 -0
  19. lm-evaluation/lm_eval/tasks/belebele/belebele_tur_Latn.yaml +4 -0
  20. lm-evaluation/lm_eval/tasks/belebele/belebele_urd_Arab.yaml +4 -0
  21. lm-evaluation/lm_eval/tasks/belebele/belebele_zho_Hant.yaml +4 -0
  22. lm-evaluation/lm_eval/tasks/fld/README.md +64 -0
  23. lm-evaluation/lm_eval/tasks/fld/fld_default.yaml +21 -0
  24. lm-evaluation/lm_eval/tasks/fld/fld_star.yaml +3 -0
  25. lm-evaluation/lm_eval/tasks/headqa/README.md +57 -0
  26. lm-evaluation/lm_eval/tasks/headqa/headqa_en.yaml +23 -0
  27. lm-evaluation/lm_eval/tasks/headqa/headqa_es.yaml +3 -0
  28. lm-evaluation/lm_eval/tasks/hellaswag/README.md +49 -0
  29. lm-evaluation/lm_eval/tasks/medqa/medqa.yaml +16 -0
  30. lm-evaluation/lm_eval/tasks/medqa/preprocess_medqa.py +13 -0
  31. lm-evaluation/lm_eval/tasks/race/README.md +62 -0
  32. lm-evaluation/lm_eval/tasks/race/preprocess_race.py +40 -0
  33. lm-evaluation/lm_eval/tasks/race/race.yaml +16 -0
  34. lm-evaluation/lm_eval/tasks/tmmluplus/default/_default_template_yaml +19 -0
  35. lm-evaluation/lm_eval/tasks/tmmluplus/default/_generate_configs.py +210 -0
  36. lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus.yaml +6 -0
  37. lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_administrative_law.yaml +7 -0
  38. lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_agriculture.yaml +7 -0
  39. lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_anti_money_laundering.yaml +7 -0
  40. lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_auditing.yaml +7 -0
  41. lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_business_management.yaml +7 -0
  42. lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_clinical_psychology.yaml +7 -0
  43. lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_culinary_skills.yaml +7 -0
  44. lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_dentistry.yaml +7 -0
  45. lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_economics.yaml +7 -0
  46. lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_education.yaml +7 -0
  47. lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_education_(profession_level).yaml +7 -0
  48. lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_finance_banking.yaml +7 -0
  49. lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_financial_analysis.yaml +7 -0
  50. lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_general_principles_of_law.yaml +7 -0
lm-evaluation/lm_eval/tasks/arithmetic/README.md ADDED
@@ -0,0 +1,60 @@
+ # Arithmetic
+
+ ### Paper
+
+ Title: `Language Models are Few-Shot Learners`
+ Abstract: https://arxiv.org/abs/2005.14165
+
+ A small battery of 10 tests that involve asking language models a simple arithmetic
+ problem in natural language.
+
+ Homepage: https://github.com/openai/gpt-3/tree/master/data
+
+
+ ### Citation
+
+ ```
+ @inproceedings{NEURIPS2020_1457c0d6,
+ author = {Brown, Tom and Mann, Benjamin and Ryder, Nick and Subbiah, Melanie and Kaplan, Jared D and Dhariwal, Prafulla and Neelakantan, Arvind and Shyam, Pranav and Sastry, Girish and Askell, Amanda and Agarwal, Sandhini and Herbert-Voss, Ariel and Krueger, Gretchen and Henighan, Tom and Child, Rewon and Ramesh, Aditya and Ziegler, Daniel and Wu, Jeffrey and Winter, Clemens and Hesse, Chris and Chen, Mark and Sigler, Eric and Litwin, Mateusz and Gray, Scott and Chess, Benjamin and Clark, Jack and Berner, Christopher and McCandlish, Sam and Radford, Alec and Sutskever, Ilya and Amodei, Dario},
+ booktitle = {Advances in Neural Information Processing Systems},
+ editor = {H. Larochelle and M. Ranzato and R. Hadsell and M. F. Balcan and H. Lin},
+ pages = {1877--1901},
+ publisher = {Curran Associates, Inc.},
+ title = {Language Models are Few-Shot Learners},
+ url = {https://proceedings.neurips.cc/paper/2020/file/1457c0d6bfcb4967418bfb8ac142f64a-Paper.pdf},
+ volume = {33},
+ year = {2020}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ * `arithmetic`: Evaluates `1dc` to `5ds`
+
+ #### Tasks
+
+ * `arithmetic_1dc`
+ * `arithmetic_2da`
+ * `arithmetic_2dm`
+ * `arithmetic_2ds`
+ * `arithmetic_3da`
+ * `arithmetic_3ds`
+ * `arithmetic_4da`
+ * `arithmetic_4ds`
+ * `arithmetic_5da`
+ * `arithmetic_5ds`
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [ ] Is the task an existing benchmark in the literature?
+ * [ ] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
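Editor's note: for reference, a minimal sketch of running this group programmatically through the harness's `simple_evaluate` entry point; the model name and few-shot setting below are illustrative placeholders, not part of the upstream README.

```python
# Minimal sketch (assumptions noted above): evaluate the `arithmetic` group,
# or a single subtask such as `arithmetic_2da`, with lm-eval.
from lm_eval import simple_evaluate

results = simple_evaluate(
    model="hf",                                    # HuggingFace backend
    model_args="pretrained=EleutherAI/pythia-160m",  # placeholder model
    tasks=["arithmetic"],                          # or e.g. ["arithmetic_2da"]
    num_fewshot=0,
)
print(results["results"])
```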
lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_1dc.yaml ADDED
@@ -0,0 +1,18 @@
+ group:
+   - arithmetic
+ task: arithmetic_1dc
+ dataset_path: EleutherAI/arithmetic
+ dataset_name: arithmetic_1dc
+ output_type: loglikelihood
+ validation_split: validation
+ test_split: null
+ doc_to_text: "{{context}}"
+ doc_to_target: "{{completion}}"
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 1.0
+ dataset_kwargs:
+   trust_remote_code: true
lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_2da.yaml ADDED
@@ -0,0 +1,5 @@
+ include: arithmetic_1dc.yaml
+ task: arithmetic_2da
+ dataset_name: arithmetic_2da
+ dataset_kwargs:
+   trust_remote_code: true
lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_2dm.yaml ADDED
@@ -0,0 +1,5 @@
+ include: arithmetic_1dc.yaml
+ task: arithmetic_2dm
+ dataset_name: arithmetic_2dm
+ dataset_kwargs:
+   trust_remote_code: true
lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_2ds.yaml ADDED
@@ -0,0 +1,5 @@
+ include: arithmetic_1dc.yaml
+ task: arithmetic_2ds
+ dataset_name: arithmetic_2ds
+ dataset_kwargs:
+   trust_remote_code: true
lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_3da.yaml ADDED
@@ -0,0 +1,5 @@
+ include: arithmetic_1dc.yaml
+ task: arithmetic_3da
+ dataset_name: arithmetic_3da
+ dataset_kwargs:
+   trust_remote_code: true
lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_3ds.yaml ADDED
@@ -0,0 +1,5 @@
+ include: arithmetic_1dc.yaml
+ task: arithmetic_3ds
+ dataset_name: arithmetic_3ds
+ dataset_kwargs:
+   trust_remote_code: true
lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_4da.yaml ADDED
@@ -0,0 +1,5 @@
+ include: arithmetic_1dc.yaml
+ task: arithmetic_4da
+ dataset_name: arithmetic_4da
+ dataset_kwargs:
+   trust_remote_code: true
lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_4ds.yaml ADDED
@@ -0,0 +1,5 @@
+ include: arithmetic_1dc.yaml
+ task: arithmetic_4ds
+ dataset_name: arithmetic_4ds
+ dataset_kwargs:
+   trust_remote_code: true
lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_5da.yaml ADDED
@@ -0,0 +1,5 @@
+ include: arithmetic_1dc.yaml
+ task: arithmetic_5da
+ dataset_name: arithmetic_5da
+ dataset_kwargs:
+   trust_remote_code: true
lm-evaluation/lm_eval/tasks/arithmetic/arithmetic_5ds.yaml ADDED
@@ -0,0 +1,5 @@
+ include: arithmetic_1dc.yaml
+ task: arithmetic_5ds
+ dataset_name: arithmetic_5ds
+ dataset_kwargs:
+   trust_remote_code: true
lm-evaluation/lm_eval/tasks/belebele/belebele_arb_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "arb_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_arb_Latn"
+ "test_split": "arb_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_ckb_Arab.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "ckb_Arab"
+ "include": "_default_template_yaml"
+ "task": "belebele_ckb_Arab"
+ "test_split": "ckb_Arab"
lm-evaluation/lm_eval/tasks/belebele/belebele_ita_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "ita_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_ita_Latn"
+ "test_split": "ita_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_kin_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "kin_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_kin_Latn"
+ "test_split": "kin_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_mlt_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "mlt_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_mlt_Latn"
+ "test_split": "mlt_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_npi_Deva.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "npi_Deva"
+ "include": "_default_template_yaml"
+ "task": "belebele_npi_Deva"
+ "test_split": "npi_Deva"
lm-evaluation/lm_eval/tasks/belebele/belebele_ssw_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "ssw_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_ssw_Latn"
+ "test_split": "ssw_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_tur_Latn.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "tur_Latn"
+ "include": "_default_template_yaml"
+ "task": "belebele_tur_Latn"
+ "test_split": "tur_Latn"
lm-evaluation/lm_eval/tasks/belebele/belebele_urd_Arab.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "urd_Arab"
+ "include": "_default_template_yaml"
+ "task": "belebele_urd_Arab"
+ "test_split": "urd_Arab"
lm-evaluation/lm_eval/tasks/belebele/belebele_zho_Hant.yaml ADDED
@@ -0,0 +1,4 @@
+ "fewshot_split": "zho_Hant"
+ "include": "_default_template_yaml"
+ "task": "belebele_zho_Hant"
+ "test_split": "zho_Hant"
lm-evaluation/lm_eval/tasks/fld/README.md ADDED
@@ -0,0 +1,64 @@
+ # FLD
+
+ ### Paper
+
+ Title: Learning Deductive Reasoning from Synthetic Corpus based on Formal Logic
+
+ Abstract: https://arxiv.org/abs/2308.07336
+
+ **FLD** (**F**ormal **L**ogic **D**eduction) is a deductive reasoning benchmark.
+ Given a set of facts and a hypothesis, an LLM is required to generate (i) proof steps to (dis-)prove the hypothesis, and (ii) an answer ("proved", "disproved", or "unknown").
+
+ Unique features of FLD are:
+ * It assesses the model's logical reasoning ability *isolated from knowledge*, as the facts are randomly constructed so that referring to existing knowledge never helps solve the task.
+ * It assesses diverse reasoning patterns (i.e., deduction rules), as it is based on formal logic theory.
+ * As a result, it is highly challenging. Indeed, even GPT-4 can solve only about half of the problems.
+
+ Homepage: https://github.com/hitachi-nlp/FLD
+
+
+ ### Citation
+
+ ```
+ @InProceedings{pmlr-v202-morishita23a,
+ title = {Learning Deductive Reasoning from Synthetic Corpus based on Formal Logic},
+ author = {Morishita, Terufumi and Morio, Gaku and Yamaguchi, Atsuki and Sogawa, Yasuhiro},
+ booktitle = {Proceedings of the 40th International Conference on Machine Learning},
+ pages = {25254--25274},
+ year = {2023},
+ editor = {Krause, Andreas and Brunskill, Emma and Cho, Kyunghyun and Engelhardt, Barbara and Sabato, Sivan and Scarlett, Jonathan},
+ volume = {202},
+ series = {Proceedings of Machine Learning Research},
+ month = {23--29 Jul},
+ publisher = {PMLR},
+ pdf = {https://proceedings.mlr.press/v202/morishita23a/morishita23a.pdf},
+ url = {https://proceedings.mlr.press/v202/morishita23a.html},
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ * `fld`
+
+ #### Tasks
+
+ This release is a simplified version of FLD in which the model is required to predict only the answer.
+ This setting is referred to as "answer accuracy" in the original paper.
+
+ * `fld_default` is a basic task based on [FLD.v2](https://huggingface.co/datasets/hitachi-nlp/FLD.v2/viewer/star)
+ * `fld_star` is a more challenging version based on [FLD.v2-star](https://huggingface.co/datasets/hitachi-nlp/FLD.v2/viewer/star)
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [x] Is the task an existing benchmark in the literature?
+ * [x] Have you referenced the original paper that introduced the task?
+ * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/lm_eval/tasks/fld/fld_default.yaml ADDED
@@ -0,0 +1,21 @@
+ group:
+   - fld
+ task: fld_default
+ dataset_path: hitachi-nlp/FLD.v2
+ dataset_name: default
+ training_split: train
+ validation_split: validation
+ test_split: test
+ doc_to_text: "Based on the provided facts ($context$), either prove or disprove the hypothesis or state that it is unknown. {{prompt_serial}}"
+ doc_to_target: world_assump_label
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+ filter_list:
+   - name: remove_whitespace
+     filter:
+       - function: remove_whitespace
+       - function: take_first
+ metadata:
+   version: 2.0
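Editor's note: a rough, standalone sketch (assumed semantics, not the harness's internal filter classes) of what the `remove_whitespace` and `take_first` steps in the `filter_list` above do to raw generations before `exact_match` compares them against `world_assump_label`; the generations and label value below are made up.

```python
# Sketch of the declared filter pipeline: trim whitespace from each generation,
# then keep only the first one, then compare exactly against the gold label.
def remove_whitespace(text: str) -> str:
    return text.strip()

def take_first(candidates: list) -> str:
    return candidates[0]

raw_generations = ["  PROVED\n", " UNKNOWN"]           # hypothetical sampled outputs
prediction = take_first([remove_whitespace(g) for g in raw_generations])
gold = "PROVED"                                         # hypothetical world_assump_label
print(prediction == gold)                               # exact_match -> True
```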
lm-evaluation/lm_eval/tasks/fld/fld_star.yaml ADDED
@@ -0,0 +1,3 @@
+ include: fld_default.yaml
+ task: fld_star
+ dataset_name: star
lm-evaluation/lm_eval/tasks/headqa/README.md ADDED
@@ -0,0 +1,57 @@
+ # HEAD-QA
+
+ ### Paper
+
+ HEAD-QA: A Healthcare Dataset for Complex Reasoning
+ https://arxiv.org/pdf/1906.04701.pdf
+
+ HEAD-QA is a multi-choice HEAlthcare Dataset. The questions come from exams required to access a specialized position in the
+ Spanish healthcare system, and are challenging even for highly specialized humans. They are designed by the Ministerio
+ de Sanidad, Consumo y Bienestar Social.
+ The dataset contains questions about the following topics: medicine, nursing, psychology, chemistry, pharmacology and biology.
+
+ Homepage: https://aghie.github.io/head-qa/
+
+
+ ### Citation
+
+ ```
+ @inproceedings{vilares-gomez-rodriguez-2019-head,
+ title = "{HEAD}-{QA}: A Healthcare Dataset for Complex Reasoning",
+ author = "Vilares, David and
+ G{\'o}mez-Rodr{\'i}guez, Carlos",
+ booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
+ month = jul,
+ year = "2019",
+ address = "Florence, Italy",
+ publisher = "Association for Computational Linguistics",
+ url = "https://www.aclweb.org/anthology/P19-1092",
+ doi = "10.18653/v1/P19-1092",
+ pages = "960--966",
+ abstract = "We present HEAD-QA, a multi-choice question answering testbed to encourage research on complex reasoning. The questions come from exams to access a specialized position in the Spanish healthcare system, and are challenging even for highly specialized humans. We then consider monolingual (Spanish) and cross-lingual (to English) experiments with information retrieval and neural techniques. We show that: (i) HEAD-QA challenges current methods, and (ii) the results lag well behind human performance, demonstrating its usefulness as a benchmark for future work.",
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ - `headqa`: Evaluates `headqa_en` and `headqa_es`
+
+ #### Tasks
+
+ * `headqa_en` - English variant of HEAD-QA
+ * `headqa_es` - Spanish variant of HEAD-QA
+
+ ### Checklist
+
+ * [x] Is the task an existing benchmark in the literature?
+ * [ ] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [x] Is the "Main" variant of this task clearly denoted?
+ * [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
+ * [x] Same as LM Evaluation Harness v0.3.0 implementation
lm-evaluation/lm_eval/tasks/headqa/headqa_en.yaml ADDED
@@ -0,0 +1,23 @@
+ group:
+   - headqa
+ task: headqa_en
+ dataset_path: EleutherAI/headqa
+ dataset_name: en
+ output_type: multiple_choice
+ training_split: train
+ validation_split: validation
+ test_split: test
+ doc_to_text: "Question: {{qtext}}\nAnswer:"
+ doc_to_target: "{{ra - 1}}"
+ doc_to_choice: "{{answers|map(attribute='atext')|list}}" # this will be cast to an int.
+ should_decontaminate: true
+ doc_to_decontamination_query: query
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 1.0
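Editor's note: the Jinja templates in this config can be hard to read at a glance. The sketch below renders them against a made-up HEAD-QA-style record (only the field names `qtext`, `ra`, and `answers[*].atext` follow the config; every value is invented) to show how the prompt, the choice list, and the 0-based target index come out.

```python
# Sketch only: render the YAML's Jinja templates against a toy record.
from jinja2 import Template

doc = {
    "qtext": "Which vitamin is synthesized in the skin under sunlight?",
    "ra": 3,  # right answer is 1-indexed in the source data, hence "{{ra - 1}}"
    "answers": [
        {"atext": "Vitamin A"},
        {"atext": "Vitamin B12"},
        {"atext": "Vitamin D"},
        {"atext": "Vitamin K"},
    ],
}

print(Template("Question: {{qtext}}\nAnswer:").render(**doc))
print(Template("{{answers|map(attribute='atext')|list}}").render(**doc))  # choice list
print(Template("{{ra - 1}}").render(**doc))                               # "2" -> Vitamin D
```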
lm-evaluation/lm_eval/tasks/headqa/headqa_es.yaml ADDED
@@ -0,0 +1,3 @@
+ include: headqa_en.yaml
+ task: headqa_es
+ dataset_name: es
lm-evaluation/lm_eval/tasks/hellaswag/README.md ADDED
@@ -0,0 +1,49 @@
+ # HellaSwag
+
+ ### Paper
+
+ Title: `HellaSwag: Can a Machine Really Finish Your Sentence?`
+
+ Abstract: https://arxiv.org/abs/1905.07830
+
+ Recent work by Zellers et al. (2018) introduced a new task of commonsense natural language inference: given an event description such as "A woman sits at a piano," a machine must select the most likely followup: "She sets her fingers on the keys." With the introduction of BERT, near human-level performance was reached. Does this mean that machines can perform human level commonsense inference?
+ In this paper, we show that commonsense inference still proves difficult for even state-of-the-art models, by presenting HellaSwag, a new challenge dataset. Though its questions are trivial for humans (>95% accuracy), state-of-the-art models struggle (<48%). We achieve this via Adversarial Filtering (AF), a data collection paradigm wherein a series of discriminators iteratively select an adversarial set of machine-generated wrong answers. AF proves to be surprisingly robust. The key insight is to scale up the length and complexity of the dataset examples towards a critical 'Goldilocks' zone wherein generated text is ridiculous to humans, yet often misclassified by state-of-the-art models.
+ Our construction of HellaSwag, and its resulting difficulty, sheds light on the inner workings of deep pretrained models. More broadly, it suggests a new path forward for NLP research, in which benchmarks co-evolve with the evolving state-of-the-art in an adversarial way, so as to present ever-harder challenges.
+
+ Homepage: `https://rowanzellers.com/hellaswag/`
+
+
+ ### Citation
+
+ ```
+ @inproceedings{zellers2019hellaswag,
+ title={HellaSwag: Can a Machine Really Finish Your Sentence?},
+ author={Zellers, Rowan and Holtzman, Ari and Bisk, Yonatan and Farhadi, Ali and Choi, Yejin},
+ booktitle={Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics},
+ year={2019}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ - Not part of a group yet
+
+ #### Tasks
+
+ - `hellaswag`
+
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [x] Is the task an existing benchmark in the literature?
+ * [x] Have you referenced the original paper that introduced the task?
+ * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/lm_eval/tasks/medqa/medqa.yaml ADDED
@@ -0,0 +1,16 @@
+ task: medqa_4options
+ dataset_path: GBaker/MedQA-USMLE-4-options-hf
+ output_type: multiple_choice
+ training_split: train
+ validation_split: validation
+ test_split: test
+ doc_to_text: !function preprocess_medqa.doc_to_text
+ doc_to_target: !function preprocess_medqa.doc_to_target
+ doc_to_choice: [ 'A', 'B', 'C', 'D' ]
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
lm-evaluation/lm_eval/tasks/medqa/preprocess_medqa.py ADDED
@@ -0,0 +1,13 @@
+ def doc_to_text(doc) -> str:
+     option_choices = {
+         "A": doc["ending0"],
+         "B": doc["ending1"],
+         "C": doc["ending2"],
+         "D": doc["ending3"],
+     }
+     answers = "".join((f"{k}. {v}\n") for k, v in option_choices.items())
+     return f"Question: {doc['sent1']}\n{answers}Answer:"
+
+
+ def doc_to_target(doc) -> int:
+     return doc["label"]
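Editor's note: a quick usage sketch for the two helpers above, using a made-up record with the field names the `GBaker/MedQA-USMLE-4-options-hf` config expects (`sent1`, `ending0`-`ending3`, `label`); all values are invented.

```python
# Illustrative only: show the prompt and target the helpers above produce.
from preprocess_medqa import doc_to_target, doc_to_text

toy_doc = {
    "sent1": "A 23-year-old woman presents with fatigue. What is the next best step?",
    "ending0": "Reassurance",
    "ending1": "Complete blood count",
    "ending2": "MRI of the brain",
    "ending3": "Immediate surgery",
    "label": 1,  # index of the correct option
}

print(doc_to_text(toy_doc))
# Question: A 23-year-old woman presents with fatigue. What is the next best step?
# A. Reassurance
# B. Complete blood count
# C. MRI of the brain
# D. Immediate surgery
# Answer:
print(doc_to_target(toy_doc))  # 1 -> choice "B" in the YAML's doc_to_choice list
```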
lm-evaluation/lm_eval/tasks/race/README.md ADDED
@@ -0,0 +1,62 @@
+ # RACE
+
+ ### Paper
+
+ Title: `RACE: Large-scale ReAding Comprehension Dataset From Examinations`
+
+ Abstract: https://arxiv.org/abs/1704.04683
+
+ RACE is a large-scale reading comprehension dataset with more than 28,000 passages
+ and nearly 100,000 questions. The dataset is collected from English examinations
+ in China, which are designed for middle school and high school students. The dataset
+ can serve as training and test sets for machine comprehension.
+
+ Homepage: https://www.cs.cmu.edu/~glai1/data/race/
+
+
+ ### Citation
+
+ ```
+ @inproceedings{lai-etal-2017-race,
+ title = "{RACE}: Large-scale {R}e{A}ding Comprehension Dataset From Examinations",
+ author = "Lai, Guokun and
+ Xie, Qizhe and
+ Liu, Hanxiao and
+ Yang, Yiming and
+ Hovy, Eduard",
+ editor = "Palmer, Martha and
+ Hwa, Rebecca and
+ Riedel, Sebastian",
+ booktitle = "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing",
+ month = sep,
+ year = "2017",
+ address = "Copenhagen, Denmark",
+ publisher = "Association for Computational Linguistics",
+ url = "https://aclanthology.org/D17-1082",
+ doi = "10.18653/v1/D17-1082",
+ pages = "785--794"
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ * Not part of a group yet.
+
+ #### Tasks
+
+ * `race`
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [ ] Is the task an existing benchmark in the literature?
+ * [ ] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/lm_eval/tasks/race/preprocess_race.py ADDED
@@ -0,0 +1,40 @@
+ import ast
+
+
+ def process_ast(string):
+     return ast.literal_eval(string)
+
+
+ def last_problem(doc):
+     return process_ast(doc["problems"])[-1]
+
+
+ def get_answer_option(problem):
+     letter_to_num = {"A": 0, "B": 1, "C": 2, "D": 3}
+     answer = letter_to_num[problem["answer"]]
+     return problem["options"][answer]
+
+
+ def doc_to_choice(doc):
+     problem = last_problem(doc)
+     choices = [problem["options"][i] for i in range(4)]
+     return choices
+
+
+ def doc_to_text(doc):
+     text = "Article: " + doc["article"] + "\n\n"
+     for problem in process_ast(doc["problems"])[:-1]:
+         if problem["question"][-6:] == " _ .":
+             text += problem["question"][-5:] + get_answer_option(problem) + "\n"
+         else:
+             question = "Question: " + problem["question"] + "\n"
+             answer = "Answer: " + get_answer_option(problem) + "\n"
+             text += question + answer
+     text += last_problem(doc)["question"]
+     return text
+
+
+ def doc_to_target(doc):
+     letter_to_num = {"A": 0, "B": 1, "C": 2, "D": 3}
+     answer = letter_to_num[last_problem(doc)["answer"]]
+     return answer
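Editor's note: a small usage sketch for the helpers above. As the code shows, the `problems` field arrives as a Python-literal string, which is why `process_ast` wraps `ast.literal_eval`; the record below is invented for illustration.

```python
# Illustrative only: feed a toy RACE-style record through the helpers above.
from preprocess_race import doc_to_choice, doc_to_target, doc_to_text

toy_doc = {
    "article": "Tom went to the market and bought some apples for his sister.",
    "problems": str([
        {
            "question": "Where did Tom go?",
            "options": ["The park", "The market", "School", "Home"],
            "answer": "B",
        },
        {
            "question": "What did Tom buy?",
            "options": ["Apples", "Pears", "Bread", "Milk"],
            "answer": "A",
        },
    ]),
}

print(doc_to_text(toy_doc))    # article, the earlier Q/A pair, then the final question
print(doc_to_choice(toy_doc))  # ['Apples', 'Pears', 'Bread', 'Milk']
print(doc_to_target(toy_doc))  # 0
```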
lm-evaluation/lm_eval/tasks/race/race.yaml ADDED
@@ -0,0 +1,16 @@
+ task: race
+ dataset_path: EleutherAI/race
+ dataset_name: high
+ output_type: multiple_choice
+ test_split: test
+ doc_to_text: !function preprocess_race.doc_to_text
+ doc_to_target: !function preprocess_race.doc_to_target
+ doc_to_choice: !function preprocess_race.doc_to_choice
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 2.0
+ dataset_kwargs:
+   trust_remote_code: true
lm-evaluation/lm_eval/tasks/tmmluplus/default/_default_template_yaml ADDED
@@ -0,0 +1,19 @@
+ dataset_path: ZoneTwelve/tmmluplus # a copy of `ikala/tmmluplus`
+ test_split: test
+ fewshot_split: train
+ fewshot_config:
+   sampler: first_n
+ output_type: multiple_choice
+ process_docs: !function utils.process_docs
+ doc_to_text: "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:"
+ doc_to_choice: ["A", "B", "C", "D"]
+ doc_to_target: answer
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 0.1
lm-evaluation/lm_eval/tasks/tmmluplus/default/_generate_configs.py ADDED
@@ -0,0 +1,210 @@
+ """
+ Take in a YAML, and output all "other" splits with this YAML
+ """
+ import argparse
+ import os
+
+ import pandas as pd
+ import yaml
+ from tqdm import tqdm
+
+
+ # Copy from https://github.com/iKala/ievals/blob/main/ievals/settings.py
+ # from TMMLU+ official example
+ categories = {
+     "STEM": [
+         "physics",
+         "chemistry",
+         "biology",
+         "computer science",
+         "math",
+         "engineering",
+     ],
+     "humanities": ["history", "philosophy", "law"],
+     "social_sciences": [
+         "politics",
+         "culture",
+         "economics",
+         "geography",
+         "psychology",
+         "education",
+     ],
+     "other": ["other", "business", "health"],  # (business, health, misc.)
+ }
+
+ task_list = [
+     "engineering_math",
+     "dentistry",
+     "traditional_chinese_medicine_clinical_medicine",
+     "clinical_psychology",
+     "technical",
+     "culinary_skills",
+     "mechanical",
+     "logic_reasoning",
+     "real_estate",
+     "general_principles_of_law",
+     "finance_banking",
+     "anti_money_laundering",
+     "ttqav2",
+     "marketing_management",
+     "business_management",
+     "organic_chemistry",
+     "advance_chemistry",
+     "physics",
+     "secondary_physics",
+     "human_behavior",
+     "national_protection",
+     "jce_humanities",
+     "politic_science",
+     "agriculture",
+     "official_document_management",
+     "financial_analysis",
+     "pharmacy",
+     "educational_psychology",
+     "statistics_and_machine_learning",
+     "management_accounting",
+     "introduction_to_law",
+     "computer_science",
+     "veterinary_pathology",
+     "accounting",
+     "fire_science",
+     "optometry",
+     "insurance_studies",
+     "pharmacology",
+     "taxation",
+     "education_(profession_level)",
+     "economics",
+     "veterinary_pharmacology",
+     "nautical_science",
+     "occupational_therapy_for_psychological_disorders",
+     "trust_practice",
+     "geography_of_taiwan",
+     "physical_education",
+     "auditing",
+     "administrative_law",
+     "basic_medical_science",
+     "macroeconomics",
+     "trade",
+     "chinese_language_and_literature",
+     "tve_design",
+     "junior_science_exam",
+     "junior_math_exam",
+     "junior_chinese_exam",
+     "junior_social_studies",
+     "tve_mathematics",
+     "tve_chinese_language",
+     "tve_natural_sciences",
+     "junior_chemistry",
+     "music",
+     "education",
+     "three_principles_of_people",
+     "taiwanese_hokkien",
+ ]
+ subject2name = {}
+ # subject2category = {}
+ SUBJECTS = {}
+
+
+ def parse_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--base_yaml_path", required=True)
+     parser.add_argument("--save_prefix_path", default="tmmluplus")
+     parser.add_argument("--cot_prompt_path", default=None)
+     parser.add_argument("--task_prefix", default="")
+     parser.add_argument("--group_prefix", default="")
+     parser.add_argument("--subject_file", default="subject.tsv")
+     return parser.parse_args()
+
+
+ if __name__ == "__main__":
+     args = parse_args()
+     from pathlib import Path
+
+     # Initialization
+     SUBJECT_FILE = Path(__file__).parent / Path(args.subject_file)
+
+     df = pd.read_csv(SUBJECT_FILE, delimiter="\t")
+
+     for _, row in df.iterrows():
+         for _c in categories:
+             if row["subject"] in SUBJECTS:
+                 raise ValueError("Duplicate tasks.")
+             if row["category"] in categories[_c]:  # append new item into SUBJECTS
+                 SUBJECTS[row["subject"]] = _c
+                 subject2name[row["subject"]] = row["name"]
+                 break
+     # End of SUBJECTS initialization
+
+     # get filename of base_yaml so we can `"include": ` it in our "other" YAMLs.
+     base_yaml_name = os.path.split(args.base_yaml_path)[-1]
+     with open(args.base_yaml_path) as f:
+         base_yaml = yaml.full_load(f)
+
+     if args.cot_prompt_path is not None:
+         import json
+
+         with open(args.cot_prompt_path) as f:
+             cot_file = json.load(f)
+
+     ALL_CATEGORIES = []
+     for subject, category in tqdm(SUBJECTS.items()):
+         if category not in ALL_CATEGORIES:
+             ALL_CATEGORIES.append(category)
+
+         if args.cot_prompt_path is not None:
+             description = cot_file[subject]
+         else:
+             name_of_subject = subject2name[subject].replace("_", " ")
+             description = f"以下為{name_of_subject}的單選題,請提供正確答案的選項。\n\n"
+             # description = f"The following are multiple choice questions (with answers) about {' '.join(subject.split('_'))}.\n\n"
+
+         yaml_dict = {
+             "include": base_yaml_name,
+             "group": f"tmmluplus_{args.task_prefix}_{category}"
+             if args.task_prefix != ""
+             else f"tmmluplus_{category}",
+             "group_alias": category.replace("_", " "),
+             "task": f"tmmluplus_{args.task_prefix}_{subject}"
+             if args.task_prefix != ""
+             else f"tmmluplus_{subject}",
+             "task_alias": subject.replace("_", " "),
+             "dataset_name": subject,
+             "description": description,
+         }
+
+         file_save_path = args.save_prefix_path + f"_{subject}.yaml"
+         # eval_logger.info(f"Saving yaml for subset {subject} to {file_save_path}")
+         with open(file_save_path, "w") as yaml_file:
+             yaml.dump(
+                 yaml_dict,
+                 yaml_file,
+                 # width=float("inf"),
+                 allow_unicode=True,
+                 default_style='"',
+             )
+
+     if args.task_prefix != "":
+         mmlu_subcategories = [
+             f"tmmluplus_{args.task_prefix}_{category}" for category in ALL_CATEGORIES
+         ]
+     else:
+         mmlu_subcategories = [f"tmmluplus_{category}" for category in ALL_CATEGORIES]
+
+     if args.group_prefix != "":
+         file_save_path = args.group_prefix + ".yaml"
+     else:
+         file_save_path = args.save_prefix_path + ".yaml"
+
+     # eval_logger.info(f"Saving benchmark config to {file_save_path}")
+     with open(file_save_path, "w") as yaml_file:
+         yaml.dump(
+             {
+                 "group": f"tmmluplus_{args.task_prefix}"
+                 if args.task_prefix != ""
+                 else "tmmluplus",
+                 "task": mmlu_subcategories,
+             },
+             yaml_file,
+             indent=4,
+             default_flow_style=False,
+         )
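Editor's note: the generator above is driven by a `subject.tsv` that sits next to it (not part of this 50-file view). From the code, it needs `subject`, `name`, and `category` columns, where `category` must be one of the fine-grained labels listed in `categories`. A hedged sketch of the expected layout and of regenerating the per-subject YAMLs follows; the two rows are illustrative, not the full table.

```python
# Sketch under assumptions: the real subject.tsv ships with the task folder; these
# rows only illustrate the columns _generate_configs.py reads. Per `categories`,
# "law" maps to the tmmluplus_humanities group and "health" to tmmluplus_other.
import pandas as pd

pd.DataFrame(
    [
        {"subject": "administrative_law", "name": "行政法", "category": "law"},
        {"subject": "dentistry", "name": "牙醫學", "category": "health"},
    ]
).to_csv("subject.tsv", sep="\t", index=False)

# Regenerate the YAMLs from this directory, e.g.:
#   python _generate_configs.py --base_yaml_path _default_template_yaml
```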
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus.yaml ADDED
@@ -0,0 +1,6 @@
+ group: tmmluplus
+ task:
+   - tmmluplus_other
+   - tmmluplus_social_sciences
+   - tmmluplus_humanities
+   - tmmluplus_STEM
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_administrative_law.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "administrative_law"
+ "description": "以下為行政法的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_humanities"
+ "group_alias": "humanities"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_administrative_law"
+ "task_alias": "administrative law"
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_agriculture.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "agriculture"
+ "description": "以下為農業的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_other"
+ "group_alias": "other"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_agriculture"
+ "task_alias": "agriculture"
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_anti_money_laundering.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "anti_money_laundering"
+ "description": "以下為洗錢防制的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_humanities"
+ "group_alias": "humanities"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_anti_money_laundering"
+ "task_alias": "anti money laundering"
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_auditing.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "auditing"
+ "description": "以下為審計學的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_other"
+ "group_alias": "other"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_auditing"
+ "task_alias": "auditing"
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_business_management.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "business_management"
+ "description": "以下為企業管理的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_other"
+ "group_alias": "other"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_business_management"
+ "task_alias": "business management"
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_clinical_psychology.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "clinical_psychology"
+ "description": "以下為臨床心理學的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_social_sciences"
+ "group_alias": "social sciences"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_clinical_psychology"
+ "task_alias": "clinical psychology"
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_culinary_skills.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "culinary_skills"
+ "description": "以下為餐旅的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_other"
+ "group_alias": "other"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_culinary_skills"
+ "task_alias": "culinary skills"
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_dentistry.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "dentistry"
+ "description": "以下為牙醫學的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_other"
+ "group_alias": "other"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_dentistry"
+ "task_alias": "dentistry"
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_economics.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "economics"
+ "description": "以下為經濟學的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_social_sciences"
+ "group_alias": "social sciences"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_economics"
+ "task_alias": "economics"
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_education.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "education"
+ "description": "以下為教育常識的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_social_sciences"
+ "group_alias": "social sciences"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_education"
+ "task_alias": "education"
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_education_(profession_level).yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "education_(profession_level)"
+ "description": "以下為教育專業的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_social_sciences"
+ "group_alias": "social sciences"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_education_(profession_level)"
+ "task_alias": "education (profession level)"
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_finance_banking.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "finance_banking"
+ "description": "以下為金融與法規的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_other"
+ "group_alias": "other"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_finance_banking"
+ "task_alias": "finance banking"
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_financial_analysis.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "financial_analysis"
+ "description": "以下為財務分析的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_other"
+ "group_alias": "other"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_financial_analysis"
+ "task_alias": "financial analysis"
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_general_principles_of_law.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "general_principles_of_law"
+ "description": "以下為法學大意的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_humanities"
+ "group_alias": "humanities"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_general_principles_of_law"
+ "task_alias": "general principles of law"