applied-ai-018 committed
Commit faac595 · verified · 1 Parent(s): 01aebd1

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. lm-evaluation/build/lib/lm_eval/tasks/csatqa/_default_csatqa_yaml +17 -0
  2. lm-evaluation/build/lib/lm_eval/tasks/csatqa/utils.py +20 -0
  3. lm-evaluation/build/lib/lm_eval/tasks/gpqa/cot_zeroshot/_generate_configs.py +26 -0
  4. lm-evaluation/build/lib/lm_eval/tasks/gpqa/cot_zeroshot/_gpqa_cot_zeroshot_yaml +38 -0
  5. lm-evaluation/build/lib/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_diamond_cot_zeroshot.yaml +4 -0
  6. lm-evaluation/build/lib/lm_eval/tasks/gpqa/cot_zeroshot/utils.py +39 -0
  7. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/README.md +54 -0
  8. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_biology.yaml +3 -0
  9. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_chemical_engineering.yaml +3 -0
  10. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_chemistry.yaml +3 -0
  11. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_computer_science.yaml +3 -0
  12. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_criminal_law.yaml +3 -0
  13. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_economics.yaml +3 -0
  14. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_electrical_engineering.yaml +3 -0
  15. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_energy_management.yaml +3 -0
  16. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_environmental_science.yaml +3 -0
  17. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_food_processing.yaml +3 -0
  18. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_health.yaml +3 -0
  19. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_industrial_engineer.yaml +3 -0
  20. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_korean_history.yaml +3 -0
  21. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_law.yaml +3 -0
  22. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_machine_design_and_manufacturing.yaml +3 -0
  23. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_management.yaml +3 -0
  24. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_maritime_engineering.yaml +3 -0
  25. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_math.yaml +3 -0
  26. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_mechanical_engineering.yaml +3 -0
  27. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_nondestructive_testing.yaml +3 -0
  28. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_patent.yaml +3 -0
  29. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_political_science_and_sociology.yaml +3 -0
  30. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_public_safety.yaml +3 -0
  31. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_railway_and_automotive_engineering.yaml +3 -0
  32. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_refrigerating_machinery.yaml +3 -0
  33. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_taxation.yaml +3 -0
  34. lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_telecommunications_and_wireless_technology.yaml +3 -0
  35. lm-evaluation/build/lib/lm_eval/tasks/qasper/README.md +63 -0
  36. lm-evaluation/build/lib/lm_eval/tasks/qasper/bool.yaml +14 -0
  37. lm-evaluation/build/lib/lm_eval/tasks/qasper/freeform.yaml +18 -0
  38. lm-evaluation/build/lib/lm_eval/tasks/qasper/metrics.py +41 -0
  39. lm-evaluation/build/lib/lm_eval/tasks/qasper/utils.py +72 -0
  40. lm-evaluation/build/lib/lm_eval/tasks/siqa/README.md +37 -0
  41. lm-evaluation/build/lib/lm_eval/tasks/siqa/siqa.yaml +19 -0
  42. lm-evaluation/build/lib/lm_eval/tasks/translation/iwslt2017_ar-en.yaml +13 -0
  43. lm-evaluation/build/lib/lm_eval/tasks/translation/iwslt2017_en-ar.yaml +13 -0
  44. lm-evaluation/build/lib/lm_eval/tasks/translation/utils.py +118 -0
  45. lm-evaluation/build/lib/lm_eval/tasks/translation/wmt14_fr-en.yaml +14 -0
  46. lm-evaluation/build/lib/lm_eval/tasks/translation/wmt16_de-en.yaml +14 -0
  47. lm-evaluation/build/lib/lm_eval/tasks/translation/wmt16_en-de.yaml +14 -0
  48. lm-evaluation/build/lib/lm_eval/tasks/translation/wmt16_en-ro.yaml +14 -0
  49. lm-evaluation/build/lib/lm_eval/tasks/translation/wmt16_ro-en.yaml +14 -0
  50. lm-evaluation/build/lib/lm_eval/tasks/unscramble/README.md +57 -0
lm-evaluation/build/lib/lm_eval/tasks/csatqa/_default_csatqa_yaml ADDED
@@ -0,0 +1,17 @@
+ group: csatqa
+ dataset_path: EleutherAI/csatqa
+ test_split: test
+ output_type: multiple_choice
+ process_docs: !function utils.process_docs
+ doc_to_text: "{{question}}"
+ doc_to_choice: "{{choices}}"
+ doc_to_target: "{{gold}}"
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 0.0
lm-evaluation/build/lib/lm_eval/tasks/csatqa/utils.py ADDED
@@ -0,0 +1,20 @@
+ import datasets
+
+
+ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
+     def _process_doc(doc):
+         instruction = f"""다음을 읽고 정답으로 알맞은 것을 고르시요.
+ ### Context: {doc["context"]}
+ ### Question: {doc["question"]}
+ ### Options:
+ (1) {doc['option#1']}\n(2) {doc["option#2"]}\n(3) {doc["option#3"]}\n(4) {doc['option#4']}\n(5) {doc['option#5']}
+ ### Answer: 주어진 문제의 정답은"""
+
+         out_doc = {
+             "question": instruction,
+             "choices": ["(1)", "(2)", "(3)", "(4)", "(5)"],
+             "gold": int(doc["gold"]) - 1,
+         }
+         return out_doc
+
+     return dataset.map(_process_doc)
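A tiny sketch (hypothetical values, not part of the commit) of the key mapping in `_process_doc` above: the five options are folded into the prompt text, while the 1-indexed `gold` field becomes a 0-indexed target over the fixed choice strings.

```python
# Illustration only: CSATQA's 1-indexed gold answer maps to a 0-indexed
# target over the fixed choice strings used by the multiple_choice task.
choices = ["(1)", "(2)", "(3)", "(4)", "(5)"]
gold = 3  # hypothetical raw label
print(choices[int(gold) - 1])  # prints "(3)"
```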
lm-evaluation/build/lib/lm_eval/tasks/gpqa/cot_zeroshot/_generate_configs.py ADDED
@@ -0,0 +1,26 @@
+ import yaml
+ from tqdm import tqdm
+
+
+ def main() -> None:
+     subset = ["extended", "diamond", "main"]
+     setting = "cot_zeroshot"
+     for task in tqdm(subset):
+         file_name = f"gpqa_{task}_{setting}.yaml"
+         try:
+             with open(f"{file_name}", "w") as f:
+                 f.write("# Generated by _generate_configs.py\n")
+                 yaml.dump(
+                     {
+                         "include": f"_gpqa_{setting}_yaml",
+                         "task": f"gpqa_{task}_{setting}",
+                         "dataset_name": f"gpqa_{task}",
+                     },
+                     f,
+                 )
+         except FileExistsError:
+             pass
+
+
+ if __name__ == "__main__":
+     main()
lm-evaluation/build/lib/lm_eval/tasks/gpqa/cot_zeroshot/_gpqa_cot_zeroshot_yaml ADDED
@@ -0,0 +1,38 @@
+ dataset_path: Idavidrein/gpqa
+ group: gpqa
+ output_type: generate_until
+ process_docs: !function utils.process_docs
+ training_split: train
+ # Because huggingface dataset only has train split
+ validation_split: train
+ test_split: null
+ doc_to_text: "What is the correct answer to this question:{{Question}}\nChoices:\n(A) {{choice1}}\n(B) {{choice2}}\n(C) {{choice3}}\n(D) {{choice4}}\nLet's think step by step: "
+ doc_to_target: answer
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "regex"
+         regex_pattern: "(?<=The answer is )(.*)(?=.)"
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: "multi_choice_regex"
+         group_select: -1
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
+ generation_kwargs:
+   until:
+     - "</s>"
+   do_sample: false
+   temperature: 0.0
+ num_fewshot: 0
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+     ignore_case: true
+     ignore_punctuation: true
+ metadata:
+   version: 1.0
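For reference, a minimal sketch (not part of the commit) of what the "strict-match" filter's regex above captures, applied to a hypothetical model completion:

```python
# Illustration only: the strict-match pattern from the yaml above pulls out
# whatever follows "The answer is " in the generated text.
import re

completion = "Let's think step by step: the band gap rules out A and D. The answer is (B)."
match = re.search(r"(?<=The answer is )(.*)(?=.)", completion)
print(match.group(1) if match else None)  # prints "(B)"
```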
lm-evaluation/build/lib/lm_eval/tasks/gpqa/cot_zeroshot/gpqa_diamond_cot_zeroshot.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: gpqa_diamond
+ include: _gpqa_cot_zeroshot_yaml
+ task: gpqa_diamond_cot_zeroshot
lm-evaluation/build/lib/lm_eval/tasks/gpqa/cot_zeroshot/utils.py ADDED
@@ -0,0 +1,39 @@
+ import random
+ import re
+
+ import datasets
+
+
+ def preprocess(text):
+     if text is None:
+         return " "
+     text = text.strip()
+     text = text.replace(" [title]", ". ")
+     text = re.sub("\\[.*?\\]", "", text)
+     text = text.replace("  ", " ")
+     return text
+
+
+ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
+     def _process_doc(doc):
+         choices = [
+             preprocess(doc["Incorrect Answer 1"]),
+             preprocess(doc["Incorrect Answer 2"]),
+             preprocess(doc["Incorrect Answer 3"]),
+             preprocess(doc["Correct Answer"]),
+         ]
+
+         random.shuffle(choices)
+         correct_answer_index = choices.index(preprocess(doc["Correct Answer"]))
+
+         out_doc = {
+             "choice1": choices[0],
+             "choice2": choices[1],
+             "choice3": choices[2],
+             "choice4": choices[3],
+             "choices": [choices[0], choices[1], choices[2], choices[3]],
+             "answer": f"({chr(65 + correct_answer_index)})",
+         }
+         return out_doc
+
+     return dataset.map(_process_doc)
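A quick illustration (hypothetical values, not part of the commit) of how the answer string is derived above: since the choices are shuffled, the correct answer's position in the shuffled list is looked up and mapped to a letter.

```python
# Illustration only: index 0..3 of the shuffled choice list maps to "(A)".."(D)"
# via chr(65 + index), matching the answer format used in the yaml prompt.
choices = ["4.2 eV", "1.1 eV", "2.6 eV", "3.4 eV"]  # hypothetical, already shuffled
correct_answer_index = choices.index("2.6 eV")
print(f"({chr(65 + correct_answer_index)})")  # prints "(C)"
```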
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/README.md ADDED
@@ -0,0 +1,54 @@
+ # k_mmlu
+
+ ### Paper
+
+ Title: `KMMLU : Measuring Massive Multitask Language Understanding in Korean`
+
+ Abstract: `We propose KMMLU, a new Korean benchmark with 35,030 expert-level multiple-choice questions across 45 subjects ranging from humanities to STEM. Unlike previous Korean benchmarks that are translated from existing English benchmarks, KMMLU is collected from original Korean exams, capturing linguistic and cultural aspects of the Korean language. We test 26 publicly available and proprietary LLMs, identifying significant room for improvement. The best publicly available model achieves 50.54% on KMMLU, far below the average human performance of 62.6%. This model was primarily trained for English and Chinese, not Korean. Current LLMs tailored to Korean, such as Polyglot-Ko, perform far worse. Surprisingly, even the most capable proprietary LLMs, e.g., GPT-4 and HyperCLOVA X, achieve 59.95% and 53.40%, respectively. This suggests that further work is needed to improve Korean LLMs, and KMMLU offers the right tool to track this progress. We make our dataset publicly available on the Hugging Face Hub and integrate the benchmark into EleutherAI's Language Model Evaluation Harness.`
+
+ Note: lm-eval-harness uses the micro average by default. To replicate the test results in the paper, take the macro average of the scores evaluated with lm-eval-harness.
+
+ Homepage: https://huggingface.co/datasets/HAERAE-HUB/KMMLU
+
+ ### Citation
+
+ @article{son2024kmmlu,
+   title={KMMLU: Measuring Massive Multitask Language Understanding in Korean},
+   author={Guijin Son and Hanwool Lee and Sungdong Kim and Seungone Kim and Niklas Muennighoff and Taekyoon Choi and Cheonbok Park and Kang Min Yoo and Stella Biderman},
+   journal={arXiv preprint arXiv:2402.11548},
+   year={2024}
+ }
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ * `kmmlu`: 'All 45 subjects of the KMMLU dataset, evaluated following the methodology in MMLU's original implementation'
+ * `kmmlu_direct`: 'kmmlu_direct solves questions using a straightforward *generative* multiple-choice question-answering approach'
+ * `kmmlu_hard`: 'kmmlu_hard comprises difficult questions that at least one proprietary model failed to answer correctly using the log-likelihood approach'
+ * `kmmlu_hard_direct`: 'kmmlu_hard_direct solves questions of kmmlu_hard using the direct (generative) approach'
+ * `kmmlu_hard_cot`: 'kmmlu_hard_cot includes 5-shot exemplars for the chain-of-thought approach'
+
+ #### Tasks
+
+ The following tasks evaluate subjects in the KMMLU dataset:
+ - `kmmlu_direct_{subject_english}`
+
+ The following tasks evaluate subjects in the KMMLU-Hard dataset:
+ - `kmmlu_hard_{subject_english}`
+ - `kmmlu_hard_cot_{subject_english}`
+ - `kmmlu_hard_direct_{subject_english}`
+
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [x] Is the task an existing benchmark in the literature?
+ * [x] Have you referenced the original paper that introduced the task?
+ * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
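To make the micro- vs. macro-average note in the README above concrete, here is a minimal sketch (hypothetical subject scores, not part of the commit) of the macro average the paper reports: per-subject accuracies averaged with equal weight, regardless of how many questions each subject has.

```python
# Illustration only: macro average over per-subject accuracies (hypothetical values).
per_subject_acc = {
    "kmmlu_direct_biology": 0.41,
    "kmmlu_direct_law": 0.37,
    "kmmlu_direct_math": 0.29,
}
macro_avg = sum(per_subject_acc.values()) / len(per_subject_acc)
print(f"macro average: {macro_avg:.4f}")  # 0.3567
```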
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_biology.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Biology
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_biology
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_chemical_engineering.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Chemical-Engineering
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_chemical_engineering
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_chemistry.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Chemistry
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_chemistry
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_computer_science.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Computer-Science
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_computer_science
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_criminal_law.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Criminal-Law
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_criminal_law
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_economics.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Economics
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_economics
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_electrical_engineering.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Electrical-Engineering
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_electrical_engineering
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_energy_management.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Energy-Management
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_energy_management
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_environmental_science.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Environmental-Science
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_environmental_science
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_food_processing.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Food-Processing
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_food_processing
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_health.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Health
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_health
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_industrial_engineer.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Industrial-Engineer
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_industrial_engineer
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_korean_history.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Korean-History
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_korean_history
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_law.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Law
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_law
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_machine_design_and_manufacturing.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Machine-Design-and-Manufacturing
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_machine_design_and_manufacturing
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_management.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Management
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_management
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_maritime_engineering.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Maritime-Engineering
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_maritime_engineering
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_math.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Math
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_math
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_mechanical_engineering.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Mechanical-Engineering
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_mechanical_engineering
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_nondestructive_testing.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Nondestructive-Testing
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_nondestructive_testing
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_patent.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Patent
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_patent
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_political_science_and_sociology.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Political-Science-and-Sociology
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_political_science_and_sociology
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_public_safety.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Public-Safety
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_public_safety
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_railway_and_automotive_engineering.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Railway-and-Automotive-Engineering
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_railway_and_automotive_engineering
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_refrigerating_machinery.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Refrigerating-Machinery
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_refrigerating_machinery
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_taxation.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Taxation
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_taxation
lm-evaluation/build/lib/lm_eval/tasks/kmmlu/direct/kmmlu_direct_telecommunications_and_wireless_technology.yaml ADDED
@@ -0,0 +1,3 @@
+ dataset_name: Telecommunications-and-Wireless-Technology
+ include: _direct_kmmlu_yaml
+ task: kmmlu_direct_telecommunications_and_wireless_technology
lm-evaluation/build/lib/lm_eval/tasks/qasper/README.md ADDED
@@ -0,0 +1,63 @@
+ # QASPER
+
+ ### Paper
+
+ Title: `A Dataset of Information-Seeking Questions and Answers Anchored in Research Papers`
+
+ Abstract: https://arxiv.org/abs/2105.03011
+
+ QASPER is a dataset of 5,049 questions over 1,585 Natural Language Processing papers.
+ Each question is written by an NLP practitioner who read only the title and abstract
+ of the corresponding paper, and the question seeks information present in the full
+ text. The questions are then answered by a separate set of NLP practitioners who also
+ provide supporting evidence to answers.
+
+ Homepage: https://allenai.org/data/qasper
+
+ ### Citation
+
+ ```
+ @article{DBLP:journals/corr/abs-2105-03011,
+   author     = {Pradeep Dasigi and
+                 Kyle Lo and
+                 Iz Beltagy and
+                 Arman Cohan and
+                 Noah A. Smith and
+                 Matt Gardner},
+   title      = {A Dataset of Information-Seeking Questions and Answers Anchored in
+                 Research Papers},
+   journal    = {CoRR},
+   volume     = {abs/2105.03011},
+   year       = {2021},
+   url        = {https://arxiv.org/abs/2105.03011},
+   eprinttype = {arXiv},
+   eprint     = {2105.03011},
+   timestamp  = {Fri, 14 May 2021 12:13:30 +0200},
+   biburl     = {https://dblp.org/rec/journals/corr/abs-2105-03011.bib},
+   bibsource  = {dblp computer science bibliography, https://dblp.org}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ * `qasper`: executes both `qasper_bool` and `qasper_freeform`
+
+ #### Tasks
+
+ * `qasper_bool`: Multiple choice task that evaluates the task with `answer_type="bool"`
+ * `qasper_freeform`: Greedy generation task that evaluates the samples from the task with `answer_type="free form answer"`
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [ ] Is the task an existing benchmark in the literature?
+ * [ ] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ group: qasper
2
+ task: qasper_bool
3
+ dataset_path: allenai/qasper
4
+ output_type: multiple_choice
5
+ training_split: train
6
+ validation_split: validation
7
+ process_docs: !function utils.process_docs_bool
8
+ doc_to_text: "TITLE: {{title}}\nABSTRACT: {{abstract}}\n\nQ: {{question}}\n\nA:"
9
+ doc_to_target: 1
10
+ doc_to_choice: ["no", "yes"]
11
+ metric_list:
12
+ - metric: f1
13
+ metadata:
14
+ version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/qasper/freeform.yaml ADDED
@@ -0,0 +1,18 @@
+ group: qasper
+ task: qasper_freeform
+ dataset_path: allenai/qasper
+ output_type: generate_until
+ training_split: train
+ validation_split: validation
+ process_docs: !function utils.process_docs_freeform
+ doc_to_text: "TITLE: {{title}}\nABSTRACT: {{abstract}}\n\nQ: {{question}}\n\nA:"
+ doc_to_target: answer
+ generation_kwargs:
+   until:
+     - "\n"
+ metric_list:
+   - metric: !function metrics.f1_abstractive
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 2.0
lm-evaluation/build/lib/lm_eval/tasks/qasper/metrics.py ADDED
@@ -0,0 +1,41 @@
+ import re
+ import string
+ from collections import Counter
+
+
+ def normalize_answer(s):
+     """
+     Taken from the official evaluation script for v1.1 of the SQuAD dataset.
+     Lower text and remove punctuation, articles and extra whitespace.
+     """
+
+     def remove_articles(text):
+         return re.sub(r"\b(a|an|the)\b", " ", text)
+
+     def white_space_fix(text):
+         return " ".join(text.split())
+
+     def remove_punc(text):
+         exclude = set(string.punctuation)
+         return "".join(ch for ch in text if ch not in exclude)
+
+     def lower(text):
+         return text.lower()
+
+     return white_space_fix(remove_articles(remove_punc(lower(s))))
+
+
+ def f1_abstractive(predictions, references):
+     """
+     Taken from the official evaluation script for v1.1 of the SQuAD dataset.
+     """
+     prediction_tokens = normalize_answer(predictions[0]).split()
+     references_tokens = normalize_answer(references[0]).split()
+     common = Counter(prediction_tokens) & Counter(references_tokens)
+     num_same = sum(common.values())
+     if num_same == 0:
+         return 0
+     precision = 1.0 * num_same / len(prediction_tokens)
+     recall = 1.0 * num_same / len(references_tokens)
+     f1 = (2 * precision * recall) / (precision + recall)
+     return f1
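A small usage sketch (not part of the commit) of `f1_abstractive` above: it computes SQuAD-style token-level F1 between the first prediction and the first reference after normalization.

```python
# Illustration only: assumes this runs alongside the metrics.py added above.
from metrics import f1_abstractive

pred = ["The model relies on multi-head attention."]
ref = ["Multi-head attention is what the model relies on."]
# Normalization lowercases, strips punctuation/articles, and collapses whitespace,
# so all 5 prediction tokens overlap with the 7 reference tokens.
print(round(f1_abstractive(pred, ref), 3))  # 0.833 (precision 5/5, recall 5/7)
```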
lm-evaluation/build/lib/lm_eval/tasks/qasper/utils.py ADDED
@@ -0,0 +1,72 @@
+ from functools import partial
+
+ from datasets import Dataset
+
+
+ def process_docs(dataset, set_answer_type="bool"):
+     FEATURES = ["title", "abstract", "question", "answer", "answer_type"]
+
+     def _categorise_answer(answer_blob):
+         if answer_blob["unanswerable"]:
+             answer = "unanswerable"
+             answer_type = "unanswerable"
+             return answer, answer_type
+         elif answer_blob["yes_no"]:
+             answer = "yes"
+             answer_type = "bool"
+             return answer, answer_type
+         elif answer_blob["free_form_answer"]:
+             answer = answer_blob["free_form_answer"]
+             answer_type = "free form answer"
+             return answer, answer_type
+         elif answer_blob["extractive_spans"]:
+             answer = answer_blob["extractive_spans"]
+             answer_type = "extractive_spans"
+             return answer, answer_type
+         elif answer_blob["yes_no"] is False:
+             answer = "no"
+             answer_type = "bool"
+             return answer, answer_type
+
+     def _flatten(doc):
+         """Given a `doc`, flatten it out so that each JSON blob
+         contains exactly one question and one answer. Logic taken from
+         the reference implementation available at
+         https://github.com/allenai/qasper-led-baseline/blob/main/scripts/evaluator.py
+         """
+         obs_list = {
+             "title": [],
+             "abstract": [],
+             "question": [],
+             "answer": [],
+             "answer_type": [],
+         }
+         title = doc.pop("title")
+         abstract = doc.pop("abstract")
+         for question, answer_list in zip(doc["qas"]["question"], doc["qas"]["answers"]):
+             for answer_blob in answer_list["answer"]:
+                 answer, answer_type = _categorise_answer(answer_blob)
+                 if answer_type == set_answer_type:
+                     obs_list["title"].append(title)
+                     obs_list["abstract"].append(abstract)
+                     obs_list["question"].append(question)
+                     obs_list["answer_type"].append(answer_type)
+                     if isinstance(answer, list):
+                         answer = ", ".join(answer)
+                     obs_list["answer"].append(answer)
+
+         return obs_list
+
+     dataset = dataset.map(
+         _flatten,
+         remove_columns=[key for key in dataset.features.keys() if key not in FEATURES],
+     )
+     new_dataset = {}
+     for key in dataset.features.keys():
+         new_dataset[key] = [x for row in dataset[key] for x in row]
+
+     return Dataset.from_dict(new_dataset)
+
+
+ process_docs_bool = partial(process_docs, set_answer_type="bool")
+ process_docs_freeform = partial(process_docs, set_answer_type="free form answer")
lm-evaluation/build/lib/lm_eval/tasks/siqa/README.md ADDED
@@ -0,0 +1,37 @@
+ # Social IQA
+
+ ### Paper
+
+ Title: Social IQA: Commonsense Reasoning about Social Interactions
+
+ Abstract: https://arxiv.org/abs/1904.09728
+
+ > We introduce Social IQa, the first large-scale benchmark for commonsense reasoning about social situations. Social IQa contains 38,000 multiple choice questions for probing emotional and social intelligence in a variety of everyday situations (e.g., Q: "Jordan wanted to tell Tracy a secret, so Jordan leaned towards Tracy. Why did Jordan do this?" A: "Make sure no one else could hear"). Through crowdsourcing, we collect commonsense questions along with correct and incorrect answers about social interactions, using a new framework that mitigates stylistic artifacts in incorrect answers by asking workers to provide the right answer to a different but related question. Empirical results show that our benchmark is challenging for existing question-answering models based on pretrained language models, compared to human performance (>20% gap). Notably, we further establish Social IQa as a resource for transfer learning of commonsense knowledge, achieving state-of-the-art performance on multiple commonsense reasoning tasks (Winograd Schemas, COPA).
+
+ Homepage: https://allenai.org/data/socialiqa
+
+
+ ### Citation
+
+ ```
+ @inproceedings{sap2019social,
+   title={Social IQa: Commonsense Reasoning about Social Interactions},
+   author={Sap, Maarten and Rashkin, Hannah and Chen, Derek and Le Bras, Ronan and Choi, Yejin},
+   booktitle={Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)},
+   pages={4463--4473},
+   year={2019}
+ }
+ ```
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [X] Is the task an existing benchmark in the literature?
+ * [X] Have you referenced the original paper that introduced the task?
+ * [X] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? The original paper doesn't have an associated implementation, but there is an official entry in [BigBench](https://github.com/google/BIG-bench/tree/main/bigbench/benchmark_tasks/social_iqa). I use the same prompting format as BigBench.
+
+
+ If other tasks on this dataset are already supported:
+ * [ ] Is the "Main" variant of this task clearly denoted?
+ * [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/build/lib/lm_eval/tasks/siqa/siqa.yaml ADDED
@@ -0,0 +1,19 @@
+ task: social_iqa
+ dataset_path: social_i_qa
+ dataset_name: null
+ output_type: multiple_choice
+ training_split: train
+ validation_split: validation
+ doc_to_text: "Q: {{context}} {{question}}\nA:"
+ target_delimiter: " "
+ doc_to_choice:
+   - "{{answerA}}"
+   - "{{answerB}}"
+   - "{{answerC}}"
+ doc_to_target: "{{ (label|int) - 1 }}"
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 0.0
lm-evaluation/build/lib/lm_eval/tasks/translation/iwslt2017_ar-en.yaml ADDED
@@ -0,0 +1,13 @@
+ # Generated by utils.py
+ dataset_name: iwslt2017-en-ar
+ dataset_path: iwslt2017
+ doc_to_target: ' {{translation["en"]}}'
+ doc_to_text: 'Arabic phrase: {{translation["ar"]}}
+
+   English phrase:'
+ group:
+ - generate_until
+ - translation
+ - iwslt2017
+ include: wmt_common_yaml
+ task: iwslt2017-ar-en
lm-evaluation/build/lib/lm_eval/tasks/translation/iwslt2017_en-ar.yaml ADDED
@@ -0,0 +1,13 @@
+ # Generated by utils.py
+ dataset_name: iwslt2017-en-ar
+ dataset_path: iwslt2017
+ doc_to_target: ' {{translation["ar"]}}'
+ doc_to_text: 'English phrase: {{translation["en"]}}
+
+   Arabic phrase:'
+ group:
+ - generate_until
+ - translation
+ - iwslt2017
+ include: wmt_common_yaml
+ task: iwslt2017-en-ar
lm-evaluation/build/lib/lm_eval/tasks/translation/utils.py ADDED
@@ -0,0 +1,118 @@
+ import argparse
+
+ import yaml
+
+
+ try:
+     import pycountry
+ except ModuleNotFoundError:
+     raise Exception(
+         "`pycountry` is required for generating translation task prompt templates. \
+ please install pycountry via pip install lm-eval[multilingual] or pip install -e .[multilingual]",
+     )
+
+
+ # Different translation benchmarks included in the library. Mostly WMT.
+ # These correspond to dataset names (subsets) on HuggingFace for each dataset.
+ # A yaml file is generated by this script for each language pair.
+
+ gpt3_translation_benchmarks = {
+     "wmt14": ["fr-en"],  # ["en-fr", "fr-en"], # French
+     "wmt16": [
+         "ro-en",
+         "de-en",
+     ],  # ["en-ro", "ro-en", "de-en", "en-de"], # German, Romanian
+ }
+
+ # 28 total
+ LANGUAGES = {
+     **gpt3_translation_benchmarks,
+     # "wmt20": sacrebleu.get_langpairs_for_testset("wmt20"),
+     "iwslt2017": ["en-ar"],  # Arabic
+ }
+
+
+ def code_to_language(code):
+     # key is alpha_2 or alpha_3 depending on the code length
+     language_tuple = pycountry.languages.get(**{f"alpha_{len(code)}": code})
+     return language_tuple.name
+
+
+ def gen_lang_yamls(output_dir: str, overwrite: bool) -> None:
+     """
+     Generate a yaml file for each language.
+
+     :param output_dir: The directory to output the files to.
+     :param overwrite: Whether to overwrite files if they already exist.
+     """
+     err = []
+     for lang in LANGUAGES.keys():
+         for dataset_name in LANGUAGES[lang]:
+             src_lang, _, tgt_lang = dataset_name.partition("-")
+             for src, tgt in [[src_lang, tgt_lang], [tgt_lang, src_lang]]:
+                 # both translation directions for each lang pair
+                 lang_pair = src + "-" + tgt
+                 file_name = f"{lang}_{lang_pair}.yaml"
+                 try:
+                     source, target = code_to_language(src), code_to_language(tgt)
+
+                     groups = ["generate_until", "translation", lang]
+                     if lang in gpt3_translation_benchmarks.keys():
+                         groups += ["gpt3_translation_benchmarks"]
+
+                     with open(
+                         f"{output_dir}/{file_name}",
+                         "w" if overwrite else "x",
+                         encoding="utf8",
+                     ) as f:
+                         f.write("# Generated by utils.py\n")
+                         yaml.dump(
+                             {
+                                 "include": "wmt_common_yaml",
+                                 "group": groups,
+                                 "dataset_path": lang,
+                                 "dataset_name": dataset_name
+                                 if not (lang == "iwslt2017")
+                                 else "iwslt2017-" + dataset_name,
+                                 "task": f"{lang}-{lang_pair}",
+                                 "doc_to_text": f"{source} phrase: "
+                                 + "{{translation["
+                                 + f'"{src}"'
+                                 + "]}}\n"
+                                 + f"{target} phrase:",
+                                 "doc_to_target": " {{"
+                                 + "translation["
+                                 + f'"{tgt}"]'
+                                 + "}}",
+                             },
+                             f,
+                         )
+                 except FileExistsError:
+                     err.append(file_name)
+
+     if len(err) > 0:
+         raise FileExistsError(
+             "Files were not created because they already exist (use --overwrite flag):"
+             f" {', '.join(err)}"
+         )
+
+
+ def main() -> None:
+     """Parse CLI args and generate language-specific yaml files."""
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "--overwrite",
+         default=False,
+         action="store_true",
+         help="Overwrite files if they already exist",
+     )
+     parser.add_argument(
+         "--output-dir", default=".", help="Directory to write yaml files to"
+     )
+     args = parser.parse_args()
+
+     gen_lang_yamls(output_dir=args.output_dir, overwrite=args.overwrite)
+
+
+ if __name__ == "__main__":
+     main()
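For reference, a minimal sketch (not part of the commit) of invoking the generator above without the CLI; it rewrites the per-language-pair yaml files shown below, assuming it is run from the translation task directory so that `utils.py` is importable.

```python
# Illustration only: regenerate the translation task yamls in the current directory.
from utils import gen_lang_yamls  # utils.py from this commit, run in its own directory

# Writes both directions for each pair, e.g. wmt14_fr-en.yaml, wmt16_de-en.yaml,
# iwslt2017_en-ar.yaml, overwriting any existing files.
gen_lang_yamls(output_dir=".", overwrite=True)
```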
lm-evaluation/build/lib/lm_eval/tasks/translation/wmt14_fr-en.yaml ADDED
@@ -0,0 +1,14 @@
+ # Generated by utils.py
+ dataset_name: fr-en
+ dataset_path: wmt14
+ doc_to_target: ' {{translation["en"]}}'
+ doc_to_text: 'French phrase: {{translation["fr"]}}
+
+   English phrase:'
+ group:
+ - generate_until
+ - translation
+ - wmt14
+ - gpt3_translation_benchmarks
+ include: wmt_common_yaml
+ task: wmt14-fr-en
lm-evaluation/build/lib/lm_eval/tasks/translation/wmt16_de-en.yaml ADDED
@@ -0,0 +1,14 @@
+ # Generated by utils.py
+ dataset_name: de-en
+ dataset_path: wmt16
+ doc_to_target: ' {{translation["en"]}}'
+ doc_to_text: 'German phrase: {{translation["de"]}}
+
+   English phrase:'
+ group:
+ - generate_until
+ - translation
+ - wmt16
+ - gpt3_translation_benchmarks
+ include: wmt_common_yaml
+ task: wmt16-de-en
lm-evaluation/build/lib/lm_eval/tasks/translation/wmt16_en-de.yaml ADDED
@@ -0,0 +1,14 @@
+ # Generated by utils.py
+ dataset_name: de-en
+ dataset_path: wmt16
+ doc_to_target: ' {{translation["de"]}}'
+ doc_to_text: 'English phrase: {{translation["en"]}}
+
+   German phrase:'
+ group:
+ - generate_until
+ - translation
+ - wmt16
+ - gpt3_translation_benchmarks
+ include: wmt_common_yaml
+ task: wmt16-en-de
lm-evaluation/build/lib/lm_eval/tasks/translation/wmt16_en-ro.yaml ADDED
@@ -0,0 +1,14 @@
+ # Generated by utils.py
+ dataset_name: ro-en
+ dataset_path: wmt16
+ doc_to_target: ' {{translation["ro"]}}'
+ doc_to_text: 'English phrase: {{translation["en"]}}
+
+   Romanian phrase:'
+ group:
+ - generate_until
+ - translation
+ - wmt16
+ - gpt3_translation_benchmarks
+ include: wmt_common_yaml
+ task: wmt16-en-ro
lm-evaluation/build/lib/lm_eval/tasks/translation/wmt16_ro-en.yaml ADDED
@@ -0,0 +1,14 @@
+ # Generated by utils.py
+ dataset_name: ro-en
+ dataset_path: wmt16
+ doc_to_target: ' {{translation["en"]}}'
+ doc_to_text: 'Romanian phrase: {{translation["ro"]}}
+
+   English phrase:'
+ group:
+ - generate_until
+ - translation
+ - wmt16
+ - gpt3_translation_benchmarks
+ include: wmt_common_yaml
+ task: wmt16-ro-en
lm-evaluation/build/lib/lm_eval/tasks/unscramble/README.md ADDED
@@ -0,0 +1,57 @@
+ # Unscramble
+
+ ### Paper
+
+ Language Models are Few-Shot Learners
+ https://arxiv.org/pdf/2005.14165.pdf
+
+ Unscramble is a small battery of 5 “character manipulation” tasks. Each task
+ involves giving the model a word distorted by some combination of scrambling,
+ addition, or deletion of characters, and asking it to recover the original word.
+
+ Homepage: https://github.com/openai/gpt-3/tree/master/data
+
+
+ ### Citation
+
+ ```
+ @inproceedings{NEURIPS2020_1457c0d6,
+   author = {Brown, Tom and Mann, Benjamin and Ryder, Nick and Subbiah, Melanie and Kaplan, Jared D and Dhariwal, Prafulla and Neelakantan, Arvind and Shyam, Pranav and Sastry, Girish and Askell, Amanda and Agarwal, Sandhini and Herbert-Voss, Ariel and Krueger, Gretchen and Henighan, Tom and Child, Rewon and Ramesh, Aditya and Ziegler, Daniel and Wu, Jeffrey and Winter, Clemens and Hesse, Chris and Chen, Mark and Sigler, Eric and Litwin, Mateusz and Gray, Scott and Chess, Benjamin and Clark, Jack and Berner, Christopher and McCandlish, Sam and Radford, Alec and Sutskever, Ilya and Amodei, Dario},
+   booktitle = {Advances in Neural Information Processing Systems},
+   editor = {H. Larochelle and M. Ranzato and R. Hadsell and M. F. Balcan and H. Lin},
+   pages = {1877--1901},
+   publisher = {Curran Associates, Inc.},
+   title = {Language Models are Few-Shot Learners},
+   url = {https://proceedings.neurips.cc/paper/2020/file/1457c0d6bfcb4967418bfb8ac142f64a-Paper.pdf},
+   volume = {33},
+   year = {2020}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ * `unscramble`
+
+ #### Tasks
+
+ * `anagrams1` - Anagrams of all but the first and last letter.
+ * `anagrams2` - Anagrams of all but the first and last 2 letters.
+ * `cycle_letters` - Cycle letters in a word.
+ * `random_insertion` - Random insertions in the word that must be removed.
+ * `reversed_words` - Words spelled backwards that must be reversed.
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [x] Is the task an existing benchmark in the literature?
+ * [x] Have you referenced the original paper that introduced the task?
+ * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [x] Is the "Main" variant of this task clearly denoted?
+ * [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [x] Have you noted which, if any, published evaluation setups are matched by this variant?
+ * [x] Checked for equivalence with v0.3.0 LM Evaluation Harness
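As a concrete illustration (hypothetical word, not part of the commit) of the task family described in the README above, the `reversed_words` variant asks the model to undo a simple character-level transformation:

```python
# Illustration only: reversed_words shows the word spelled backwards
# and expects the original word back.
scrambled = "noitulove"
print(scrambled[::-1])  # prints "evolution"
```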