applied-ai-018 committed
Commit 484d12e · verified · 1 Parent(s): 5defe86

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.

Files changed (50)
  1. lm-evaluation-harness/docs/img/fewshot_example_gpt3.png +3 -0
  2. lm-evaluation-harness/lm_eval/tasks/french_bench/README.md +94 -0
  3. lm-evaluation-harness/lm_eval/tasks/french_bench/_default_template_yaml +4 -0
  4. lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_arc_challenge.yaml +21 -0
  5. lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_boolqa.yaml +23 -0
  6. lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_fquadv2.yaml +29 -0
  7. lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_fquadv2_bool.yaml +21 -0
  8. lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_fquadv2_genq.yaml +31 -0
  9. lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_fquadv2_hasAns.yaml +34 -0
  10. lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_grammar.yaml +20 -0
  11. lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_hellaswag.yaml +20 -0
  12. lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_multifquad.yaml +34 -0
  13. lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_opus_perplexity.yaml +23 -0
  14. lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_orangesum_abstract.yaml +28 -0
  15. lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_orangesum_title.yaml +28 -0
  16. lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_reading_comp.yaml +22 -0
  17. lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_topic_based_nli.yaml +23 -0
  18. lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_trivia.yaml +36 -0
  19. lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_vocab.yaml +20 -0
  20. lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_wikitext_fr.yaml +25 -0
  21. lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_xnli.yaml +21 -0
  22. lm-evaluation-harness/lm_eval/tasks/french_bench/preprocess_wikitext.py +48 -0
  23. lm-evaluation-harness/lm_eval/tasks/french_bench/utils.py +102 -0
  24. lm-evaluation-harness/lm_eval/tasks/glue/README.md +72 -0
  25. lm-evaluation-harness/lm_eval/tasks/glue/cola/default.yaml +16 -0
  26. lm-evaluation-harness/lm_eval/tasks/glue/mnli/default.yaml +14 -0
  27. lm-evaluation-harness/lm_eval/tasks/glue/mnli/mismatch.yaml +3 -0
  28. lm-evaluation-harness/lm_eval/tasks/glue/mnli/utils.py +6 -0
  29. lm-evaluation-harness/lm_eval/tasks/glue/mrpc/default.yaml +15 -0
  30. lm-evaluation-harness/lm_eval/tasks/glue/qnli/default.yaml +14 -0
  31. lm-evaluation-harness/lm_eval/tasks/glue/qqp/default.yaml +15 -0
  32. lm-evaluation-harness/lm_eval/tasks/glue/rte/default.yaml +14 -0
  33. lm-evaluation-harness/lm_eval/tasks/glue/sst2/default.yaml +14 -0
  34. lm-evaluation-harness/lm_eval/tasks/glue/wnli/default.yaml +14 -0
  35. lm-evaluation-harness/lm_eval/tasks/kmmlu/direct/kmmlu_direct_civil_engineering.yaml +3 -0
  36. lm-evaluation-harness/lm_eval/tasks/kmmlu/direct/kmmlu_direct_construction.yaml +3 -0
  37. lm-evaluation-harness/lm_eval/tasks/kmmlu/direct/kmmlu_direct_criminal_law.yaml +3 -0
  38. lm-evaluation-harness/lm_eval/tasks/kmmlu/direct/kmmlu_direct_ecology.yaml +3 -0
  39. lm-evaluation-harness/lm_eval/tasks/kmmlu/direct/kmmlu_direct_economics.yaml +3 -0
  40. lm-evaluation-harness/lm_eval/tasks/kmmlu/direct/kmmlu_direct_education.yaml +3 -0
  41. lm-evaluation-harness/lm_eval/tasks/kmmlu/direct/kmmlu_direct_electrical_engineering.yaml +3 -0
  42. lm-evaluation-harness/lm_eval/tasks/kmmlu/direct/kmmlu_direct_information_technology.yaml +3 -0
  43. lm-evaluation-harness/lm_eval/tasks/kmmlu/direct/kmmlu_direct_social_welfare.yaml +3 -0
  44. lm-evaluation-harness/lm_eval/tasks/kmmlu/direct/kmmlu_direct_taxation.yaml +3 -0
  45. lm-evaluation-harness/lm_eval/tasks/kmmlu/direct_hard/_direct_hard_kmmlu_yaml +27 -0
  46. lm-evaluation-harness/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_accounting.yaml +3 -0
  47. lm-evaluation-harness/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_aviation_engineering_and_maintenance.yaml +3 -0
  48. lm-evaluation-harness/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_criminal_law.yaml +3 -0
  49. lm-evaluation-harness/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_ecology.yaml +3 -0
  50. lm-evaluation-harness/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_economics.yaml +3 -0
lm-evaluation-harness/docs/img/fewshot_example_gpt3.png ADDED

Git LFS Details

  • SHA256: 6af5dc2196248b29260ba443e882725dd6cfc51ef17ad5a4dbab4f8ce6850c75
  • Pointer size: 131 Bytes
  • Size of remote file: 316 kB
lm-evaluation-harness/lm_eval/tasks/french_bench/README.md ADDED
@@ -0,0 +1,94 @@
# FrenchBench

### Paper

FrenchBench is a benchmark for evaluating French language models, introduced in the paper
[CroissantLLM: A Truly Bilingual French-English Language Model](https://arxiv.org/abs/2402.00786).
It is a collection of tasks that evaluate the ability of a language model to understand and generate French text.
The benchmark is constructed from both openly available datasets and newly released, manually annotated data.

### Citation

```bibtex
@misc{faysse2024croissantllm,
    title={CroissantLLM: A Truly Bilingual French-English Language Model},
    author={Manuel Faysse and Patrick Fernandes and Nuno M. Guerreiro and António Loison and Duarte M. Alves and Caio Corro and Nicolas Boizard and João Alves and Ricardo Rei and Pedro H. Martins and Antoni Bigata Casademunt and François Yvon and André F. T. Martins and Gautier Viaud and Céline Hudelot and Pierre Colombo},
    year={2024},
    eprint={2402.00786},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
```

### Groups and Tasks

#### Groups

- `french_bench`: All tasks (non-perplexity based)
- `french_bench_gen`: All official generative tasks
- `french_bench_mc`: All official multiple-choice tasks
- `french_bench_perplexity`: All perplexity-based tasks (0-shot is recommended)
- `french_bench_extra`: All extra tasks

#### Tasks

The following tasks evaluate models on the FrenchBench datasets using various scoring methods:
- french_bench_boolqa
- french_bench_fquadv2
- french_bench_fquadv2_bool
- french_bench_fquadv2_genq
- french_bench_fquadv2_hasAns
- french_bench_topic_based_nli
- french_bench_multifquad
- french_bench_grammar
- french_bench_vocab
- french_bench_reading_comp
- french_bench_xnli (modified XNLI)
- french_bench_orangesum_abstract
- french_bench_orangesum_title
- french_bench_trivia
- french_bench_hellaswag
- french_bench_arc_challenge

FrenchBench also includes tasks from other benchmarks:
- `belebele_fra_Latn`: Belebele French
- `wmt14-en-fr`: WMT14 English-French
- `wmt14-fr-en`: WMT14 French-English

Not intended for few-shot use:
- `crows_pairs_french`: Crows Pairs French
- `french_bench_opus_perplexity`: Opus Perplexity
### Usage

```bash
# openai
lm_eval --model openai-completions --model_args engine=text-davinci-003 --tasks french_bench --limit 100 --num_fewshot 3 --batch_size auto --output_path data/french_bench/davinci-003/results_french_bench_3shot.json
lm_eval --model openai-completions --model_args engine=text-davinci-003 --tasks french_bench_opus_perplexity,crows_pairs_french --limit 100 --batch_size auto --output_path data/french_bench/davinci-003/results_french_bench2_0shot.json

lm_eval --model hf --model_args pretrained=gpt2 --tasks french_bench --device cuda:0 --limit 100 --num_fewshot 3 --batch_size 8 --output_path data/french_bench/gpt2/results_french_bench_3shot.json
lm_eval --model hf --model_args pretrained=gpt2 --tasks french_bench_opus_perplexity,crows_pairs_french --device cuda:0 --limit 100 --batch_size auto --output_path data/french_bench/gpt2/results_french_bench2_0shot.json

lm_eval --model hf --model_args pretrained=meta-llama/Llama-2-7b-hf --tasks french_bench --device cuda:0 --limit 100 --num_fewshot 3 --batch_size 4 --output_path data/french_bench/llama-2-7b-hf/results_french_bench_3shot.json
lm_eval --model hf --model_args pretrained=meta-llama/Llama-2-7b-hf --tasks french_bench_opus_perplexity,crows_pairs_french --device cuda:0 --limit 100 --batch_size auto --output_path data/french_bench/llama-2-7b-hf/results_french_bench2_0shot.json
```

HF and Accelerate options can be added when loading a model:
```bash
accelerate launch -m lm_eval --model hf --model_args pretrained=meta-llama/Llama-2-7b-hf,dtype="float16" --tasks french_bench
```
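A single group can be selected with `--tasks` in the same way; a sketch (the model, shot count, and batch size below are illustrative):

```bash
# only the multiple-choice subset, or only the generative subset
lm_eval --model hf --model_args pretrained=gpt2 --tasks french_bench_mc --device cuda:0 --num_fewshot 3 --batch_size 8
lm_eval --model hf --model_args pretrained=gpt2 --tasks french_bench_gen --device cuda:0 --num_fewshot 3 --batch_size 8
```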
### Checklist

* [x] Is the task an existing benchmark in the literature?
  * [x] Have you referenced the original paper that introduced the task?
  * [x] If yes, does the original paper provide a reference implementation?
    * [x] Yes, original implementation contributed by author of the benchmark

If other tasks on this dataset are already supported:
* [x] Is the "Main" variant of this task clearly denoted?
* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [x] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation-harness/lm_eval/tasks/french_bench/_default_template_yaml ADDED
@@ -0,0 +1,4 @@
test_split: test
fewshot_split: valid
fewshot_config:
  sampler: first_n
lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_arc_challenge.yaml ADDED
@@ -0,0 +1,21 @@
group:
  - french_bench
  - french_bench_mc
task: french_bench_arc_challenge
dataset_path: manu/french_bench_arc_challenge
output_type: multiple_choice
training_split: train
validation_split: validation
test_split: test
doc_to_text: "Question: {{question}}\nRéponse:"
doc_to_target: "{{['A', 'B', 'C', 'D'].index(answerKey)}}"
doc_to_choice: "{{choices}}"
should_decontaminate: true
doc_to_decontamination_query: "Question: {{question}}\nRéponse:"
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_boolqa.yaml ADDED
@@ -0,0 +1,23 @@
include: "_default_template_yaml"
group:
  - french_bench
  - french_bench_extra
description: "D'après l'information dans le contexte donné, quelle est la réponse à la question ?"
task: french_bench_boolqa
dataset_path: manu/french_boolq
output_type: multiple_choice
validation_split: valid
doc_to_text: "\nContexte: {{passage}}\n\nQuestion: {{question}}\n"
doc_to_choice: ["Oui", "Non"]
# doc_to_text: "\nContexte: {{passage}}\n\nQuestion: {{question}}\n\nD'après l'information dans le contexte, la réponse est:\nA. Oui \nB. Non\n\nRéponse:"
# doc_to_choice: ["A", "B"]
doc_to_target: "{{[1, 0].index(label)}}"
should_decontaminate: true
doc_to_decontamination_query: passage
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_fquadv2.yaml ADDED
@@ -0,0 +1,29 @@
include: "_default_template_yaml"
group:
  - french_bench
  - french_bench_extra
description: "D'après l'information dans le contexte donné, donne la réponse à la question en citant quelques mots du contexte. Si il est impossible de répondre avec les informations du contexte, répond 'Impossible'."
task: french_bench_fquadv2
dataset_path: manu/fquad2_test
output_type: generate_until
validation_split: valid
doc_to_text: "\nContexte: {{context}}\n\nQuestion: {{question}}\n\nRéponse:"
doc_to_target: "{% if answers.text| length > 0 %}{{answers.text[0]}}{% else %}{{['Impossible']}}{% endif %}"
target_delimiter: " "
should_decontaminate: true
doc_to_decontamination_query: context
generation_kwargs:
  until:
    - "\n"
# filter_list:
#   - name: remove_whitespace
#     filter:
#       - function: remove_whitespace
#       - function: take_first
metric_list:
  - metric: !function utils.exact
    aggregation: mean
    higher_is_better: true
  - metric: !function utils.f1
    aggregation: mean
    higher_is_better: true
lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_fquadv2_bool.yaml ADDED
@@ -0,0 +1,21 @@
include: "_default_template_yaml"
group:
  - french_bench
  - french_bench_extra
description: "D'après l'information présente dans le contexte, est il possible de répondre à la question ?"
task: french_bench_fquadv2_bool
dataset_path: manu/fquad2_test
output_type: multiple_choice
validation_split: valid
doc_to_text: "\nContexte: {{context}}\n\nQuestion: {{question}}\n\nD'après l'information présente dans le contexte, répondre à la question est:\nA. Possible \nB. Impossible\n\nRéponse:"
doc_to_choice: ["A", "B"]
doc_to_target: "{{[False, True].index(is_impossible)}}"
should_decontaminate: true
doc_to_decontamination_query: context
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_fquadv2_genq.yaml ADDED
@@ -0,0 +1,31 @@
include: "_default_template_yaml"
group:
  - french_bench
  - french_bench_gen
description: "D'après l'information dans le contexte donné, quelle question a été posée pour obtenir la réponse donnée ?"
task: french_bench_fquadv2_genq
dataset_path: manu/fquad2_test
output_type: generate_until
validation_split: valid_hasAns
test_split: test_hasAns
fewshot_split: valid_hasAns
doc_to_text: "\nContexte: {{context}}\n\nRéponse: {% if answers.text| length > 0 %}{{answers.text[0]}}{% else %}{{['Impossible']}}{% endif %}\n\nQuestion:"
doc_to_target: "{{question}}"
target_delimiter: " "
should_decontaminate: true
doc_to_decontamination_query: question
generation_kwargs:
  until:
    - "\n"
# filter_list:
#   - name: remove_whitespace
#     filter:
#       - function: remove_whitespace
#       - function: take_first
metric_list:
  - metric: !function utils.rouge1
    higher_is_better: true
    aggregation: !function utils.rouge1_agg
  - metric: !function utils.f1
    aggregation: mean
    higher_is_better: true
lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_fquadv2_hasAns.yaml ADDED
@@ -0,0 +1,34 @@
include: "_default_template_yaml"
group:
  - french_bench
  - french_bench_gen
description: "D'après l'information dans le contexte donné, donne la réponse à la question en citant quelques mots du contexte. Si il est impossible de répondre avec les informations du contexte, répond 'Impossible'."
task: french_bench_fquadv2_hasAns
dataset_path: manu/fquad2_test
output_type: generate_until
validation_split: valid_hasAns
test_split: test_hasAns
fewshot_split: valid_hasAns
doc_to_text: "\nContexte: {{context}}\n\nQuestion: {{question}}\n\nRéponse:"
doc_to_target: "{% if answers.text| length > 0 %}{{answers.text[0]}}{% else %}{{['Impossible']}}{% endif %}"
target_delimiter: " "
should_decontaminate: true
doc_to_decontamination_query: context
generation_kwargs:
  until:
    - "\n"
# filter_list:
#   - name: remove_whitespace
#     filter:
#       - function: remove_whitespace
#       - function: take_first
metric_list:
  - metric: !function utils.exact
    aggregation: mean
    higher_is_better: true
  - metric: !function utils.f1
    aggregation: mean
    higher_is_better: true
  - metric: !function utils.rouge1
    higher_is_better: true
    aggregation: !function utils.rouge1_agg
lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_grammar.yaml ADDED
@@ -0,0 +1,20 @@
include: "_default_template_yaml"
group:
  - french_bench
  - french_bench_mc
description: "Répond au mieux en complétant la question avec une des réponses proposées."
dataset_path: manu/french-bench-grammar-vocab-reading
output_type: multiple_choice
validation_split: Grammar
fewshot_split: Grammar
test_split: Grammar
#doc_to_text: "Question: {{question.strip()}}\nA: {{answerA}}\nB: {{answerB}}\nC: {{answerC}}\nD: {{answerD}}\nRéponse:"
#doc_to_choice: ["A", "B", "C", "D"]
doc_to_text: "La phrase suivante est correcte grammaticalement:\n"
doc_to_choice: "{{[question.replace('<...>', answerA), question.replace('<...>', answerB), question.replace('<...>', answerC), question.replace('<...>', answerD)]}}"
doc_to_target: '{{["answerA", "answerB", "answerC", "answerD"].index("answer" + answer)}}'
task: french_bench_grammar
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_hellaswag.yaml ADDED
@@ -0,0 +1,20 @@
group:
  - french_bench
  - french_bench_mc
task: french_bench_hellaswag
dataset_path: manu/french_bench_hellaswag
output_type: multiple_choice
training_split: validation
validation_split: validation
test_split: null
process_docs: !function utils.process_docs
doc_to_text: "{{query}}"
doc_to_target: "{{label}}"
doc_to_choice: "{{choices}}"
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_multifquad.yaml ADDED
@@ -0,0 +1,34 @@
include: "_default_template_yaml"
group:
  - french_bench
  - french_bench_gen
description: "D'après l'information dans le contexte donné, donne la réponse à la question en citant quelques extraits du contexte."
task: french_bench_multifquad
dataset_path: manu/multifquad_test
output_type: generate_until
validation_split: valid
test_split: test
fewshot_split: valid
doc_to_text: "\nContexte: {{context}}\n\nQuestion: {{question}}\n\nRéponse:"
doc_to_target: "{{', '.join(answers.text)}}"
target_delimiter: " "
should_decontaminate: true
doc_to_decontamination_query: context
generation_kwargs:
  until:
    - "\n"
# filter_list:
#   - name: remove_whitespace
#     filter:
#       - function: remove_whitespace
#       - function: take_first
metric_list:
  - metric: !function utils.exact
    aggregation: mean
    higher_is_better: true
  - metric: !function utils.f1
    aggregation: mean
    higher_is_better: true
  - metric: !function utils.rouge1
    higher_is_better: true
    aggregation: !function utils.rouge1_agg
lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_opus_perplexity.yaml ADDED
@@ -0,0 +1,23 @@
group:
  - french_bench_perplexity
task: french_bench_opus_perplexity
dataset_path: manu/opus100-en-fr
output_type: loglikelihood_rolling
test_split: test
fewshot_split: validation
validation_split: validation
num_fewshot: 0
doc_to_text: ""
doc_to_target: "{{text}}"
should_decontaminate: true
doc_to_decontamination_query: "{{text}}"
metric_list:
  - metric: word_perplexity
    aggregation: weighted_perplexity
    higher_is_better: false
  - metric: byte_perplexity
    aggregation: weighted_perplexity
    higher_is_better: false
  - metric: bits_per_byte
    aggregation: bits_per_byte
    higher_is_better: false
lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_orangesum_abstract.yaml ADDED
@@ -0,0 +1,28 @@
include: "_default_template_yaml"
group:
  - french_bench
  - french_bench_gen
description: "Résume l'article en une phrase."
task: french_bench_orangesum_abstract
dataset_path: orange_sum
dataset_name: abstract
output_type: generate_until
validation_split: validation
fewshot_split: validation
doc_to_text: "\nArticle: {{text}}\n\nRésumé:"
doc_to_target: "{{summary}}"
target_delimiter: " "
should_decontaminate: true
doc_to_decontamination_query: summary
generation_kwargs:
  until:
    - "\n"
# filter_list:
#   - name: remove_whitespace
#     filter:
#       - function: remove_whitespace
#       - function: take_first
metric_list:
  - metric: !function utils.rouge1
    higher_is_better: true
    aggregation: !function utils.rouge1_agg
lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_orangesum_title.yaml ADDED
@@ -0,0 +1,28 @@
include: "_default_template_yaml"
group:
  - french_bench
  - french_bench_extra
description: "Trouve le titre de l'article."
task: french_bench_orangesum_title
dataset_path: orange_sum
dataset_name: title
output_type: generate_until
validation_split: validation
fewshot_split: validation
doc_to_text: "\nArticle: {{text}}\n\nTitre:"
doc_to_target: "{{summary}}"
target_delimiter: " "
should_decontaminate: true
doc_to_decontamination_query: summary
generation_kwargs:
  until:
    - "\n"
# filter_list:
#   - name: remove_whitespace
#     filter:
#       - function: remove_whitespace
#       - function: take_first
metric_list:
  - metric: !function utils.rouge1
    higher_is_better: true
    aggregation: !function utils.rouge1_agg
lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_reading_comp.yaml ADDED
@@ -0,0 +1,22 @@
include: "_default_template_yaml"
group:
  - french_bench
  - french_bench_extra
# description: "Répond au mieux en complétant la question avec une des réponses proposées."
dataset_path: manu/french-bench-grammar-vocab-reading
output_type: multiple_choice
validation_split: Reading
fewshot_split: Reading
test_split: Reading
# doc_to_text: "Context: {{context}}\nQuestion: {{question.strip()}}\nA: {{answerA}}\nB: {{answerB}}\nC: {{answerC}}\nD: {{answerD}}\nRéponse:"
# doc_to_choice: "{{['A: '+answerA, 'B: '+answerB, 'C: '+answerC, 'D: '+answerD]}}"
doc_to_text: "Context: {{context}}\n\n"
doc_to_choice: "{{[question.replace('<...>', answerA) if '<...>' in question else question + ' ' +answerA, question.replace('<...>', answerB) if '<...>' in question else question + ' ' + answerB, question.replace('<...>', answerC) if '<...>' in question else question + ' ' + answerC, question.replace('<...>', answerD) if '<...>' in question else question + ' ' + answerD]}}"
doc_to_target: '{{["answerA", "answerB", "answerC", "answerD"].index("answer" + answer)}}'
# doc_to_choice: "{{['A: '+answerA, 'B: '+answerB, 'C: '+answerC, 'D: '+answerD]}}"
# doc_to_target: answer
task: french_bench_reading_comp
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_topic_based_nli.yaml ADDED
@@ -0,0 +1,23 @@
include: "_default_template_yaml"
group:
  - french_bench
  - french_bench_extra
description: "A propos du thème spécifié, l'avis client est il positif, négatif, ou neutre ?"
task: french_bench_topic_based_nli
dataset_path: manu/topic_based_nli_test
output_type: multiple_choice
validation_split: valid
# doc_to_text: "\nAvis Client: {{text}}\n\nEn considèrant uniquement le thème \"{{topic}}\", l'avis client est plutot:\nA. Positif \nB. Négatif\nC. Mitigé \nD. Neutre\nE. Absent\n\nRéponse:"
# doc_to_choice: ["A", "B", "C", "D", "E"]
doc_to_text: "\nAvis Client: {{text}}\n\nA propos du thème \"{{topic}}\", l'avis client est"
doc_to_choice: ['positif', 'négatif', 'neutre']
doc_to_target: "{{['positif', 'negatif', 'neutre'].index(polarity)}}"
should_decontaminate: true
doc_to_decontamination_query: texte
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_trivia.yaml ADDED
@@ -0,0 +1,36 @@
include: "_default_template_yaml"
group:
  - french_bench
  - french_bench_gen
task: french_bench_trivia
dataset_path: manu/french-trivia
output_type: generate_until
validation_split: train
test_split: train
fewshot_split: train
doc_to_text: "{{Question}}\nAnswer:"
doc_to_target: "{{Answer}}"
target_delimiter: " "
should_decontaminate: true
doc_to_decontamination_query: Question
generation_kwargs:
  until:
    - "\n"
# filter_list:
#   - name: remove_whitespace
#     filter:
#       - function: remove_whitespace
#       - function: take_first
metric_list:
  - metric: !function utils.exact
    aggregation: mean
    higher_is_better: true
  - metric: !function utils.f1
    aggregation: mean
    higher_is_better: true
  - metric: !function utils.rouge1
    higher_is_better: true
    aggregation: !function utils.rouge1_agg
  - metric: !function utils.is_included
    higher_is_better: true
    aggregation: mean
lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_vocab.yaml ADDED
@@ -0,0 +1,20 @@
include: "_default_template_yaml"
group:
  - french_bench
  - french_bench_mc
# description: "Répond au mieux en complétant la question avec une des réponses proposées."
dataset_path: manu/french-bench-grammar-vocab-reading
output_type: multiple_choice
validation_split: Vocabulary
fewshot_split: Vocabulary
test_split: Vocabulary
# doc_to_text: "Question: {{question.strip()}}\nA: {{answerA}}\nB: {{answerB}}\nC: {{answerC}}\nD: {{answerD}}\nRéponse:"
# doc_to_choice: ["A", "B", "C", "D"]
doc_to_text: "La phrase suivante est logique sémantiquement:\n"
doc_to_choice: "{{[question.replace('<...>', answerA), question.replace('<...>', answerB), question.replace('<...>', answerC), question.replace('<...>', answerD)]}}"
doc_to_target: '{{["answerA", "answerB", "answerC", "answerD"].index("answer" + answer)}}'
task: french_bench_vocab
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_wikitext_fr.yaml ADDED
@@ -0,0 +1,25 @@
group:
  - french_bench_perplexity
task: french_bench_wikitext_fr
dataset_path: asi/wikitext_fr
dataset_name: wikitext-35
output_type: loglikelihood_rolling
training_split: train
validation_split: validation
test_split: test
num_fewshot: 0
doc_to_text: ""
doc_to_target: !function preprocess_wikitext.wikitext_detokenizer
process_results: !function preprocess_wikitext.process_results
should_decontaminate: true
doc_to_decontamination_query: "{{paragraph}}"
metric_list:
  - metric: word_perplexity
    aggregation: weighted_perplexity
    higher_is_better: false
  - metric: byte_perplexity
    aggregation: weighted_perplexity
    higher_is_better: false
  - metric: bits_per_byte
    aggregation: bits_per_byte
    higher_is_better: false
lm-evaluation-harness/lm_eval/tasks/french_bench/french_bench_xnli.yaml ADDED
@@ -0,0 +1,21 @@
include: "_default_template_yaml"
group:
  - french_bench
  - french_bench_extra
description: "La prémisse et l'hypothèse sont elles en accord, neutres en elles, ou en contradiction ?"
dataset_path: xnli
dataset_name: fr
output_type: multiple_choice
validation_split: validation
fewshot_split: validation
test_split: test
# doc_to_text: "\nPrémisse: {{premise}}\n\nHypothèse: {{hypothesis}}\n\nLa prémisse et l'hypothèse sont:\nA. En accord\nB. Neutre\nC. En contradiction\nRéponse:"
# doc_to_choice: "{{['A: En accord', 'B: Neutre', 'C: En contradiction']}}"
doc_to_text: "\nPrémisse: {{premise}}\n\nHypothèse: {{hypothesis}}\n\nLa prémisse et l'hypothèse sont"
doc_to_choice: "{{['en accord', 'neutres entre elles', 'en contradiction']}}"
doc_to_target: label
task: french_bench_xnli
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
lm-evaluation-harness/lm_eval/tasks/french_bench/preprocess_wikitext.py ADDED
@@ -0,0 +1,48 @@
import re


def wikitext_detokenizer(doc):
    string = doc["paragraph"]
    # contractions
    string = string.replace("s '", "s'")
    string = re.sub(r"/' [0-9]/", r"/'[0-9]/", string)
    # number separators
    string = string.replace(" @-@ ", "-")
    string = string.replace(" @,@ ", ",")
    string = string.replace(" @.@ ", ".")
    # punctuation
    string = string.replace(" : ", ": ")
    string = string.replace(" ; ", "; ")
    string = string.replace(" . ", ". ")
    string = string.replace(" ! ", "! ")
    string = string.replace(" ? ", "? ")
    string = string.replace(" , ", ", ")
    # double brackets
    string = re.sub(r"\(\s*([^\)]*?)\s*\)", r"(\1)", string)
    string = re.sub(r"\[\s*([^\]]*?)\s*\]", r"[\1]", string)
    string = re.sub(r"{\s*([^}]*?)\s*}", r"{\1}", string)
    string = re.sub(r"\"\s*([^\"]*?)\s*\"", r'"\1"', string)
    string = re.sub(r"'\s*([^']*?)\s*'", r"'\1'", string)
    # miscellaneous
    string = string.replace("= = = =", "====")
    string = string.replace("= = =", "===")
    string = string.replace("= =", "==")
    string = string.replace(" " + chr(176) + " ", chr(176))
    string = string.replace(" \n", "\n")
    string = string.replace("\n ", "\n")
    string = string.replace(" N ", " 1 ")
    string = string.replace(" 's", "'s")

    return string


def process_results(doc, results):
    (loglikelihood,) = results
    # IMPORTANT: wikitext counts number of words in *original doc before detokenization*
    _words = len(re.split(r"\s+", doc["paragraph"]))
    _bytes = len(doc["paragraph"].encode("utf-8"))
    return {
        "word_perplexity": (loglikelihood, _words),
        "byte_perplexity": (loglikelihood, _bytes),
        "bits_per_byte": (loglikelihood, _bytes),
    }
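A quick sanity check of the detokenizer on a made-up paragraph (illustrative only; assumes it is run from this directory so the module imports directly):

```python
from preprocess_wikitext import wikitext_detokenizer

# exercises the " @,@ " number-separator rule and the bracket-spacing rule above
doc = {"paragraph": "le coût était de 3 @,@ 5 millions ( environ )"}
print(wikitext_detokenizer(doc))
# -> le coût était de 3,5 millions (environ)
```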
lm-evaluation-harness/lm_eval/tasks/french_bench/utils.py ADDED
@@ -0,0 +1,102 @@
import collections
import re
import string

import datasets
import evaluate


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(un|une|des|le|la|les)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


# Exact match (the normalized answer exactly matches the gold answer)
def exact(predictions, references):
    return int(normalize_answer(references[0]) == normalize_answer(predictions[0]))


# The F-score of predicted tokens versus the gold answer
def f1(predictions, references):
    gold_toks = get_tokens(references[0])
    pred_toks = get_tokens(predictions[0])
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def rouge1(items):
    """
    # passthrough for efficiency
    """
    return items


def rouge1_agg(items):
    """
    Higher is better
    """
    refs = list(zip(*items))[0]
    preds = list(zip(*items))[1]
    rouge_scorer = evaluate.load("rouge")
    return rouge_scorer.compute(predictions=preds, references=refs)["rouge1"]


def is_included(items):
    """
    # passthrough for efficiency
    """
    if items[0] in items[1]:
        return True
    return False


def preprocess(text):
    text = text.strip()
    # NOTE: Brackets are artifacts of the WikiHow dataset portion of HellaSwag.
    text = text.replace(" [title]", ". ")
    text = re.sub("\\[.*?\\]", "", text)
    text = text.replace("  ", " ")
    return text


def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    def _process_doc(doc):
        ctx = doc["ctx_a"] + " " + doc["ctx_b"].capitalize()
        out_doc = {
            "query": preprocess(doc["activity_label"] + ": " + ctx),
            "choices": [preprocess(ending) for ending in doc["endings"]],
            "gold": int(doc["label"]),
        }
        return out_doc

    return dataset.map(_process_doc)
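For reference, a small sketch of how the per-item metrics above behave (inputs are made up; assumes it is run from this directory so `utils` imports directly):

```python
from utils import exact, f1

# Both metrics score one prediction against one reference, hence the [0] indexing above.
# normalize_answer lowercases, strips punctuation, and drops French articles,
# so "La tour Eiffel" and "la tour eiffel !" normalize to the same string.
print(exact(predictions=["La tour Eiffel"], references=["la tour eiffel !"]))  # 1

# Token-level F1: {grande, tour, eiffel} vs. {tour, eiffel} -> precision 2/3, recall 1, F1 0.8
print(f1(predictions=["la grande tour Eiffel"], references=["tour Eiffel"]))  # 0.8
```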
lm-evaluation-harness/lm_eval/tasks/glue/README.md ADDED
@@ -0,0 +1,72 @@
# GLUE
**NOTE**: GLUE benchmark tasks do not provide publicly accessible labels for their test sets, so we default to the validation sets for all sub-tasks.

### Paper

Title: `GLUE: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding`

Abstract: https://openreview.net/pdf?id=rJ4km2R5t7

The General Language Understanding Evaluation (GLUE) benchmark is a collection of
resources for training, evaluating, and analyzing natural language understanding
systems. GLUE consists of:
- A benchmark of nine sentence- or sentence-pair language understanding tasks built
on established existing datasets and selected to cover a diverse range of dataset
sizes, text genres, and degrees of difficulty, and
- A diagnostic dataset designed to evaluate and analyze model performance with
respect to a wide range of linguistic phenomena found in natural language.

Homepage: https://gluebenchmark.com/

### Citation

```
@inproceedings{wang-etal-2018-glue,
    title = "{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding",
    author = "Wang, Alex and
      Singh, Amanpreet and
      Michael, Julian and
      Hill, Felix and
      Levy, Omer and
      Bowman, Samuel",
    booktitle = "Proceedings of the 2018 {EMNLP} Workshop {B}lackbox{NLP}: Analyzing and Interpreting Neural Networks for {NLP}",
    month = nov,
    year = "2018",
    address = "Brussels, Belgium",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W18-5446",
    doi = "10.18653/v1/W18-5446",
    pages = "353--355",
    abstract = "Human ability to understand language is \textit{general, flexible, and robust}. In contrast, most NLU models above the word level are designed for a specific task and struggle with out-of-domain data. If we aspire to develop models with understanding beyond the detection of superficial correspondences between inputs and outputs, then it is critical to develop a unified model that can execute a range of linguistic tasks across different domains. To facilitate research in this direction, we present the General Language Understanding Evaluation (GLUE, gluebenchmark.com): a benchmark of nine diverse NLU tasks, an auxiliary dataset for probing models for understanding of specific linguistic phenomena, and an online platform for evaluating and comparing models. For some benchmark tasks, training data is plentiful, but for others it is limited or does not match the genre of the test set. GLUE thus favors models that can represent linguistic knowledge in a way that facilitates sample-efficient learning and effective knowledge-transfer across tasks. While none of the datasets in GLUE were created from scratch for the benchmark, four of them feature privately-held test data, which is used to ensure that the benchmark is used fairly. We evaluate baselines that use ELMo (Peters et al., 2018), a powerful transfer learning technique, as well as state-of-the-art sentence representation models. The best models still achieve fairly low absolute scores. Analysis with our diagnostic dataset yields similarly weak performance over all phenomena tested, with some exceptions.",
}
```

### Groups and Tasks

#### Groups

* `glue`: Run all GLUE subtasks.

#### Tasks

* `cola`
* `mnli`
* `mrpc`
* `qnli`
* `qqp`
* `rte`
* `sst2`
* `wnli`
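A usage sketch, mirroring the FrenchBench commands earlier in this commit (the model and output path are illustrative):

```bash
# run every GLUE subtask on its validation split
lm_eval --model hf --model_args pretrained=gpt2 --tasks glue --device cuda:0 --batch_size 8 --output_path data/glue/gpt2/results_glue.json

# or select individual subtasks
lm_eval --model hf --model_args pretrained=gpt2 --tasks cola,sst2,rte --device cuda:0 --batch_size 8
```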
### Checklist

For adding novel benchmarks/datasets to the library:
* [ ] Is the task an existing benchmark in the literature?
  * [ ] Have you referenced the original paper that introduced the task?
  * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?

If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation-harness/lm_eval/tasks/glue/cola/default.yaml ADDED
@@ -0,0 +1,16 @@
group: glue
task: cola
dataset_path: glue
dataset_name: cola
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "{{sentence}}\nQuestion: Does this sentence make sense?\nAnswer:"
doc_to_target: label
doc_to_choice: ["no", "yes"]
should_decontaminate: true
doc_to_decontamination_query: sentence
metric_list:
  - metric: mcc
metadata:
  version: 1.0
lm-evaluation-harness/lm_eval/tasks/glue/mnli/default.yaml ADDED
@@ -0,0 +1,14 @@
group: glue
task: mnli
dataset_path: glue
dataset_name: mnli
output_type: multiple_choice
training_split: train
validation_split: validation_matched
doc_to_text: !function utils.doc_to_text
doc_to_target: label
doc_to_choice: ["True", "Neither", "False"]
metric_list:
  - metric: acc
metadata:
  version: 1.0
lm-evaluation-harness/lm_eval/tasks/glue/mnli/mismatch.yaml ADDED
@@ -0,0 +1,3 @@
include: default.yaml
task: mnli_mismatch
validation_split: validation_mismatched
lm-evaluation-harness/lm_eval/tasks/glue/mnli/utils.py ADDED
@@ -0,0 +1,6 @@
def doc_to_text(doc) -> str:
    return "{}\nQuestion: {} True, False or Neither?\nAnswer:".format(
        doc["premise"],
        doc["hypothesis"].strip()
        + ("" if doc["hypothesis"].strip().endswith(".") else "."),
    )
lm-evaluation-harness/lm_eval/tasks/glue/mrpc/default.yaml ADDED
@@ -0,0 +1,15 @@
group: glue
task: mrpc
dataset_path: glue
dataset_name: mrpc
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do both sentences mean the same thing?\nAnswer:"
doc_to_target: label
doc_to_choice: ["no", "yes"]
metric_list:
  - metric: acc
  - metric: f1
metadata:
  version: 1.0
lm-evaluation-harness/lm_eval/tasks/glue/qnli/default.yaml ADDED
@@ -0,0 +1,14 @@
group: glue
task: qnli
dataset_path: glue
dataset_name: qnli
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "{{question}}\n{{sentence}}\nQuestion: Does this response answer the question?\nAnswer:"
doc_to_target: label
doc_to_choice: ["yes", "no"]
metric_list:
  - metric: acc
metadata:
  version: 1.0
lm-evaluation-harness/lm_eval/tasks/glue/qqp/default.yaml ADDED
@@ -0,0 +1,15 @@
group: glue
task: qqp
dataset_path: glue
dataset_name: qqp
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "Question 1: {{question1}}\nQuestion 2: {{question2}}\nQuestion: Do both questions ask the same thing?\nAnswer:"
doc_to_target: label
doc_to_choice: ["no", "yes"]
metric_list:
  - metric: acc
  - metric: f1
metadata:
  version: 2.0
lm-evaluation-harness/lm_eval/tasks/glue/rte/default.yaml ADDED
@@ -0,0 +1,14 @@
group: glue
task: rte
dataset_path: glue
dataset_name: rte
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "{{sentence1}}\nQuestion: {{sentence2}} True or False?\nAnswer:"
doc_to_target: label
doc_to_choice: ["True", "False"]
metric_list:
  - metric: acc
metadata:
  version: 1.0
lm-evaluation-harness/lm_eval/tasks/glue/sst2/default.yaml ADDED
@@ -0,0 +1,14 @@
group: glue
task: sst2
dataset_path: glue
dataset_name: sst2
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "{{sentence}}\nQuestion: Is this sentence positive or negative?\nAnswer:"
doc_to_target: label
doc_to_choice: ["negative", "positive"]
metric_list:
  - metric: acc
metadata:
  version: 1.0
lm-evaluation-harness/lm_eval/tasks/glue/wnli/default.yaml ADDED
@@ -0,0 +1,14 @@
group: glue
task: wnli
dataset_path: glue
dataset_name: wnli
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: "{{sentence1}}\nQuestion: {{sentence2}} True or False?\nAnswer:"
doc_to_target: label
doc_to_choice: ["False", "True"]
metric_list:
  - metric: acc
metadata:
  version: 2.0
lm-evaluation-harness/lm_eval/tasks/kmmlu/direct/kmmlu_direct_civil_engineering.yaml ADDED
@@ -0,0 +1,3 @@
dataset_name: Civil-Engineering
include: _direct_kmmlu_yaml
task: kmmlu_direct_civil_engineering
lm-evaluation-harness/lm_eval/tasks/kmmlu/direct/kmmlu_direct_construction.yaml ADDED
@@ -0,0 +1,3 @@
dataset_name: Construction
include: _direct_kmmlu_yaml
task: kmmlu_direct_construction
lm-evaluation-harness/lm_eval/tasks/kmmlu/direct/kmmlu_direct_criminal_law.yaml ADDED
@@ -0,0 +1,3 @@
dataset_name: Criminal-Law
include: _direct_kmmlu_yaml
task: kmmlu_direct_criminal_law
lm-evaluation-harness/lm_eval/tasks/kmmlu/direct/kmmlu_direct_ecology.yaml ADDED
@@ -0,0 +1,3 @@
dataset_name: Ecology
include: _direct_kmmlu_yaml
task: kmmlu_direct_ecology
lm-evaluation-harness/lm_eval/tasks/kmmlu/direct/kmmlu_direct_economics.yaml ADDED
@@ -0,0 +1,3 @@
dataset_name: Economics
include: _direct_kmmlu_yaml
task: kmmlu_direct_economics
lm-evaluation-harness/lm_eval/tasks/kmmlu/direct/kmmlu_direct_education.yaml ADDED
@@ -0,0 +1,3 @@
dataset_name: Education
include: _direct_kmmlu_yaml
task: kmmlu_direct_education
lm-evaluation-harness/lm_eval/tasks/kmmlu/direct/kmmlu_direct_electrical_engineering.yaml ADDED
@@ -0,0 +1,3 @@
dataset_name: Electrical-Engineering
include: _direct_kmmlu_yaml
task: kmmlu_direct_electrical_engineering
lm-evaluation-harness/lm_eval/tasks/kmmlu/direct/kmmlu_direct_information_technology.yaml ADDED
@@ -0,0 +1,3 @@
dataset_name: Information-Technology
include: _direct_kmmlu_yaml
task: kmmlu_direct_information_technology
lm-evaluation-harness/lm_eval/tasks/kmmlu/direct/kmmlu_direct_social_welfare.yaml ADDED
@@ -0,0 +1,3 @@
dataset_name: Social-Welfare
include: _direct_kmmlu_yaml
task: kmmlu_direct_social_welfare
lm-evaluation-harness/lm_eval/tasks/kmmlu/direct/kmmlu_direct_taxation.yaml ADDED
@@ -0,0 +1,3 @@
dataset_name: Taxation
include: _direct_kmmlu_yaml
task: kmmlu_direct_taxation
lm-evaluation-harness/lm_eval/tasks/kmmlu/direct_hard/_direct_hard_kmmlu_yaml ADDED
@@ -0,0 +1,27 @@
group:
  - kmmlu
  - kmmlu_hard_direct
dataset_path: HAERAE-HUB/KMMLU-HARD
output_type: generate_until
test_split: test
fewshot_split: dev
doc_to_text: "{{question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\n정답:"
doc_to_target: "{{['A', 'B', 'C', 'D'][answer-1]}}"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
    regexes_to_ignore:
      - " "
generation_kwargs:
  until:
    - "Q:"
    - "\n\n"
    - "</s>"
    - "."
  do_sample: false
  temperature: 0.0
metadata:
  version: 2.0
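The template above assumes the dataset's `answer` field is 1-indexed, mapping it to a letter with `[answer-1]`; a minimal check of that indexing:

```python
# answer = 3 in KMMLU-HARD corresponds to choice "C"
print(["A", "B", "C", "D"][3 - 1])  # C
```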
lm-evaluation-harness/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_accounting.yaml ADDED
@@ -0,0 +1,3 @@
dataset_name: accounting
include: _direct_hard_kmmlu_yaml
task: kmmlu_hard_direct_accounting
lm-evaluation-harness/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_aviation_engineering_and_maintenance.yaml ADDED
@@ -0,0 +1,3 @@
dataset_name: aviation_engineering_and_maintenance
include: _direct_hard_kmmlu_yaml
task: kmmlu_hard_direct_aviation_engineering_and_maintenance
lm-evaluation-harness/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_criminal_law.yaml ADDED
@@ -0,0 +1,3 @@
dataset_name: criminal_law
include: _direct_hard_kmmlu_yaml
task: kmmlu_hard_direct_criminal_law
lm-evaluation-harness/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_ecology.yaml ADDED
@@ -0,0 +1,3 @@
dataset_name: ecology
include: _direct_hard_kmmlu_yaml
task: kmmlu_hard_direct_ecology
lm-evaluation-harness/lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_economics.yaml ADDED
@@ -0,0 +1,3 @@
dataset_name: economics
include: _direct_hard_kmmlu_yaml
task: kmmlu_hard_direct_economics