Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- lm-evaluation/lm_eval/tasks/belebele/_generate_configs.py +66 -0
- lm-evaluation/lm_eval/tasks/belebele/belebele_acm_Arab.yaml +4 -0
- lm-evaluation/lm_eval/tasks/belebele/belebele_arz_Arab.yaml +4 -0
- lm-evaluation/lm_eval/tasks/belebele/belebele_bod_Tibt.yaml +4 -0
- lm-evaluation/lm_eval/tasks/belebele/belebele_ces_Latn.yaml +4 -0
- lm-evaluation/lm_eval/tasks/belebele/belebele_hin_Latn.yaml +4 -0
- lm-evaluation/lm_eval/tasks/belebele/belebele_hun_Latn.yaml +4 -0
- lm-evaluation/lm_eval/tasks/belebele/belebele_khk_Cyrl.yaml +4 -0
- lm-evaluation/lm_eval/tasks/belebele/belebele_kor_Hang.yaml +4 -0
- lm-evaluation/lm_eval/tasks/belebele/belebele_lvs_Latn.yaml +4 -0
- lm-evaluation/lm_eval/tasks/belebele/belebele_mal_Mlym.yaml +4 -0
- lm-evaluation/lm_eval/tasks/belebele/belebele_nso_Latn.yaml +4 -0
- lm-evaluation/lm_eval/tasks/belebele/belebele_pbt_Arab.yaml +4 -0
- lm-evaluation/lm_eval/tasks/belebele/belebele_sin_Latn.yaml +4 -0
- lm-evaluation/lm_eval/tasks/belebele/belebele_sot_Latn.yaml +4 -0
- lm-evaluation/lm_eval/tasks/belebele/belebele_tgl_Latn.yaml +4 -0
- lm-evaluation/lm_eval/tasks/belebele/belebele_wol_Latn.yaml +4 -0
- lm-evaluation/lm_eval/tasks/french_bench/README.md +94 -0
- lm-evaluation/lm_eval/tasks/french_bench/_default_template_yaml +4 -0
- lm-evaluation/lm_eval/tasks/french_bench/french_bench_arc_challenge.yaml +21 -0
- lm-evaluation/lm_eval/tasks/french_bench/french_bench_boolqa.yaml +23 -0
- lm-evaluation/lm_eval/tasks/french_bench/french_bench_fquadv2.yaml +29 -0
- lm-evaluation/lm_eval/tasks/french_bench/french_bench_fquadv2_bool.yaml +21 -0
- lm-evaluation/lm_eval/tasks/french_bench/french_bench_fquadv2_genq.yaml +31 -0
- lm-evaluation/lm_eval/tasks/french_bench/french_bench_fquadv2_hasAns.yaml +34 -0
- lm-evaluation/lm_eval/tasks/french_bench/french_bench_grammar.yaml +20 -0
- lm-evaluation/lm_eval/tasks/french_bench/french_bench_hellaswag.yaml +20 -0
- lm-evaluation/lm_eval/tasks/french_bench/french_bench_multifquad.yaml +34 -0
- lm-evaluation/lm_eval/tasks/french_bench/french_bench_opus_perplexity.yaml +23 -0
- lm-evaluation/lm_eval/tasks/french_bench/french_bench_orangesum_title.yaml +28 -0
- lm-evaluation/lm_eval/tasks/french_bench/french_bench_reading_comp.yaml +22 -0
- lm-evaluation/lm_eval/tasks/french_bench/french_bench_topic_based_nli.yaml +23 -0
- lm-evaluation/lm_eval/tasks/french_bench/french_bench_trivia.yaml +36 -0
- lm-evaluation/lm_eval/tasks/french_bench/french_bench_vocab.yaml +20 -0
- lm-evaluation/lm_eval/tasks/french_bench/french_bench_wikitext_fr.yaml +25 -0
- lm-evaluation/lm_eval/tasks/french_bench/french_bench_xnli.yaml +21 -0
- lm-evaluation/lm_eval/tasks/french_bench/preprocess_wikitext.py +48 -0
- lm-evaluation/lm_eval/tasks/french_bench/utils.py +102 -0
- lm-evaluation/lm_eval/tasks/ifeval/instructions_registry.py +167 -0
- lm-evaluation/lm_eval/tasks/ifeval/instructions_util.py +1682 -0
- lm-evaluation/lm_eval/tasks/ifeval/utils.py +139 -0
- lm-evaluation/lm_eval/tasks/tmmluplus/README.md +47 -0
- lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_accounting.yaml +7 -0
- lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_advance_chemistry.yaml +7 -0
- lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_basic_medical_science.yaml +7 -0
- lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_chinese_language_and_literature.yaml +7 -0
- lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_computer_science.yaml +7 -0
- lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_educational_psychology.yaml +7 -0
- lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_engineering_math.yaml +7 -0
- lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_fire_science.yaml +7 -0
lm-evaluation/lm_eval/tasks/belebele/_generate_configs.py
ADDED
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Take in a YAML, and output all other splits with this YAML
|
3 |
+
"""
|
4 |
+
import argparse
|
5 |
+
import os
|
6 |
+
|
7 |
+
import requests
|
8 |
+
import yaml
|
9 |
+
from tqdm import tqdm
|
10 |
+
|
11 |
+
from lm_eval.utils import logging
|
12 |
+
|
13 |
+
|
14 |
+
API_URL = "https://datasets-server.huggingface.co/splits?dataset=facebook/belebele"
|
15 |
+
|
16 |
+
|
17 |
+
def parse_args():
|
18 |
+
parser = argparse.ArgumentParser()
|
19 |
+
parser.add_argument("--base_yaml_path", required=True)
|
20 |
+
parser.add_argument("--save_prefix_path", default="belebele")
|
21 |
+
parser.add_argument("--cot_prompt_path", default=None)
|
22 |
+
parser.add_argument("--task_prefix", default="")
|
23 |
+
return parser.parse_args()
|
24 |
+
|
25 |
+
|
26 |
+
if __name__ == "__main__":
|
27 |
+
args = parse_args()
|
28 |
+
|
29 |
+
# get filename of base_yaml so we can `"include": ` it in our other YAMLs.
|
30 |
+
base_yaml_name = os.path.split(args.base_yaml_path)[-1]
|
31 |
+
with open(args.base_yaml_path, encoding="utf-8") as f:
|
32 |
+
base_yaml = yaml.full_load(f)
|
33 |
+
|
34 |
+
if args.cot_prompt_path is not None:
|
35 |
+
import json
|
36 |
+
|
37 |
+
with open(args.cot_prompt_path, encoding="utf-8") as f:
|
38 |
+
cot_file = json.load(f)
|
39 |
+
|
40 |
+
def query():
|
41 |
+
response = requests.get(API_URL)
|
42 |
+
return response.json()["splits"]
|
43 |
+
|
44 |
+
print(query())
|
45 |
+
languages = [split["split"] for split in query()]
|
46 |
+
|
47 |
+
for lang in tqdm([lang for lang in languages if "default" not in lang]):
|
48 |
+
yaml_dict = {
|
49 |
+
"include": base_yaml_name,
|
50 |
+
"task": f"belebele_{args.task_prefix}_{lang}"
|
51 |
+
if args.task_prefix != ""
|
52 |
+
else f"belebele_{lang}",
|
53 |
+
"test_split": lang,
|
54 |
+
"fewshot_split": lang,
|
55 |
+
}
|
56 |
+
|
57 |
+
file_save_path = args.save_prefix_path + f"_{lang}.yaml"
|
58 |
+
logging.info(f"Saving yaml for subset {lang} to {file_save_path}")
|
59 |
+
with open(file_save_path, "w", encoding="utf-8") as yaml_file:
|
60 |
+
yaml.dump(
|
61 |
+
yaml_dict,
|
62 |
+
yaml_file,
|
63 |
+
width=float("inf"),
|
64 |
+
allow_unicode=True,
|
65 |
+
default_style='"',
|
66 |
+
)
|
lm-evaluation/lm_eval/tasks/belebele/belebele_acm_Arab.yaml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"fewshot_split": "acm_Arab"
|
2 |
+
"include": "_default_template_yaml"
|
3 |
+
"task": "belebele_acm_Arab"
|
4 |
+
"test_split": "acm_Arab"
|
lm-evaluation/lm_eval/tasks/belebele/belebele_arz_Arab.yaml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"fewshot_split": "arz_Arab"
|
2 |
+
"include": "_default_template_yaml"
|
3 |
+
"task": "belebele_arz_Arab"
|
4 |
+
"test_split": "arz_Arab"
|
lm-evaluation/lm_eval/tasks/belebele/belebele_bod_Tibt.yaml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"fewshot_split": "bod_Tibt"
|
2 |
+
"include": "_default_template_yaml"
|
3 |
+
"task": "belebele_bod_Tibt"
|
4 |
+
"test_split": "bod_Tibt"
|
lm-evaluation/lm_eval/tasks/belebele/belebele_ces_Latn.yaml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"fewshot_split": "ces_Latn"
|
2 |
+
"include": "_default_template_yaml"
|
3 |
+
"task": "belebele_ces_Latn"
|
4 |
+
"test_split": "ces_Latn"
|
lm-evaluation/lm_eval/tasks/belebele/belebele_hin_Latn.yaml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"fewshot_split": "hin_Latn"
|
2 |
+
"include": "_default_template_yaml"
|
3 |
+
"task": "belebele_hin_Latn"
|
4 |
+
"test_split": "hin_Latn"
|
lm-evaluation/lm_eval/tasks/belebele/belebele_hun_Latn.yaml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"fewshot_split": "hun_Latn"
|
2 |
+
"include": "_default_template_yaml"
|
3 |
+
"task": "belebele_hun_Latn"
|
4 |
+
"test_split": "hun_Latn"
|
lm-evaluation/lm_eval/tasks/belebele/belebele_khk_Cyrl.yaml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"fewshot_split": "khk_Cyrl"
|
2 |
+
"include": "_default_template_yaml"
|
3 |
+
"task": "belebele_khk_Cyrl"
|
4 |
+
"test_split": "khk_Cyrl"
|
lm-evaluation/lm_eval/tasks/belebele/belebele_kor_Hang.yaml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"fewshot_split": "kor_Hang"
|
2 |
+
"include": "_default_template_yaml"
|
3 |
+
"task": "belebele_kor_Hang"
|
4 |
+
"test_split": "kor_Hang"
|
lm-evaluation/lm_eval/tasks/belebele/belebele_lvs_Latn.yaml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"fewshot_split": "lvs_Latn"
|
2 |
+
"include": "_default_template_yaml"
|
3 |
+
"task": "belebele_lvs_Latn"
|
4 |
+
"test_split": "lvs_Latn"
|
lm-evaluation/lm_eval/tasks/belebele/belebele_mal_Mlym.yaml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"fewshot_split": "mal_Mlym"
|
2 |
+
"include": "_default_template_yaml"
|
3 |
+
"task": "belebele_mal_Mlym"
|
4 |
+
"test_split": "mal_Mlym"
|
lm-evaluation/lm_eval/tasks/belebele/belebele_nso_Latn.yaml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"fewshot_split": "nso_Latn"
|
2 |
+
"include": "_default_template_yaml"
|
3 |
+
"task": "belebele_nso_Latn"
|
4 |
+
"test_split": "nso_Latn"
|
lm-evaluation/lm_eval/tasks/belebele/belebele_pbt_Arab.yaml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"fewshot_split": "pbt_Arab"
|
2 |
+
"include": "_default_template_yaml"
|
3 |
+
"task": "belebele_pbt_Arab"
|
4 |
+
"test_split": "pbt_Arab"
|
lm-evaluation/lm_eval/tasks/belebele/belebele_sin_Latn.yaml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"fewshot_split": "sin_Latn"
|
2 |
+
"include": "_default_template_yaml"
|
3 |
+
"task": "belebele_sin_Latn"
|
4 |
+
"test_split": "sin_Latn"
|
lm-evaluation/lm_eval/tasks/belebele/belebele_sot_Latn.yaml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"fewshot_split": "sot_Latn"
|
2 |
+
"include": "_default_template_yaml"
|
3 |
+
"task": "belebele_sot_Latn"
|
4 |
+
"test_split": "sot_Latn"
|
lm-evaluation/lm_eval/tasks/belebele/belebele_tgl_Latn.yaml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"fewshot_split": "tgl_Latn"
|
2 |
+
"include": "_default_template_yaml"
|
3 |
+
"task": "belebele_tgl_Latn"
|
4 |
+
"test_split": "tgl_Latn"
|
lm-evaluation/lm_eval/tasks/belebele/belebele_wol_Latn.yaml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"fewshot_split": "wol_Latn"
|
2 |
+
"include": "_default_template_yaml"
|
3 |
+
"task": "belebele_wol_Latn"
|
4 |
+
"test_split": "wol_Latn"
|
lm-evaluation/lm_eval/tasks/french_bench/README.md
ADDED
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# FrenchBench
|
2 |
+
|
3 |
+
### Paper
|
4 |
+
|
5 |
+
FrenchBench is a benchmark for evaluating French language models, introduced in the paper
|
6 |
+
[CroissantLLM: A Truly Bilingual French-English Language Model](https://arxiv.org/abs/2402.00786).
|
7 |
+
It is a collection of tasks that evaluate the ability of a language model to understand and generate French text.
|
8 |
+
This benchmark is constructed both from openly available datasets, as well as newly released manually annotated data.
|
9 |
+
|
10 |
+
### Citation
|
11 |
+
|
12 |
+
```bibtex
|
13 |
+
@misc{faysse2024croissantllm,
|
14 |
+
title={CroissantLLM: A Truly Bilingual French-English Language Model},
|
15 |
+
author={Manuel Faysse and Patrick Fernandes and Nuno M. Guerreiro and António Loison and Duarte M. Alves and Caio Corro and Nicolas Boizard and João Alves and Ricardo Rei and Pedro H. Martins and Antoni Bigata Casademunt and François Yvon and André F. T. Martins and Gautier Viaud and Céline Hudelot and Pierre Colombo},
|
16 |
+
year={2024},
|
17 |
+
eprint={2402.00786},
|
18 |
+
archivePrefix={arXiv},
|
19 |
+
primaryClass={cs.CL}
|
20 |
+
}
|
21 |
+
```
|
22 |
+
|
23 |
+
### Groups and Tasks
|
24 |
+
|
25 |
+
#### Groups
|
26 |
+
|
27 |
+
- `french_bench`: All tasks (non-perplexity based)
|
28 |
+
- `french_bench_gen`: All official generative tasks
|
29 |
+
- `french_bench_mc`: All official multiple choice tasks
|
30 |
+
- `french_bench_perplexity`: All perplexity-based tasks (0 shot is recommended)
|
31 |
+
- `french_bench_extra`: All extra tasks
|
32 |
+
|
33 |
+
#### Tasks
|
34 |
+
|
35 |
+
|
36 |
+
The following tasks evaluate tasks on the French Bench dataset using various scoring methods.
|
37 |
+
- french_bench_boolqa
|
38 |
+
- french_bench_fquadv2
|
39 |
+
- french_bench_fquadv2_bool
|
40 |
+
- french_bench_fquadv2_genq
|
41 |
+
- french_bench_fquadv2_hasAns
|
42 |
+
- french_bench_topic_based_nli
|
43 |
+
- french_bench_multifquad
|
44 |
+
- french_bench_grammar
|
45 |
+
- french_bench_vocab
|
46 |
+
- french_bench_reading_comp
|
47 |
+
- french_bench_xnli (modified XNLI)
|
48 |
+
- french_bench_orangesum_abstract
|
49 |
+
- french_bench_orangesum_title
|
50 |
+
- french_bench_trivia
|
51 |
+
- french_bench_hellaswag
|
52 |
+
- french_bench_arc_challenge
|
53 |
+
|
54 |
+
The french bench also includes other tasks from various benchmarks:
|
55 |
+
- `belebele_fra_Latn`: Belebele French
|
56 |
+
- `wmt14-en-fr`: WMT14 English-French
|
57 |
+
- `wmt14-fr-en`: WMT14 French-English
|
58 |
+
|
59 |
+
# Not to use in few-shot
|
60 |
+
- `crows_pairs_french`: Crows Pairs French
|
61 |
+
- `french_bench_opus_perplexity`: Opus Perplexity
|
62 |
+
|
63 |
+
|
64 |
+
### Usage
|
65 |
+
|
66 |
+
```bash
|
67 |
+
# openai
|
68 |
+
lm_eval --model openai-completions --model_args engine=text-davinci-003 --tasks french_bench --limit 100 --num_fewshot 3 --batch_size auto --output_path data/french_bench/davinci-003/results_french_bench_3shot.json
|
69 |
+
lm_eval --model openai-completions --model_args engine=text-davinci-003 --tasks french_bench_opus_perplexity,crows_pairs_french --limit 100 --batch_size auto --output_path data/french_bench/davinci-003/results_french_bench2_0shot.json
|
70 |
+
|
71 |
+
|
72 |
+
lm_eval --model hf --model_args pretrained=gpt2 --tasks french_bench --device cuda:0 --limit 100 --num_fewshot 3 --batch_size 8 --output_path data/french_bench/gpt2/results_french_bench_3shot.json
|
73 |
+
lm_eval --model hf --model_args pretrained=gpt2 --tasks french_bench_opus_perplexity,crows_pairs_french --device cuda:0 --limit 100 --batch_size auto --output_path data/french_bench/gpt2/results_french_bench2_0shot.json
|
74 |
+
|
75 |
+
lm_eval --model hf --model_args pretrained=meta-llama/Llama-2-7b-hf --tasks french_bench --device cuda:0 --limit 100 --num_fewshot 3 --batch_size 4 --output_path data/french_bench/llama-2-7b-hf/results_french_bench_3shot.json
|
76 |
+
lm_eval --model hf --model_args pretrained=meta-llama/Llama-2-7b-hf --tasks french_bench_opus_perplexity,crows_pairs_french --device cuda:0 --limit 100 --batch_size auto --output_path data/french_bench/llama-2-7b-hf/results_french_bench2_0shot.json
|
77 |
+
```
|
78 |
+
|
79 |
+
HF and Accelerate options can be added when loading a model:
|
80 |
+
```bash
|
81 |
+
accelerate launch -m lm_eval --model hf --model_args pretrained=meta-llama/Llama-2-7b-hf,dtype="float16" --tasks french_bench
|
82 |
+
```
|
83 |
+
|
84 |
+
### Checklist
|
85 |
+
|
86 |
+
* [x] Is the task an existing benchmark in the literature?
|
87 |
+
* [x] Have you referenced the original paper that introduced the task?
|
88 |
+
* [x] If yes, does the original paper provide a reference implementation?
|
89 |
+
* [x] Yes, original implementation contributed by author of the benchmark
|
90 |
+
|
91 |
+
If other tasks on this dataset are already supported:
|
92 |
+
* [x] Is the "Main" variant of this task clearly denoted?
|
93 |
+
* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
|
94 |
+
* [x] Have you noted which, if any, published evaluation setups are matched by this variant?
|
lm-evaluation/lm_eval/tasks/french_bench/_default_template_yaml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
test_split: test
|
2 |
+
fewshot_split: valid
|
3 |
+
fewshot_config:
|
4 |
+
sampler: first_n
|
lm-evaluation/lm_eval/tasks/french_bench/french_bench_arc_challenge.yaml
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
group:
|
2 |
+
- french_bench
|
3 |
+
- french_bench_mc
|
4 |
+
task: french_bench_arc_challenge
|
5 |
+
dataset_path: manu/french_bench_arc_challenge
|
6 |
+
output_type: multiple_choice
|
7 |
+
training_split: train
|
8 |
+
validation_split: validation
|
9 |
+
test_split: test
|
10 |
+
doc_to_text: "Question: {{question}}\nRéponse:"
|
11 |
+
doc_to_target: "{{['A', 'B', 'C', 'D'].index(answerKey)}}"
|
12 |
+
doc_to_choice: "{{choices}}"
|
13 |
+
should_decontaminate: true
|
14 |
+
doc_to_decontamination_query: "Question: {{question}}\nRéponse:"
|
15 |
+
metric_list:
|
16 |
+
- metric: acc
|
17 |
+
aggregation: mean
|
18 |
+
higher_is_better: true
|
19 |
+
- metric: acc_norm
|
20 |
+
aggregation: mean
|
21 |
+
higher_is_better: true
|
lm-evaluation/lm_eval/tasks/french_bench/french_bench_boolqa.yaml
ADDED
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
include: "_default_template_yaml"
|
2 |
+
group:
|
3 |
+
- french_bench
|
4 |
+
- french_bench_extra
|
5 |
+
description: "D'après l'information dans le contexte donné, quelle est la réponse à la question ?"
|
6 |
+
task: french_bench_boolqa
|
7 |
+
dataset_path: manu/french_boolq
|
8 |
+
output_type: multiple_choice
|
9 |
+
validation_split: valid
|
10 |
+
doc_to_text: "\nContexte: {{passage}}\n\nQuestion: {{question}}\n"
|
11 |
+
doc_to_choice: ["Oui", "Non"]
|
12 |
+
# doc_to_text: "\nContexte: {{passage}}\n\nQuestion: {{question}}\n\nD'après l'information dans le contexte, la réponse est:\nA. Oui \nB. Non\n\nRéponse:"
|
13 |
+
# doc_to_choice: ["A", "B"]
|
14 |
+
doc_to_target: "{{[1, 0].index(label)}}"
|
15 |
+
should_decontaminate: true
|
16 |
+
doc_to_decontamination_query: passage
|
17 |
+
metric_list:
|
18 |
+
- metric: acc
|
19 |
+
aggregation: mean
|
20 |
+
higher_is_better: true
|
21 |
+
- metric: acc_norm
|
22 |
+
aggregation: mean
|
23 |
+
higher_is_better: true
|
lm-evaluation/lm_eval/tasks/french_bench/french_bench_fquadv2.yaml
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
include: "_default_template_yaml"
|
2 |
+
group:
|
3 |
+
- french_bench
|
4 |
+
- french_bench_extra
|
5 |
+
description: "D'après l'information dans le contexte donné, donne la réponse à la question en citant quelques mots du contexte. Si il est impossible de répondre avec les informations du contexte, répond 'Impossible'."
|
6 |
+
task: french_bench_fquadv2
|
7 |
+
dataset_path: manu/fquad2_test
|
8 |
+
output_type: generate_until
|
9 |
+
validation_split: valid
|
10 |
+
doc_to_text: "\nContexte: {{context}}\n\nQuestion: {{question}}\n\nRéponse:"
|
11 |
+
doc_to_target: "{% if answers.text| length > 0 %}{{answers.text[0]}}{% else %}{{['Impossible']}}{% endif %}"
|
12 |
+
target_delimiter: " "
|
13 |
+
should_decontaminate: true
|
14 |
+
doc_to_decontamination_query: context
|
15 |
+
generation_kwargs:
|
16 |
+
until:
|
17 |
+
- "\n"
|
18 |
+
# filter_list:
|
19 |
+
# - name: remove_whitespace
|
20 |
+
# filter:
|
21 |
+
# - function: remove_whitespace
|
22 |
+
# - function: take_first
|
23 |
+
metric_list:
|
24 |
+
- metric: !function utils.exact
|
25 |
+
aggregation: mean
|
26 |
+
higher_is_better: true
|
27 |
+
- metric: !function utils.f1
|
28 |
+
aggregation: mean
|
29 |
+
higher_is_better: true
|
lm-evaluation/lm_eval/tasks/french_bench/french_bench_fquadv2_bool.yaml
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
include: "_default_template_yaml"
|
2 |
+
group:
|
3 |
+
- french_bench
|
4 |
+
- french_bench_extra
|
5 |
+
description: "D'après l'information présente dans le contexte, est il possible de répondre à la question ?"
|
6 |
+
task: french_bench_fquadv2_bool
|
7 |
+
dataset_path: manu/fquad2_test
|
8 |
+
output_type: multiple_choice
|
9 |
+
validation_split: valid
|
10 |
+
doc_to_text: "\nContexte: {{context}}\n\nQuestion: {{question}}\n\nD'après l'information présente dans le contexte, répondre à la question est:\nA. Possible \nB. Impossible\n\nRéponse:"
|
11 |
+
doc_to_choice: ["A", "B"]
|
12 |
+
doc_to_target: "{{[False, True].index(is_impossible)}}"
|
13 |
+
should_decontaminate: true
|
14 |
+
doc_to_decontamination_query: context
|
15 |
+
metric_list:
|
16 |
+
- metric: acc
|
17 |
+
aggregation: mean
|
18 |
+
higher_is_better: true
|
19 |
+
- metric: acc_norm
|
20 |
+
aggregation: mean
|
21 |
+
higher_is_better: true
|
lm-evaluation/lm_eval/tasks/french_bench/french_bench_fquadv2_genq.yaml
ADDED
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
include: "_default_template_yaml"
|
2 |
+
group:
|
3 |
+
- french_bench
|
4 |
+
- french_bench_gen
|
5 |
+
description: "D'après l'information dans le contexte donné, quelle question a été posée pour obtenir la réponse donnée ?"
|
6 |
+
task: french_bench_fquadv2_genq
|
7 |
+
dataset_path: manu/fquad2_test
|
8 |
+
output_type: generate_until
|
9 |
+
validation_split: valid_hasAns
|
10 |
+
test_split: test_hasAns
|
11 |
+
fewshot_split: valid_hasAns
|
12 |
+
doc_to_text: "\nContexte: {{context}}\n\nRéponse: {% if answers.text| length > 0 %}{{answers.text[0]}}{% else %}{{['Impossible']}}{% endif %}\n\nQuestion:"
|
13 |
+
doc_to_target: "{{question}}"
|
14 |
+
target_delimiter: " "
|
15 |
+
should_decontaminate: true
|
16 |
+
doc_to_decontamination_query: question
|
17 |
+
generation_kwargs:
|
18 |
+
until:
|
19 |
+
- "\n"
|
20 |
+
# filter_list:
|
21 |
+
# - name: remove_whitespace
|
22 |
+
# filter:
|
23 |
+
# - function: remove_whitespace
|
24 |
+
# - function: take_first
|
25 |
+
metric_list:
|
26 |
+
- metric: !function utils.rouge1
|
27 |
+
higher_is_better: true
|
28 |
+
aggregation: !function utils.rouge1_agg
|
29 |
+
- metric: !function utils.f1
|
30 |
+
aggregation: mean
|
31 |
+
higher_is_better: true
|
lm-evaluation/lm_eval/tasks/french_bench/french_bench_fquadv2_hasAns.yaml
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
include: "_default_template_yaml"
|
2 |
+
group:
|
3 |
+
- french_bench
|
4 |
+
- french_bench_gen
|
5 |
+
description: "D'après l'information dans le contexte donné, donne la réponse à la question en citant quelques mots du contexte. Si il est impossible de répondre avec les informations du contexte, répond 'Impossible'."
|
6 |
+
task: french_bench_fquadv2_hasAns
|
7 |
+
dataset_path: manu/fquad2_test
|
8 |
+
output_type: generate_until
|
9 |
+
validation_split: valid_hasAns
|
10 |
+
test_split: test_hasAns
|
11 |
+
fewshot_split: valid_hasAns
|
12 |
+
doc_to_text: "\nContexte: {{context}}\n\nQuestion: {{question}}\n\nRéponse:"
|
13 |
+
doc_to_target: "{% if answers.text| length > 0 %}{{answers.text[0]}}{% else %}{{['Impossible']}}{% endif %}"
|
14 |
+
target_delimiter: " "
|
15 |
+
should_decontaminate: true
|
16 |
+
doc_to_decontamination_query: context
|
17 |
+
generation_kwargs:
|
18 |
+
until:
|
19 |
+
- "\n"
|
20 |
+
# filter_list:
|
21 |
+
# - name: remove_whitespace
|
22 |
+
# filter:
|
23 |
+
# - function: remove_whitespace
|
24 |
+
# - function: take_first
|
25 |
+
metric_list:
|
26 |
+
- metric: !function utils.exact
|
27 |
+
aggregation: mean
|
28 |
+
higher_is_better: true
|
29 |
+
- metric: !function utils.f1
|
30 |
+
aggregation: mean
|
31 |
+
higher_is_better: true
|
32 |
+
- metric: !function utils.rouge1
|
33 |
+
higher_is_better: true
|
34 |
+
aggregation: !function utils.rouge1_agg
|
lm-evaluation/lm_eval/tasks/french_bench/french_bench_grammar.yaml
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
include: "_default_template_yaml"
|
2 |
+
group:
|
3 |
+
- french_bench
|
4 |
+
- french_bench_mc
|
5 |
+
description: "Répond au mieux en complétant la question avec une des réponses proposées."
|
6 |
+
dataset_path: manu/french-bench-grammar-vocab-reading
|
7 |
+
output_type: multiple_choice
|
8 |
+
validation_split: Grammar
|
9 |
+
fewshot_split: Grammar
|
10 |
+
test_split: Grammar
|
11 |
+
#doc_to_text: "Question: {{question.strip()}}\nA: {{answerA}}\nB: {{answerB}}\nC: {{answerC}}\nD: {{answerD}}\nRéponse:"
|
12 |
+
#doc_to_choice: ["A", "B", "C", "D"]
|
13 |
+
doc_to_text: "La phrase suivante est correcte grammaticalement:\n"
|
14 |
+
doc_to_choice: "{{[question.replace('<...>', answerA), question.replace('<...>', answerB), question.replace('<...>', answerC), question.replace('<...>', answerD)]}}"
|
15 |
+
doc_to_target: '{{["answerA", "answerB", "answerC", "answerD"].index("answer" + answer)}}'
|
16 |
+
task: french_bench_grammar
|
17 |
+
metric_list:
|
18 |
+
- metric: acc
|
19 |
+
aggregation: mean
|
20 |
+
higher_is_better: true
|
lm-evaluation/lm_eval/tasks/french_bench/french_bench_hellaswag.yaml
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
group:
|
2 |
+
- french_bench
|
3 |
+
- french_bench_mc
|
4 |
+
task: french_bench_hellaswag
|
5 |
+
dataset_path: manu/french_bench_hellaswag
|
6 |
+
output_type: multiple_choice
|
7 |
+
training_split: validation
|
8 |
+
validation_split: validation
|
9 |
+
test_split: null
|
10 |
+
process_docs: !function utils.process_docs
|
11 |
+
doc_to_text: "{{query}}"
|
12 |
+
doc_to_target: "{{label}}"
|
13 |
+
doc_to_choice: "{{choices}}"
|
14 |
+
metric_list:
|
15 |
+
- metric: acc
|
16 |
+
aggregation: mean
|
17 |
+
higher_is_better: true
|
18 |
+
- metric: acc_norm
|
19 |
+
aggregation: mean
|
20 |
+
higher_is_better: true
|
lm-evaluation/lm_eval/tasks/french_bench/french_bench_multifquad.yaml
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
include: "_default_template_yaml"
|
2 |
+
group:
|
3 |
+
- french_bench
|
4 |
+
- french_bench_gen
|
5 |
+
description: "D'après l'information dans le contexte donné, donne la réponse à la question en citant quelques extraits du contexte."
|
6 |
+
task: french_bench_multifquad
|
7 |
+
dataset_path: manu/multifquad_test
|
8 |
+
output_type: generate_until
|
9 |
+
validation_split: valid
|
10 |
+
test_split: test
|
11 |
+
fewshot_split: valid
|
12 |
+
doc_to_text: "\nContexte: {{context}}\n\nQuestion: {{question}}\n\nRéponse:"
|
13 |
+
doc_to_target: "{{', '.join(answers.text)}}"
|
14 |
+
target_delimiter: " "
|
15 |
+
should_decontaminate: true
|
16 |
+
doc_to_decontamination_query: context
|
17 |
+
generation_kwargs:
|
18 |
+
until:
|
19 |
+
- "\n"
|
20 |
+
# filter_list:
|
21 |
+
# - name: remove_whitespace
|
22 |
+
# filter:
|
23 |
+
# - function: remove_whitespace
|
24 |
+
# - function: take_first
|
25 |
+
metric_list:
|
26 |
+
- metric: !function utils.exact
|
27 |
+
aggregation: mean
|
28 |
+
higher_is_better: true
|
29 |
+
- metric: !function utils.f1
|
30 |
+
aggregation: mean
|
31 |
+
higher_is_better: true
|
32 |
+
- metric: !function utils.rouge1
|
33 |
+
higher_is_better: true
|
34 |
+
aggregation: !function utils.rouge1_agg
|
lm-evaluation/lm_eval/tasks/french_bench/french_bench_opus_perplexity.yaml
ADDED
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
group:
|
2 |
+
- french_bench_perplexity
|
3 |
+
task: french_bench_opus_perplexity
|
4 |
+
dataset_path: manu/opus100-en-fr
|
5 |
+
output_type: loglikelihood_rolling
|
6 |
+
test_split: test
|
7 |
+
fewshot_split: validation
|
8 |
+
validation_split: validation
|
9 |
+
num_fewshot: 0
|
10 |
+
doc_to_text: ""
|
11 |
+
doc_to_target: "{{text}}"
|
12 |
+
should_decontaminate: true
|
13 |
+
doc_to_decontamination_query: "{{text}}"
|
14 |
+
metric_list:
|
15 |
+
- metric: word_perplexity
|
16 |
+
aggregation: weighted_perplexity
|
17 |
+
higher_is_better: false
|
18 |
+
- metric: byte_perplexity
|
19 |
+
aggregation: weighted_perplexity
|
20 |
+
higher_is_better: false
|
21 |
+
- metric: bits_per_byte
|
22 |
+
aggregation: bits_per_byte
|
23 |
+
higher_is_better: false
|
lm-evaluation/lm_eval/tasks/french_bench/french_bench_orangesum_title.yaml
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
include: "_default_template_yaml"
|
2 |
+
group:
|
3 |
+
- french_bench
|
4 |
+
- french_bench_extra
|
5 |
+
description: "Trouve le titre de l'article."
|
6 |
+
task: french_bench_orangesum_title
|
7 |
+
dataset_path: orange_sum
|
8 |
+
dataset_name: title
|
9 |
+
output_type: generate_until
|
10 |
+
validation_split: validation
|
11 |
+
fewshot_split: validation
|
12 |
+
doc_to_text: "\nArticle: {{text}}\n\nTitre:"
|
13 |
+
doc_to_target: "{{summary}}"
|
14 |
+
target_delimiter: " "
|
15 |
+
should_decontaminate: true
|
16 |
+
doc_to_decontamination_query: summary
|
17 |
+
generation_kwargs:
|
18 |
+
until:
|
19 |
+
- "\n"
|
20 |
+
# filter_list:
|
21 |
+
# - name: remove_whitespace
|
22 |
+
# filter:
|
23 |
+
# - function: remove_whitespace
|
24 |
+
# - function: take_first
|
25 |
+
metric_list:
|
26 |
+
- metric: !function utils.rouge1
|
27 |
+
higher_is_better: true
|
28 |
+
aggregation: !function utils.rouge1_agg
|
lm-evaluation/lm_eval/tasks/french_bench/french_bench_reading_comp.yaml
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
include: "_default_template_yaml"
|
2 |
+
group:
|
3 |
+
- french_bench
|
4 |
+
- french_bench_extra
|
5 |
+
# description: "Répond au mieux en complétant la question avec une des réponses proposées."
|
6 |
+
dataset_path: manu/french-bench-grammar-vocab-reading
|
7 |
+
output_type: multiple_choice
|
8 |
+
validation_split: Reading
|
9 |
+
fewshot_split: Reading
|
10 |
+
test_split: Reading
|
11 |
+
# doc_to_text: "Context: {{context}}\nQuestion: {{question.strip()}}\nA: {{answerA}}\nB: {{answerB}}\nC: {{answerC}}\nD: {{answerD}}\nRéponse:"
|
12 |
+
# doc_to_choice: "{{['A: '+answerA, 'B: '+answerB, 'C: '+answerC, 'D: '+answerD]}}"
|
13 |
+
doc_to_text: "Context: {{context}}\n\n"
|
14 |
+
doc_to_choice: "{{[question.replace('<...>', answerA) if '<...>' in question else question + ' ' +answerA, question.replace('<...>', answerB) if '<...>' in question else question + ' ' + answerB, question.replace('<...>', answerC) if '<...>' in question else question + ' ' + answerC, question.replace('<...>', answerD) if '<...>' in question else question + ' ' + answerD]}}"
|
15 |
+
doc_to_target: '{{["answerA", "answerB", "answerC", "answerD"].index("answer" + answer)}}'
|
16 |
+
# doc_to_choice: "{{['A: '+answerA, 'B: '+answerB, 'C: '+answerC, 'D: '+answerD]}}"
|
17 |
+
# doc_to_target: answer
|
18 |
+
task: french_bench_reading_comp
|
19 |
+
metric_list:
|
20 |
+
- metric: acc
|
21 |
+
aggregation: mean
|
22 |
+
higher_is_better: true
|
lm-evaluation/lm_eval/tasks/french_bench/french_bench_topic_based_nli.yaml
ADDED
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
include: "_default_template_yaml"
|
2 |
+
group:
|
3 |
+
- french_bench
|
4 |
+
- french_bench_extra
|
5 |
+
description: "A propos du thème spécifié, l'avis client est il positif, négatif, ou neutre ?"
|
6 |
+
task: french_bench_topic_based_nli
|
7 |
+
dataset_path: manu/topic_based_nli_test
|
8 |
+
output_type: multiple_choice
|
9 |
+
validation_split: valid
|
10 |
+
# doc_to_text: "\nAvis Client: {{text}}\n\nEn considèrant uniquement le thème \"{{topic}}\", l'avis client est plutot:\nA. Positif \nB. Négatif\nC. Mitigé \nD. Neutre\nE. Absent\n\nRéponse:"
|
11 |
+
# doc_to_choice: ["A", "B", "C", "D", "E"]
|
12 |
+
doc_to_text: "\nAvis Client: {{text}}\n\nA propos du thème \"{{topic}}\", l'avis client est"
|
13 |
+
doc_to_choice: ['positif', 'négatif', 'neutre']
|
14 |
+
doc_to_target: "{{['positif', 'negatif', 'neutre'].index(polarity)}}"
|
15 |
+
should_decontaminate: true
|
16 |
+
doc_to_decontamination_query: texte
|
17 |
+
metric_list:
|
18 |
+
- metric: acc
|
19 |
+
aggregation: mean
|
20 |
+
higher_is_better: true
|
21 |
+
- metric: acc_norm
|
22 |
+
aggregation: mean
|
23 |
+
higher_is_better: true
|
lm-evaluation/lm_eval/tasks/french_bench/french_bench_trivia.yaml
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Open-ended French trivia QA: the model generates an answer up to the first
# newline; scored with custom exact / F1 / ROUGE-1 / substring-inclusion metrics
# defined in utils.py.
include: "_default_template_yaml"
group:
  - french_bench
  - french_bench_gen
task: french_bench_trivia
dataset_path: manu/french-trivia
output_type: generate_until
# NOTE(review): validation/test/fewshot all point at `train` — the dataset
# apparently ships a single split; confirm this is intended.
validation_split: train
test_split: train
fewshot_split: train
doc_to_text: "{{Question}}\nAnswer:"
doc_to_target: "{{Answer}}"
target_delimiter: " "
should_decontaminate: true
doc_to_decontamination_query: Question
generation_kwargs:
  until:
    - "\n"
# filter_list:
#   - name: remove_whitespace
#     filter:
#       - function: remove_whitespace
#       - function: take_first
metric_list:
  - metric: !function utils.exact
    aggregation: mean
    higher_is_better: true
  - metric: !function utils.f1
    aggregation: mean
    higher_is_better: true
  - metric: !function utils.rouge1
    higher_is_better: true
    aggregation: !function utils.rouge1_agg
  - metric: !function utils.is_included
    higher_is_better: true
    aggregation: mean
|
lm-evaluation/lm_eval/tasks/french_bench/french_bench_vocab.yaml
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# French vocabulary cloze task: each answer candidate is substituted into the
# '<...>' gap of the question and the most likely completed sentence wins.
include: "_default_template_yaml"
group:
  - french_bench
  - french_bench_mc
# description: "Répond au mieux en complétant la question avec une des réponses proposées."
dataset_path: manu/french-bench-grammar-vocab-reading
output_type: multiple_choice
# All splits use the dataset's `Vocabulary` split.
validation_split: Vocabulary
fewshot_split: Vocabulary
test_split: Vocabulary
# Earlier letter-based prompt kept for reference:
# doc_to_text: "Question: {{question.strip()}}\nA: {{answerA}}\nB: {{answerB}}\nC: {{answerC}}\nD: {{answerD}}\nRéponse:"
# doc_to_choice: ["A", "B", "C", "D"]
doc_to_text: "La phrase suivante est logique sémantiquement:\n"
doc_to_choice: "{{[question.replace('<...>', answerA), question.replace('<...>', answerB), question.replace('<...>', answerC), question.replace('<...>', answerD)]}}"
# `answer` is a letter A–D; prefixing with "answer" maps it to the list index.
doc_to_target: '{{["answerA", "answerB", "answerC", "answerD"].index("answer" + answer)}}'
task: french_bench_vocab
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
|
lm-evaluation/lm_eval/tasks/french_bench/french_bench_wikitext_fr.yaml
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# French Wikipedia language-modeling perplexity on asi/wikitext_fr.
# Uses rolling loglikelihood over the detokenized paragraph; word/byte counts
# for normalization come from preprocess_wikitext.process_results.
group:
  - french_bench_perplexity
task: french_bench_wikitext_fr
dataset_path: asi/wikitext_fr
dataset_name: wikitext-35
output_type: loglikelihood_rolling
training_split: train
validation_split: validation
test_split: test
# Perplexity is always zero-shot: there is no prompt, only the target text.
num_fewshot: 0
doc_to_text: ""
doc_to_target: !function preprocess_wikitext.wikitext_detokenizer
process_results: !function preprocess_wikitext.process_results
should_decontaminate: true
doc_to_decontamination_query: "{{paragraph}}"
metric_list:
  - metric: word_perplexity
    aggregation: weighted_perplexity
    higher_is_better: false
  - metric: byte_perplexity
    aggregation: weighted_perplexity
    higher_is_better: false
  - metric: bits_per_byte
    aggregation: bits_per_byte
    higher_is_better: false
|
lm-evaluation/lm_eval/tasks/french_bench/french_bench_xnli.yaml
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# French XNLI (natural language inference) phrased as sentence continuation;
# the integer `label` field indexes the three choices directly.
include: "_default_template_yaml"
group:
  - french_bench
  - french_bench_extra
description: "La prémisse et l'hypothèse sont elles en accord, neutres en elles, ou en contradiction ?"
dataset_path: xnli
dataset_name: fr
output_type: multiple_choice
validation_split: validation
fewshot_split: validation
test_split: test
# Earlier letter-based prompt kept for reference:
# doc_to_text: "\nPrémisse: {{premise}}\n\nHypothèse: {{hypothesis}}\n\nLa prémisse et l'hypothèse sont:\nA. En accord\nB. Neutre\nC. En contradiction\nRéponse:"
# doc_to_choice: "{{['A: En accord', 'B: Neutre', 'C: En contradiction']}}"
doc_to_text: "\nPrémisse: {{premise}}\n\nHypothèse: {{hypothesis}}\n\nLa prémisse et l'hypothèse sont"
doc_to_choice: "{{['en accord', 'neutres entre elles', 'en contradiction']}}"
doc_to_target: label
task: french_bench_xnli
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
|
lm-evaluation/lm_eval/tasks/french_bench/preprocess_wikitext.py
ADDED
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import re
|
2 |
+
|
3 |
+
|
4 |
+
def wikitext_detokenizer(doc):
    """Undo wikitext-style tokenization in ``doc["paragraph"]``.

    Reverses the space-padded punctuation, "@" number separators, and
    section-heading artifacts introduced by the wikitext tokenizer, and
    returns the cleaned string.
    """
    text = doc["paragraph"]

    # contractions
    text = text.replace("s '", "s'")
    text = re.sub(r"/' [0-9]/", r"/'[0-9]/", text)

    # number separators: " @-@ " -> "-", etc.
    for sep in ("-", ",", "."):
        text = text.replace(f" @{sep}@ ", sep)

    # reattach punctuation marks to the preceding word
    for mark in (":", ";", ".", "!", "?", ","):
        text = text.replace(f" {mark} ", f"{mark} ")

    # strip the padding just inside paired delimiters
    text = re.sub(r"\(\s*([^\)]*?)\s*\)", r"(\1)", text)
    text = re.sub(r"\[\s*([^\]]*?)\s*\]", r"[\1]", text)
    text = re.sub(r"{\s*([^}]*?)\s*}", r"{\1}", text)
    text = re.sub(r"\"\s*([^\"]*?)\s*\"", r'"\1"', text)
    text = re.sub(r"'\s*([^']*?)\s*'", r"'\1'", text)

    # miscellaneous: headings, degree sign, line-edge spaces, number token
    text = text.replace("= = = =", "====")
    text = text.replace("= = =", "===")
    text = text.replace("= =", "==")
    text = text.replace(" " + chr(176) + " ", chr(176))
    text = text.replace(" \n", "\n")
    text = text.replace("\n ", "\n")
    text = text.replace(" N ", " 1 ")
    text = text.replace(" 's", "'s")

    return text
|
37 |
+
|
38 |
+
|
39 |
+
def process_results(doc, results):
    """Convert a rolling loglikelihood result into perplexity metric inputs.

    Returns a dict mapping each perplexity metric name to a
    ``(loglikelihood, normalizer)`` pair, where the normalizer is the word
    or UTF-8 byte count of the paragraph.
    """
    (loglikelihood,) = results
    # IMPORTANT: wikitext counts are taken on the *original* paragraph,
    # before detokenization.
    raw = doc["paragraph"]
    word_count = len(re.split(r"\s+", raw))
    byte_count = len(raw.encode("utf-8"))
    return {
        "word_perplexity": (loglikelihood, word_count),
        "byte_perplexity": (loglikelihood, byte_count),
        "bits_per_byte": (loglikelihood, byte_count),
    }
|
lm-evaluation/lm_eval/tasks/french_bench/utils.py
ADDED
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import collections
|
2 |
+
import re
|
3 |
+
import string
|
4 |
+
|
5 |
+
import datasets
|
6 |
+
import evaluate
|
7 |
+
|
8 |
+
|
9 |
+
def normalize_answer(s):
    """Lower text and remove punctuation, French articles and extra whitespace."""
    # Applied in the same order as the original SQuAD-style pipeline:
    # lowercase -> strip punctuation -> drop articles -> squeeze whitespace.
    text = s.lower()
    punctuation = set(string.punctuation)
    text = "".join(ch for ch in text if ch not in punctuation)
    # Remove common French articles as whole words only.
    text = re.sub(r"\b(un|une|des|le|la|les)\b", " ", text, flags=re.UNICODE)
    return " ".join(text.split())
|
27 |
+
|
28 |
+
|
29 |
+
def get_tokens(s):
    """Return the normalized whitespace tokens of *s*; empty list for falsy input."""
    return normalize_answer(s).split() if s else []
|
33 |
+
|
34 |
+
|
35 |
+
# Exact match (the normalized answer exactly match the gold answer)
|
36 |
+
def exact(predictions, references):
    """Return 1 if the normalized first prediction equals the normalized first reference, else 0."""
    pred = normalize_answer(predictions[0])
    ref = normalize_answer(references[0])
    return int(pred == ref)
|
38 |
+
|
39 |
+
|
40 |
+
# The F-score of predicted tokens versus the gold answer
|
41 |
+
def f1(predictions, references):
    """Token-level F1 between the first prediction and the first reference."""
    ref_tokens = get_tokens(references[0])
    pred_tokens = get_tokens(predictions[0])
    overlap_counts = collections.Counter(ref_tokens) & collections.Counter(pred_tokens)
    overlap = sum(overlap_counts.values())
    if not ref_tokens or not pred_tokens:
        # If either side is no-answer, F1 is 1 only when both are empty.
        return int(ref_tokens == pred_tokens)
    if overlap == 0:
        return 0
    precision = overlap / len(pred_tokens)
    recall = overlap / len(ref_tokens)
    return (2 * precision * recall) / (precision + recall)
|
55 |
+
|
56 |
+
|
57 |
+
def rouge1(items):
    """Identity passthrough; the real scoring happens in rouge1_agg at aggregation time."""
    return items
|
62 |
+
|
63 |
+
|
64 |
+
def rouge1_agg(items):
    """Aggregate (reference, prediction) pairs into a corpus-level ROUGE-1 score.

    Higher is better.
    """
    refs, preds = zip(*items)
    rouge_scorer = evaluate.load("rouge")
    return rouge_scorer.compute(predictions=preds, references=refs)["rouge1"]
|
72 |
+
|
73 |
+
|
74 |
+
def is_included(items):
    """Return True if ``items[0]`` occurs as a substring of ``items[1]``.

    Used as a lenient per-document metric: the reference counts as found when
    it appears anywhere inside the generated text.  (The previous docstring,
    "passthrough for efficiency", was copy-pasted from ``rouge1`` and wrong.)
    """
    # `in` already yields the bool the old if/return-True/return-False spelled out.
    return items[0] in items[1]
|
81 |
+
|
82 |
+
|
83 |
+
def preprocess(text):
    """Clean HellaSwag-style text: strip, drop bracketed markers, fix double spaces."""
    cleaned = text.strip()
    # NOTE: bracket markers are artifacts of the WikiHow portion of HellaSwag.
    cleaned = cleaned.replace(" [title]", ". ")
    cleaned = re.sub(r"\[.*?\]", "", cleaned)
    return cleaned.replace("  ", " ")
|
90 |
+
|
91 |
+
|
92 |
+
def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    """Map raw HellaSwag-style rows into the {query, choices, gold} format."""

    def _to_eval_doc(doc):
        # Context is ctx_a followed by ctx_b with its first letter capitalized.
        context = doc["ctx_a"] + " " + doc["ctx_b"].capitalize()
        return {
            "query": preprocess(doc["activity_label"] + ": " + context),
            "choices": [preprocess(ending) for ending in doc["endings"]],
            "gold": int(doc["label"]),
        }

    return dataset.map(_to_eval_doc)
|
lm-evaluation/lm_eval/tasks/ifeval/instructions_registry.py
ADDED
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2023 The Google Research Authors.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
"""Registry of all instructions."""
|
16 |
+
from lm_eval.tasks.ifeval import instructions
|
17 |
+
|
18 |
+
|
19 |
+
# Instruction-id prefixes.  A full instruction id is "<prefix><name>",
# e.g. _KEYWORD + "existence" -> "keywords:existence".
_KEYWORD = "keywords:"

_LANGUAGE = "language:"

_LENGTH = "length_constraints:"

_CONTENT = "detectable_content:"

_FORMAT = "detectable_format:"

_MULTITURN = "multi-turn:"

_COMBINATION = "combination:"

_STARTEND = "startend:"

_CHANGE_CASES = "change_case:"

_PUNCTUATION = "punctuation:"
|
38 |
+
|
39 |
+
# Maps each instruction id to the checker class (from the `instructions`
# module) that verifies whether a model response satisfies it.
INSTRUCTION_DICT = {
    _KEYWORD + "existence": instructions.KeywordChecker,
    _KEYWORD + "frequency": instructions.KeywordFrequencyChecker,
    # TODO(jeffreyzhou): make a proper set of sentences to choose from
    # _KEYWORD + "key_sentences": instructions.KeySentenceChecker,
    _KEYWORD + "forbidden_words": instructions.ForbiddenWords,
    _KEYWORD + "letter_frequency": instructions.LetterFrequencyChecker,
    _LANGUAGE + "response_language": instructions.ResponseLanguageChecker,
    _LENGTH + "number_sentences": instructions.NumberOfSentences,
    _LENGTH + "number_paragraphs": instructions.ParagraphChecker,
    _LENGTH + "number_words": instructions.NumberOfWords,
    _LENGTH + "nth_paragraph_first_word": instructions.ParagraphFirstWordCheck,
    _CONTENT + "number_placeholders": instructions.PlaceholderChecker,
    _CONTENT + "postscript": instructions.PostscriptChecker,
    _FORMAT + "number_bullet_lists": instructions.BulletListChecker,
    # TODO(jeffreyzhou): Pre-create paragraph or use prompt to replace
    # _CONTENT + "rephrase_paragraph": instructions.RephraseParagraph,
    _FORMAT + "constrained_response": instructions.ConstrainedResponseChecker,
    _FORMAT + "number_highlighted_sections": (instructions.HighlightSectionChecker),
    _FORMAT + "multiple_sections": instructions.SectionChecker,
    # TODO(tianjianlu): Re-enable rephrasing with preprocessing the message.
    # _FORMAT + "rephrase": instructions.RephraseChecker,
    _FORMAT + "json_format": instructions.JsonFormat,
    _FORMAT + "title": instructions.TitleChecker,
    # TODO(tianjianlu): Re-enable with specific prompts.
    # _MULTITURN + "constrained_start": instructions.ConstrainedStartChecker,
    _COMBINATION + "two_responses": instructions.TwoResponsesChecker,
    _COMBINATION + "repeat_prompt": instructions.RepeatPromptThenAnswer,
    _STARTEND + "end_checker": instructions.EndChecker,
    _CHANGE_CASES + "capital_word_frequency": instructions.CapitalWordFrequencyChecker,
    _CHANGE_CASES + "english_capital": instructions.CapitalLettersEnglishChecker,
    _CHANGE_CASES + "english_lowercase": instructions.LowercaseLettersEnglishChecker,
    _PUNCTUATION + "no_comma": instructions.CommaChecker,
    _STARTEND + "quotation": instructions.QuotationChecker,
}
|
74 |
+
|
75 |
+
# Maps each instruction id to the set of instruction ids it cannot be combined
# with in a single prompt.  conflict_make() later symmetrizes this table and
# makes every instruction conflict with itself.
INSTRUCTION_CONFLICTS = {
    _KEYWORD + "existence": {_KEYWORD + "existence"},
    _KEYWORD + "frequency": {_KEYWORD + "frequency"},
    # TODO(jeffreyzhou): make a proper set of sentences to choose from
    # _KEYWORD + "key_sentences": instructions.KeySentenceChecker,
    _KEYWORD + "forbidden_words": {_KEYWORD + "forbidden_words"},
    _KEYWORD + "letter_frequency": {_KEYWORD + "letter_frequency"},
    _LANGUAGE + "response_language": {
        _LANGUAGE + "response_language",
        _FORMAT + "multiple_sections",
        _KEYWORD + "existence",
        _KEYWORD + "frequency",
        _KEYWORD + "forbidden_words",
        _STARTEND + "end_checker",
        _CHANGE_CASES + "english_capital",
        _CHANGE_CASES + "english_lowercase",
    },
    _LENGTH + "number_sentences": {_LENGTH + "number_sentences"},
    _LENGTH + "number_paragraphs": {
        _LENGTH + "number_paragraphs",
        _LENGTH + "nth_paragraph_first_word",
        _LENGTH + "number_sentences",
        # NOTE(review): duplicate of the entry two lines up; harmless in a set literal.
        _LENGTH + "nth_paragraph_first_word",
    },
    _LENGTH + "number_words": {_LENGTH + "number_words"},
    _LENGTH + "nth_paragraph_first_word": {
        _LENGTH + "nth_paragraph_first_word",
        _LENGTH + "number_paragraphs",
    },
    _CONTENT + "number_placeholders": {_CONTENT + "number_placeholders"},
    _CONTENT + "postscript": {_CONTENT + "postscript"},
    _FORMAT + "number_bullet_lists": {_FORMAT + "number_bullet_lists"},
    # TODO(jeffreyzhou): Pre-create paragraph or use prompt to replace
    # _CONTENT + "rephrase_paragraph": instructions.RephraseParagraph,
    # A fixed-form response conflicts with every other instruction.
    _FORMAT + "constrained_response": set(INSTRUCTION_DICT.keys()),
    _FORMAT + "number_highlighted_sections": {_FORMAT + "number_highlighted_sections"},
    _FORMAT + "multiple_sections": {
        _FORMAT + "multiple_sections",
        _LANGUAGE + "response_language",
        _FORMAT + "number_highlighted_sections",
    },
    # TODO(tianjianlu): Re-enable rephrasing with preprocessing the message.
    # _FORMAT + "rephrase": instructions.RephraseChecker,
    _FORMAT + "json_format": set(INSTRUCTION_DICT.keys()).difference(
        {_KEYWORD + "forbidden_words", _KEYWORD + "existence"}
    ),
    _FORMAT + "title": {_FORMAT + "title"},
    # TODO(tianjianlu): Re-enable with specific prompts.
    # _MULTITURN + "constrained_start": instructions.ConstrainedStartChecker,
    _COMBINATION + "two_responses": set(INSTRUCTION_DICT.keys()).difference(
        {
            _KEYWORD + "forbidden_words",
            _KEYWORD + "existence",
            _LANGUAGE + "response_language",
            _FORMAT + "title",
            _PUNCTUATION + "no_comma",
        }
    ),
    _COMBINATION + "repeat_prompt": set(INSTRUCTION_DICT.keys()).difference(
        {_KEYWORD + "existence", _FORMAT + "title", _PUNCTUATION + "no_comma"}
    ),
    _STARTEND + "end_checker": {_STARTEND + "end_checker"},
    _CHANGE_CASES + "capital_word_frequency": {
        _CHANGE_CASES + "capital_word_frequency",
        _CHANGE_CASES + "english_lowercase",
        _CHANGE_CASES + "english_capital",
    },
    _CHANGE_CASES + "english_capital": {_CHANGE_CASES + "english_capital"},
    _CHANGE_CASES + "english_lowercase": {
        _CHANGE_CASES + "english_lowercase",
        _CHANGE_CASES + "english_capital",
    },
    _PUNCTUATION + "no_comma": {_PUNCTUATION + "no_comma"},
    _STARTEND + "quotation": {_STARTEND + "quotation", _FORMAT + "title"},
}
|
150 |
+
|
151 |
+
|
152 |
+
def conflict_make(conflicts):
    """Makes sure if A conflicts with B, B will conflict with A.

    Args:
      conflicts: Dictionary of potential conflicts where key is instruction id
        and value is set of instruction ids that it conflicts with.

    Returns:
      Revised version of the dictionary (mutated in place).  All instructions
      conflict with themselves.  If A conflicts with B, B will conflict with A.
    """
    for inst_id, clashing in conflicts.items():
        # Mirror each conflict so the relation becomes symmetric.
        for other in clashing:
            conflicts[other].add(inst_id)
        # Every instruction conflicts with itself.
        clashing.add(inst_id)
    return conflicts
|
lm-evaluation/lm_eval/tasks/ifeval/instructions_util.py
ADDED
@@ -0,0 +1,1682 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2023 The Google Research Authors.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
"""Utility library of instructions."""
|
16 |
+
|
17 |
+
import functools
|
18 |
+
import random
|
19 |
+
import re
|
20 |
+
|
21 |
+
import immutabledict
|
22 |
+
import nltk
|
23 |
+
|
24 |
+
|
25 |
+
def download_nltk_resources():
    """Make sure the NLTK 'punkt' tokenizer data is installed locally.

    Looks the resource up in the NLTK data path and triggers a one-time
    download only when that lookup fails, so repeated imports are cheap.
    """
    resource_path, package_name = "tokenizers/punkt", "punkt"
    try:
        nltk.data.find(resource_path)
    except LookupError:
        nltk.download(package_name)


# Fetch the tokenizer data at import time so later helpers can rely on it.
download_nltk_resources()
|
34 |
+
|
35 |
+
WORD_LIST = [
|
36 |
+
"western",
|
37 |
+
"sentence",
|
38 |
+
"signal",
|
39 |
+
"dump",
|
40 |
+
"spot",
|
41 |
+
"opposite",
|
42 |
+
"bottom",
|
43 |
+
"potato",
|
44 |
+
"administration",
|
45 |
+
"working",
|
46 |
+
"welcome",
|
47 |
+
"morning",
|
48 |
+
"good",
|
49 |
+
"agency",
|
50 |
+
"primary",
|
51 |
+
"wish",
|
52 |
+
"responsibility",
|
53 |
+
"press",
|
54 |
+
"problem",
|
55 |
+
"president",
|
56 |
+
"steal",
|
57 |
+
"brush",
|
58 |
+
"read",
|
59 |
+
"type",
|
60 |
+
"beat",
|
61 |
+
"trainer",
|
62 |
+
"growth",
|
63 |
+
"lock",
|
64 |
+
"bone",
|
65 |
+
"case",
|
66 |
+
"equal",
|
67 |
+
"comfortable",
|
68 |
+
"region",
|
69 |
+
"replacement",
|
70 |
+
"performance",
|
71 |
+
"mate",
|
72 |
+
"walk",
|
73 |
+
"medicine",
|
74 |
+
"film",
|
75 |
+
"thing",
|
76 |
+
"rock",
|
77 |
+
"tap",
|
78 |
+
"total",
|
79 |
+
"competition",
|
80 |
+
"ease",
|
81 |
+
"south",
|
82 |
+
"establishment",
|
83 |
+
"gather",
|
84 |
+
"parking",
|
85 |
+
"world",
|
86 |
+
"plenty",
|
87 |
+
"breath",
|
88 |
+
"claim",
|
89 |
+
"alcohol",
|
90 |
+
"trade",
|
91 |
+
"dear",
|
92 |
+
"highlight",
|
93 |
+
"street",
|
94 |
+
"matter",
|
95 |
+
"decision",
|
96 |
+
"mess",
|
97 |
+
"agreement",
|
98 |
+
"studio",
|
99 |
+
"coach",
|
100 |
+
"assist",
|
101 |
+
"brain",
|
102 |
+
"wing",
|
103 |
+
"style",
|
104 |
+
"private",
|
105 |
+
"top",
|
106 |
+
"brown",
|
107 |
+
"leg",
|
108 |
+
"buy",
|
109 |
+
"procedure",
|
110 |
+
"method",
|
111 |
+
"speed",
|
112 |
+
"high",
|
113 |
+
"company",
|
114 |
+
"valuable",
|
115 |
+
"pie",
|
116 |
+
"analyst",
|
117 |
+
"session",
|
118 |
+
"pattern",
|
119 |
+
"district",
|
120 |
+
"pleasure",
|
121 |
+
"dinner",
|
122 |
+
"swimming",
|
123 |
+
"joke",
|
124 |
+
"order",
|
125 |
+
"plate",
|
126 |
+
"department",
|
127 |
+
"motor",
|
128 |
+
"cell",
|
129 |
+
"spend",
|
130 |
+
"cabinet",
|
131 |
+
"difference",
|
132 |
+
"power",
|
133 |
+
"examination",
|
134 |
+
"engine",
|
135 |
+
"horse",
|
136 |
+
"dimension",
|
137 |
+
"pay",
|
138 |
+
"toe",
|
139 |
+
"curve",
|
140 |
+
"literature",
|
141 |
+
"bother",
|
142 |
+
"fire",
|
143 |
+
"possibility",
|
144 |
+
"debate",
|
145 |
+
"activity",
|
146 |
+
"passage",
|
147 |
+
"hello",
|
148 |
+
"cycle",
|
149 |
+
"background",
|
150 |
+
"quiet",
|
151 |
+
"author",
|
152 |
+
"effect",
|
153 |
+
"actor",
|
154 |
+
"page",
|
155 |
+
"bicycle",
|
156 |
+
"error",
|
157 |
+
"throat",
|
158 |
+
"attack",
|
159 |
+
"character",
|
160 |
+
"phone",
|
161 |
+
"tea",
|
162 |
+
"increase",
|
163 |
+
"outcome",
|
164 |
+
"file",
|
165 |
+
"specific",
|
166 |
+
"inspector",
|
167 |
+
"internal",
|
168 |
+
"potential",
|
169 |
+
"staff",
|
170 |
+
"building",
|
171 |
+
"employer",
|
172 |
+
"shoe",
|
173 |
+
"hand",
|
174 |
+
"direction",
|
175 |
+
"garden",
|
176 |
+
"purchase",
|
177 |
+
"interview",
|
178 |
+
"study",
|
179 |
+
"recognition",
|
180 |
+
"member",
|
181 |
+
"spiritual",
|
182 |
+
"oven",
|
183 |
+
"sandwich",
|
184 |
+
"weird",
|
185 |
+
"passenger",
|
186 |
+
"particular",
|
187 |
+
"response",
|
188 |
+
"reaction",
|
189 |
+
"size",
|
190 |
+
"variation",
|
191 |
+
"a",
|
192 |
+
"cancel",
|
193 |
+
"candy",
|
194 |
+
"exit",
|
195 |
+
"guest",
|
196 |
+
"condition",
|
197 |
+
"fly",
|
198 |
+
"price",
|
199 |
+
"weakness",
|
200 |
+
"convert",
|
201 |
+
"hotel",
|
202 |
+
"great",
|
203 |
+
"mouth",
|
204 |
+
"mind",
|
205 |
+
"song",
|
206 |
+
"sugar",
|
207 |
+
"suspect",
|
208 |
+
"telephone",
|
209 |
+
"ear",
|
210 |
+
"roof",
|
211 |
+
"paint",
|
212 |
+
"refrigerator",
|
213 |
+
"organization",
|
214 |
+
"jury",
|
215 |
+
"reward",
|
216 |
+
"engineering",
|
217 |
+
"day",
|
218 |
+
"possession",
|
219 |
+
"crew",
|
220 |
+
"bar",
|
221 |
+
"road",
|
222 |
+
"description",
|
223 |
+
"celebration",
|
224 |
+
"score",
|
225 |
+
"mark",
|
226 |
+
"letter",
|
227 |
+
"shower",
|
228 |
+
"suggestion",
|
229 |
+
"sir",
|
230 |
+
"luck",
|
231 |
+
"national",
|
232 |
+
"progress",
|
233 |
+
"hall",
|
234 |
+
"stroke",
|
235 |
+
"theory",
|
236 |
+
"offer",
|
237 |
+
"story",
|
238 |
+
"tax",
|
239 |
+
"definition",
|
240 |
+
"history",
|
241 |
+
"ride",
|
242 |
+
"medium",
|
243 |
+
"opening",
|
244 |
+
"glass",
|
245 |
+
"elevator",
|
246 |
+
"stomach",
|
247 |
+
"question",
|
248 |
+
"ability",
|
249 |
+
"leading",
|
250 |
+
"village",
|
251 |
+
"computer",
|
252 |
+
"city",
|
253 |
+
"grand",
|
254 |
+
"confidence",
|
255 |
+
"candle",
|
256 |
+
"priest",
|
257 |
+
"recommendation",
|
258 |
+
"point",
|
259 |
+
"necessary",
|
260 |
+
"body",
|
261 |
+
"desk",
|
262 |
+
"secret",
|
263 |
+
"horror",
|
264 |
+
"noise",
|
265 |
+
"culture",
|
266 |
+
"warning",
|
267 |
+
"water",
|
268 |
+
"round",
|
269 |
+
"diet",
|
270 |
+
"flower",
|
271 |
+
"bus",
|
272 |
+
"tough",
|
273 |
+
"permission",
|
274 |
+
"week",
|
275 |
+
"prompt",
|
276 |
+
"connection",
|
277 |
+
"abuse",
|
278 |
+
"height",
|
279 |
+
"save",
|
280 |
+
"corner",
|
281 |
+
"border",
|
282 |
+
"stress",
|
283 |
+
"drive",
|
284 |
+
"stop",
|
285 |
+
"rip",
|
286 |
+
"meal",
|
287 |
+
"listen",
|
288 |
+
"confusion",
|
289 |
+
"girlfriend",
|
290 |
+
"living",
|
291 |
+
"relation",
|
292 |
+
"significance",
|
293 |
+
"plan",
|
294 |
+
"creative",
|
295 |
+
"atmosphere",
|
296 |
+
"blame",
|
297 |
+
"invite",
|
298 |
+
"housing",
|
299 |
+
"paper",
|
300 |
+
"drink",
|
301 |
+
"roll",
|
302 |
+
"silver",
|
303 |
+
"drunk",
|
304 |
+
"age",
|
305 |
+
"damage",
|
306 |
+
"smoke",
|
307 |
+
"environment",
|
308 |
+
"pack",
|
309 |
+
"savings",
|
310 |
+
"influence",
|
311 |
+
"tourist",
|
312 |
+
"rain",
|
313 |
+
"post",
|
314 |
+
"sign",
|
315 |
+
"grandmother",
|
316 |
+
"run",
|
317 |
+
"profit",
|
318 |
+
"push",
|
319 |
+
"clerk",
|
320 |
+
"final",
|
321 |
+
"wine",
|
322 |
+
"swim",
|
323 |
+
"pause",
|
324 |
+
"stuff",
|
325 |
+
"singer",
|
326 |
+
"funeral",
|
327 |
+
"average",
|
328 |
+
"source",
|
329 |
+
"scene",
|
330 |
+
"tradition",
|
331 |
+
"personal",
|
332 |
+
"snow",
|
333 |
+
"nobody",
|
334 |
+
"distance",
|
335 |
+
"sort",
|
336 |
+
"sensitive",
|
337 |
+
"animal",
|
338 |
+
"major",
|
339 |
+
"negotiation",
|
340 |
+
"click",
|
341 |
+
"mood",
|
342 |
+
"period",
|
343 |
+
"arrival",
|
344 |
+
"expression",
|
345 |
+
"holiday",
|
346 |
+
"repeat",
|
347 |
+
"dust",
|
348 |
+
"closet",
|
349 |
+
"gold",
|
350 |
+
"bad",
|
351 |
+
"sail",
|
352 |
+
"combination",
|
353 |
+
"clothes",
|
354 |
+
"emphasis",
|
355 |
+
"duty",
|
356 |
+
"black",
|
357 |
+
"step",
|
358 |
+
"school",
|
359 |
+
"jump",
|
360 |
+
"document",
|
361 |
+
"professional",
|
362 |
+
"lip",
|
363 |
+
"chemical",
|
364 |
+
"front",
|
365 |
+
"wake",
|
366 |
+
"while",
|
367 |
+
"inside",
|
368 |
+
"watch",
|
369 |
+
"row",
|
370 |
+
"subject",
|
371 |
+
"penalty",
|
372 |
+
"balance",
|
373 |
+
"possible",
|
374 |
+
"adult",
|
375 |
+
"aside",
|
376 |
+
"sample",
|
377 |
+
"appeal",
|
378 |
+
"wedding",
|
379 |
+
"depth",
|
380 |
+
"king",
|
381 |
+
"award",
|
382 |
+
"wife",
|
383 |
+
"blow",
|
384 |
+
"site",
|
385 |
+
"camp",
|
386 |
+
"music",
|
387 |
+
"safe",
|
388 |
+
"gift",
|
389 |
+
"fault",
|
390 |
+
"guess",
|
391 |
+
"act",
|
392 |
+
"shame",
|
393 |
+
"drama",
|
394 |
+
"capital",
|
395 |
+
"exam",
|
396 |
+
"stupid",
|
397 |
+
"record",
|
398 |
+
"sound",
|
399 |
+
"swing",
|
400 |
+
"novel",
|
401 |
+
"minimum",
|
402 |
+
"ratio",
|
403 |
+
"machine",
|
404 |
+
"shape",
|
405 |
+
"lead",
|
406 |
+
"operation",
|
407 |
+
"salary",
|
408 |
+
"cloud",
|
409 |
+
"affair",
|
410 |
+
"hit",
|
411 |
+
"chapter",
|
412 |
+
"stage",
|
413 |
+
"quantity",
|
414 |
+
"access",
|
415 |
+
"army",
|
416 |
+
"chain",
|
417 |
+
"traffic",
|
418 |
+
"kick",
|
419 |
+
"analysis",
|
420 |
+
"airport",
|
421 |
+
"time",
|
422 |
+
"vacation",
|
423 |
+
"philosophy",
|
424 |
+
"ball",
|
425 |
+
"chest",
|
426 |
+
"thanks",
|
427 |
+
"place",
|
428 |
+
"mountain",
|
429 |
+
"advertising",
|
430 |
+
"red",
|
431 |
+
"past",
|
432 |
+
"rent",
|
433 |
+
"return",
|
434 |
+
"tour",
|
435 |
+
"house",
|
436 |
+
"construction",
|
437 |
+
"net",
|
438 |
+
"native",
|
439 |
+
"war",
|
440 |
+
"figure",
|
441 |
+
"fee",
|
442 |
+
"spray",
|
443 |
+
"user",
|
444 |
+
"dirt",
|
445 |
+
"shot",
|
446 |
+
"task",
|
447 |
+
"stick",
|
448 |
+
"friend",
|
449 |
+
"software",
|
450 |
+
"promotion",
|
451 |
+
"interaction",
|
452 |
+
"surround",
|
453 |
+
"block",
|
454 |
+
"purpose",
|
455 |
+
"practice",
|
456 |
+
"conflict",
|
457 |
+
"routine",
|
458 |
+
"requirement",
|
459 |
+
"bonus",
|
460 |
+
"hole",
|
461 |
+
"state",
|
462 |
+
"junior",
|
463 |
+
"sweet",
|
464 |
+
"catch",
|
465 |
+
"tear",
|
466 |
+
"fold",
|
467 |
+
"wall",
|
468 |
+
"editor",
|
469 |
+
"life",
|
470 |
+
"position",
|
471 |
+
"pound",
|
472 |
+
"respect",
|
473 |
+
"bathroom",
|
474 |
+
"coat",
|
475 |
+
"script",
|
476 |
+
"job",
|
477 |
+
"teach",
|
478 |
+
"birth",
|
479 |
+
"view",
|
480 |
+
"resolve",
|
481 |
+
"theme",
|
482 |
+
"employee",
|
483 |
+
"doubt",
|
484 |
+
"market",
|
485 |
+
"education",
|
486 |
+
"serve",
|
487 |
+
"recover",
|
488 |
+
"tone",
|
489 |
+
"harm",
|
490 |
+
"miss",
|
491 |
+
"union",
|
492 |
+
"understanding",
|
493 |
+
"cow",
|
494 |
+
"river",
|
495 |
+
"association",
|
496 |
+
"concept",
|
497 |
+
"training",
|
498 |
+
"recipe",
|
499 |
+
"relationship",
|
500 |
+
"reserve",
|
501 |
+
"depression",
|
502 |
+
"proof",
|
503 |
+
"hair",
|
504 |
+
"revenue",
|
505 |
+
"independent",
|
506 |
+
"lift",
|
507 |
+
"assignment",
|
508 |
+
"temporary",
|
509 |
+
"amount",
|
510 |
+
"loss",
|
511 |
+
"edge",
|
512 |
+
"track",
|
513 |
+
"check",
|
514 |
+
"rope",
|
515 |
+
"estimate",
|
516 |
+
"pollution",
|
517 |
+
"stable",
|
518 |
+
"message",
|
519 |
+
"delivery",
|
520 |
+
"perspective",
|
521 |
+
"mirror",
|
522 |
+
"assistant",
|
523 |
+
"representative",
|
524 |
+
"witness",
|
525 |
+
"nature",
|
526 |
+
"judge",
|
527 |
+
"fruit",
|
528 |
+
"tip",
|
529 |
+
"devil",
|
530 |
+
"town",
|
531 |
+
"emergency",
|
532 |
+
"upper",
|
533 |
+
"drop",
|
534 |
+
"stay",
|
535 |
+
"human",
|
536 |
+
"neck",
|
537 |
+
"speaker",
|
538 |
+
"network",
|
539 |
+
"sing",
|
540 |
+
"resist",
|
541 |
+
"league",
|
542 |
+
"trip",
|
543 |
+
"signature",
|
544 |
+
"lawyer",
|
545 |
+
"importance",
|
546 |
+
"gas",
|
547 |
+
"choice",
|
548 |
+
"engineer",
|
549 |
+
"success",
|
550 |
+
"part",
|
551 |
+
"external",
|
552 |
+
"worker",
|
553 |
+
"simple",
|
554 |
+
"quarter",
|
555 |
+
"student",
|
556 |
+
"heart",
|
557 |
+
"pass",
|
558 |
+
"spite",
|
559 |
+
"shift",
|
560 |
+
"rough",
|
561 |
+
"lady",
|
562 |
+
"grass",
|
563 |
+
"community",
|
564 |
+
"garage",
|
565 |
+
"youth",
|
566 |
+
"standard",
|
567 |
+
"skirt",
|
568 |
+
"promise",
|
569 |
+
"blind",
|
570 |
+
"television",
|
571 |
+
"disease",
|
572 |
+
"commission",
|
573 |
+
"positive",
|
574 |
+
"energy",
|
575 |
+
"calm",
|
576 |
+
"presence",
|
577 |
+
"tune",
|
578 |
+
"basis",
|
579 |
+
"preference",
|
580 |
+
"head",
|
581 |
+
"common",
|
582 |
+
"cut",
|
583 |
+
"somewhere",
|
584 |
+
"presentation",
|
585 |
+
"current",
|
586 |
+
"thought",
|
587 |
+
"revolution",
|
588 |
+
"effort",
|
589 |
+
"master",
|
590 |
+
"implement",
|
591 |
+
"republic",
|
592 |
+
"floor",
|
593 |
+
"principle",
|
594 |
+
"stranger",
|
595 |
+
"shoulder",
|
596 |
+
"grade",
|
597 |
+
"button",
|
598 |
+
"tennis",
|
599 |
+
"police",
|
600 |
+
"collection",
|
601 |
+
"account",
|
602 |
+
"register",
|
603 |
+
"glove",
|
604 |
+
"divide",
|
605 |
+
"professor",
|
606 |
+
"chair",
|
607 |
+
"priority",
|
608 |
+
"combine",
|
609 |
+
"peace",
|
610 |
+
"extension",
|
611 |
+
"maybe",
|
612 |
+
"evening",
|
613 |
+
"frame",
|
614 |
+
"sister",
|
615 |
+
"wave",
|
616 |
+
"code",
|
617 |
+
"application",
|
618 |
+
"mouse",
|
619 |
+
"match",
|
620 |
+
"counter",
|
621 |
+
"bottle",
|
622 |
+
"half",
|
623 |
+
"cheek",
|
624 |
+
"resolution",
|
625 |
+
"back",
|
626 |
+
"knowledge",
|
627 |
+
"make",
|
628 |
+
"discussion",
|
629 |
+
"screw",
|
630 |
+
"length",
|
631 |
+
"accident",
|
632 |
+
"battle",
|
633 |
+
"dress",
|
634 |
+
"knee",
|
635 |
+
"log",
|
636 |
+
"package",
|
637 |
+
"it",
|
638 |
+
"turn",
|
639 |
+
"hearing",
|
640 |
+
"newspaper",
|
641 |
+
"layer",
|
642 |
+
"wealth",
|
643 |
+
"profile",
|
644 |
+
"imagination",
|
645 |
+
"answer",
|
646 |
+
"weekend",
|
647 |
+
"teacher",
|
648 |
+
"appearance",
|
649 |
+
"meet",
|
650 |
+
"bike",
|
651 |
+
"rise",
|
652 |
+
"belt",
|
653 |
+
"crash",
|
654 |
+
"bowl",
|
655 |
+
"equivalent",
|
656 |
+
"support",
|
657 |
+
"image",
|
658 |
+
"poem",
|
659 |
+
"risk",
|
660 |
+
"excitement",
|
661 |
+
"remote",
|
662 |
+
"secretary",
|
663 |
+
"public",
|
664 |
+
"produce",
|
665 |
+
"plane",
|
666 |
+
"display",
|
667 |
+
"money",
|
668 |
+
"sand",
|
669 |
+
"situation",
|
670 |
+
"punch",
|
671 |
+
"customer",
|
672 |
+
"title",
|
673 |
+
"shake",
|
674 |
+
"mortgage",
|
675 |
+
"option",
|
676 |
+
"number",
|
677 |
+
"pop",
|
678 |
+
"window",
|
679 |
+
"extent",
|
680 |
+
"nothing",
|
681 |
+
"experience",
|
682 |
+
"opinion",
|
683 |
+
"departure",
|
684 |
+
"dance",
|
685 |
+
"indication",
|
686 |
+
"boy",
|
687 |
+
"material",
|
688 |
+
"band",
|
689 |
+
"leader",
|
690 |
+
"sun",
|
691 |
+
"beautiful",
|
692 |
+
"muscle",
|
693 |
+
"farmer",
|
694 |
+
"variety",
|
695 |
+
"fat",
|
696 |
+
"handle",
|
697 |
+
"director",
|
698 |
+
"opportunity",
|
699 |
+
"calendar",
|
700 |
+
"outside",
|
701 |
+
"pace",
|
702 |
+
"bath",
|
703 |
+
"fish",
|
704 |
+
"consequence",
|
705 |
+
"put",
|
706 |
+
"owner",
|
707 |
+
"go",
|
708 |
+
"doctor",
|
709 |
+
"information",
|
710 |
+
"share",
|
711 |
+
"hurt",
|
712 |
+
"protection",
|
713 |
+
"career",
|
714 |
+
"finance",
|
715 |
+
"force",
|
716 |
+
"golf",
|
717 |
+
"garbage",
|
718 |
+
"aspect",
|
719 |
+
"kid",
|
720 |
+
"food",
|
721 |
+
"boot",
|
722 |
+
"milk",
|
723 |
+
"respond",
|
724 |
+
"objective",
|
725 |
+
"reality",
|
726 |
+
"raw",
|
727 |
+
"ring",
|
728 |
+
"mall",
|
729 |
+
"one",
|
730 |
+
"impact",
|
731 |
+
"area",
|
732 |
+
"news",
|
733 |
+
"international",
|
734 |
+
"series",
|
735 |
+
"impress",
|
736 |
+
"mother",
|
737 |
+
"shelter",
|
738 |
+
"strike",
|
739 |
+
"loan",
|
740 |
+
"month",
|
741 |
+
"seat",
|
742 |
+
"anything",
|
743 |
+
"entertainment",
|
744 |
+
"familiar",
|
745 |
+
"clue",
|
746 |
+
"year",
|
747 |
+
"glad",
|
748 |
+
"supermarket",
|
749 |
+
"natural",
|
750 |
+
"god",
|
751 |
+
"cost",
|
752 |
+
"conversation",
|
753 |
+
"tie",
|
754 |
+
"ruin",
|
755 |
+
"comfort",
|
756 |
+
"earth",
|
757 |
+
"storm",
|
758 |
+
"percentage",
|
759 |
+
"assistance",
|
760 |
+
"budget",
|
761 |
+
"strength",
|
762 |
+
"beginning",
|
763 |
+
"sleep",
|
764 |
+
"other",
|
765 |
+
"young",
|
766 |
+
"unit",
|
767 |
+
"fill",
|
768 |
+
"store",
|
769 |
+
"desire",
|
770 |
+
"hide",
|
771 |
+
"value",
|
772 |
+
"cup",
|
773 |
+
"maintenance",
|
774 |
+
"nurse",
|
775 |
+
"function",
|
776 |
+
"tower",
|
777 |
+
"role",
|
778 |
+
"class",
|
779 |
+
"camera",
|
780 |
+
"database",
|
781 |
+
"panic",
|
782 |
+
"nation",
|
783 |
+
"basket",
|
784 |
+
"ice",
|
785 |
+
"art",
|
786 |
+
"spirit",
|
787 |
+
"chart",
|
788 |
+
"exchange",
|
789 |
+
"feedback",
|
790 |
+
"statement",
|
791 |
+
"reputation",
|
792 |
+
"search",
|
793 |
+
"hunt",
|
794 |
+
"exercise",
|
795 |
+
"nasty",
|
796 |
+
"notice",
|
797 |
+
"male",
|
798 |
+
"yard",
|
799 |
+
"annual",
|
800 |
+
"collar",
|
801 |
+
"date",
|
802 |
+
"platform",
|
803 |
+
"plant",
|
804 |
+
"fortune",
|
805 |
+
"passion",
|
806 |
+
"friendship",
|
807 |
+
"spread",
|
808 |
+
"cancer",
|
809 |
+
"ticket",
|
810 |
+
"attitude",
|
811 |
+
"island",
|
812 |
+
"active",
|
813 |
+
"object",
|
814 |
+
"service",
|
815 |
+
"buyer",
|
816 |
+
"bite",
|
817 |
+
"card",
|
818 |
+
"face",
|
819 |
+
"steak",
|
820 |
+
"proposal",
|
821 |
+
"patient",
|
822 |
+
"heat",
|
823 |
+
"rule",
|
824 |
+
"resident",
|
825 |
+
"broad",
|
826 |
+
"politics",
|
827 |
+
"west",
|
828 |
+
"knife",
|
829 |
+
"expert",
|
830 |
+
"girl",
|
831 |
+
"design",
|
832 |
+
"salt",
|
833 |
+
"baseball",
|
834 |
+
"grab",
|
835 |
+
"inspection",
|
836 |
+
"cousin",
|
837 |
+
"couple",
|
838 |
+
"magazine",
|
839 |
+
"cook",
|
840 |
+
"dependent",
|
841 |
+
"security",
|
842 |
+
"chicken",
|
843 |
+
"version",
|
844 |
+
"currency",
|
845 |
+
"ladder",
|
846 |
+
"scheme",
|
847 |
+
"kitchen",
|
848 |
+
"employment",
|
849 |
+
"local",
|
850 |
+
"attention",
|
851 |
+
"manager",
|
852 |
+
"fact",
|
853 |
+
"cover",
|
854 |
+
"sad",
|
855 |
+
"guard",
|
856 |
+
"relative",
|
857 |
+
"county",
|
858 |
+
"rate",
|
859 |
+
"lunch",
|
860 |
+
"program",
|
861 |
+
"initiative",
|
862 |
+
"gear",
|
863 |
+
"bridge",
|
864 |
+
"breast",
|
865 |
+
"talk",
|
866 |
+
"dish",
|
867 |
+
"guarantee",
|
868 |
+
"beer",
|
869 |
+
"vehicle",
|
870 |
+
"reception",
|
871 |
+
"woman",
|
872 |
+
"substance",
|
873 |
+
"copy",
|
874 |
+
"lecture",
|
875 |
+
"advantage",
|
876 |
+
"park",
|
877 |
+
"cold",
|
878 |
+
"death",
|
879 |
+
"mix",
|
880 |
+
"hold",
|
881 |
+
"scale",
|
882 |
+
"tomorrow",
|
883 |
+
"blood",
|
884 |
+
"request",
|
885 |
+
"green",
|
886 |
+
"cookie",
|
887 |
+
"church",
|
888 |
+
"strip",
|
889 |
+
"forever",
|
890 |
+
"beyond",
|
891 |
+
"debt",
|
892 |
+
"tackle",
|
893 |
+
"wash",
|
894 |
+
"following",
|
895 |
+
"feel",
|
896 |
+
"maximum",
|
897 |
+
"sector",
|
898 |
+
"sea",
|
899 |
+
"property",
|
900 |
+
"economics",
|
901 |
+
"menu",
|
902 |
+
"bench",
|
903 |
+
"try",
|
904 |
+
"language",
|
905 |
+
"start",
|
906 |
+
"call",
|
907 |
+
"solid",
|
908 |
+
"address",
|
909 |
+
"income",
|
910 |
+
"foot",
|
911 |
+
"senior",
|
912 |
+
"honey",
|
913 |
+
"few",
|
914 |
+
"mixture",
|
915 |
+
"cash",
|
916 |
+
"grocery",
|
917 |
+
"link",
|
918 |
+
"map",
|
919 |
+
"form",
|
920 |
+
"factor",
|
921 |
+
"pot",
|
922 |
+
"model",
|
923 |
+
"writer",
|
924 |
+
"farm",
|
925 |
+
"winter",
|
926 |
+
"skill",
|
927 |
+
"anywhere",
|
928 |
+
"birthday",
|
929 |
+
"policy",
|
930 |
+
"release",
|
931 |
+
"husband",
|
932 |
+
"lab",
|
933 |
+
"hurry",
|
934 |
+
"mail",
|
935 |
+
"equipment",
|
936 |
+
"sink",
|
937 |
+
"pair",
|
938 |
+
"driver",
|
939 |
+
"consideration",
|
940 |
+
"leather",
|
941 |
+
"skin",
|
942 |
+
"blue",
|
943 |
+
"boat",
|
944 |
+
"sale",
|
945 |
+
"brick",
|
946 |
+
"two",
|
947 |
+
"feed",
|
948 |
+
"square",
|
949 |
+
"dot",
|
950 |
+
"rush",
|
951 |
+
"dream",
|
952 |
+
"location",
|
953 |
+
"afternoon",
|
954 |
+
"manufacturer",
|
955 |
+
"control",
|
956 |
+
"occasion",
|
957 |
+
"trouble",
|
958 |
+
"introduction",
|
959 |
+
"advice",
|
960 |
+
"bet",
|
961 |
+
"eat",
|
962 |
+
"kill",
|
963 |
+
"category",
|
964 |
+
"manner",
|
965 |
+
"office",
|
966 |
+
"estate",
|
967 |
+
"pride",
|
968 |
+
"awareness",
|
969 |
+
"slip",
|
970 |
+
"crack",
|
971 |
+
"client",
|
972 |
+
"nail",
|
973 |
+
"shoot",
|
974 |
+
"membership",
|
975 |
+
"soft",
|
976 |
+
"anybody",
|
977 |
+
"web",
|
978 |
+
"official",
|
979 |
+
"individual",
|
980 |
+
"pizza",
|
981 |
+
"interest",
|
982 |
+
"bag",
|
983 |
+
"spell",
|
984 |
+
"profession",
|
985 |
+
"queen",
|
986 |
+
"deal",
|
987 |
+
"resource",
|
988 |
+
"ship",
|
989 |
+
"guy",
|
990 |
+
"chocolate",
|
991 |
+
"joint",
|
992 |
+
"formal",
|
993 |
+
"upstairs",
|
994 |
+
"car",
|
995 |
+
"resort",
|
996 |
+
"abroad",
|
997 |
+
"dealer",
|
998 |
+
"associate",
|
999 |
+
"finger",
|
1000 |
+
"surgery",
|
1001 |
+
"comment",
|
1002 |
+
"team",
|
1003 |
+
"detail",
|
1004 |
+
"crazy",
|
1005 |
+
"path",
|
1006 |
+
"tale",
|
1007 |
+
"initial",
|
1008 |
+
"arm",
|
1009 |
+
"radio",
|
1010 |
+
"demand",
|
1011 |
+
"single",
|
1012 |
+
"draw",
|
1013 |
+
"yellow",
|
1014 |
+
"contest",
|
1015 |
+
"piece",
|
1016 |
+
"quote",
|
1017 |
+
"pull",
|
1018 |
+
"commercial",
|
1019 |
+
"shirt",
|
1020 |
+
"contribution",
|
1021 |
+
"cream",
|
1022 |
+
"channel",
|
1023 |
+
"suit",
|
1024 |
+
"discipline",
|
1025 |
+
"instruction",
|
1026 |
+
"concert",
|
1027 |
+
"speech",
|
1028 |
+
"low",
|
1029 |
+
"effective",
|
1030 |
+
"hang",
|
1031 |
+
"scratch",
|
1032 |
+
"industry",
|
1033 |
+
"breakfast",
|
1034 |
+
"lay",
|
1035 |
+
"join",
|
1036 |
+
"metal",
|
1037 |
+
"bedroom",
|
1038 |
+
"minute",
|
1039 |
+
"product",
|
1040 |
+
"rest",
|
1041 |
+
"temperature",
|
1042 |
+
"many",
|
1043 |
+
"give",
|
1044 |
+
"argument",
|
1045 |
+
"print",
|
1046 |
+
"purple",
|
1047 |
+
"laugh",
|
1048 |
+
"health",
|
1049 |
+
"credit",
|
1050 |
+
"investment",
|
1051 |
+
"sell",
|
1052 |
+
"setting",
|
1053 |
+
"lesson",
|
1054 |
+
"egg",
|
1055 |
+
"middle",
|
1056 |
+
"marriage",
|
1057 |
+
"level",
|
1058 |
+
"evidence",
|
1059 |
+
"phrase",
|
1060 |
+
"love",
|
1061 |
+
"self",
|
1062 |
+
"benefit",
|
1063 |
+
"guidance",
|
1064 |
+
"affect",
|
1065 |
+
"you",
|
1066 |
+
"dad",
|
1067 |
+
"anxiety",
|
1068 |
+
"special",
|
1069 |
+
"boyfriend",
|
1070 |
+
"test",
|
1071 |
+
"blank",
|
1072 |
+
"payment",
|
1073 |
+
"soup",
|
1074 |
+
"obligation",
|
1075 |
+
"reply",
|
1076 |
+
"smile",
|
1077 |
+
"deep",
|
1078 |
+
"complaint",
|
1079 |
+
"addition",
|
1080 |
+
"review",
|
1081 |
+
"box",
|
1082 |
+
"towel",
|
1083 |
+
"minor",
|
1084 |
+
"fun",
|
1085 |
+
"soil",
|
1086 |
+
"issue",
|
1087 |
+
"cigarette",
|
1088 |
+
"internet",
|
1089 |
+
"gain",
|
1090 |
+
"tell",
|
1091 |
+
"entry",
|
1092 |
+
"spare",
|
1093 |
+
"incident",
|
1094 |
+
"family",
|
1095 |
+
"refuse",
|
1096 |
+
"branch",
|
1097 |
+
"can",
|
1098 |
+
"pen",
|
1099 |
+
"grandfather",
|
1100 |
+
"constant",
|
1101 |
+
"tank",
|
1102 |
+
"uncle",
|
1103 |
+
"climate",
|
1104 |
+
"ground",
|
1105 |
+
"volume",
|
1106 |
+
"communication",
|
1107 |
+
"kind",
|
1108 |
+
"poet",
|
1109 |
+
"child",
|
1110 |
+
"screen",
|
1111 |
+
"mine",
|
1112 |
+
"quit",
|
1113 |
+
"gene",
|
1114 |
+
"lack",
|
1115 |
+
"charity",
|
1116 |
+
"memory",
|
1117 |
+
"tooth",
|
1118 |
+
"fear",
|
1119 |
+
"mention",
|
1120 |
+
"marketing",
|
1121 |
+
"reveal",
|
1122 |
+
"reason",
|
1123 |
+
"court",
|
1124 |
+
"season",
|
1125 |
+
"freedom",
|
1126 |
+
"land",
|
1127 |
+
"sport",
|
1128 |
+
"audience",
|
1129 |
+
"classroom",
|
1130 |
+
"law",
|
1131 |
+
"hook",
|
1132 |
+
"win",
|
1133 |
+
"carry",
|
1134 |
+
"eye",
|
1135 |
+
"smell",
|
1136 |
+
"distribution",
|
1137 |
+
"research",
|
1138 |
+
"country",
|
1139 |
+
"dare",
|
1140 |
+
"hope",
|
1141 |
+
"whereas",
|
1142 |
+
"stretch",
|
1143 |
+
"library",
|
1144 |
+
"if",
|
1145 |
+
"delay",
|
1146 |
+
"college",
|
1147 |
+
"plastic",
|
1148 |
+
"book",
|
1149 |
+
"present",
|
1150 |
+
"use",
|
1151 |
+
"worry",
|
1152 |
+
"champion",
|
1153 |
+
"goal",
|
1154 |
+
"economy",
|
1155 |
+
"march",
|
1156 |
+
"election",
|
1157 |
+
"reflection",
|
1158 |
+
"midnight",
|
1159 |
+
"slide",
|
1160 |
+
"inflation",
|
1161 |
+
"action",
|
1162 |
+
"challenge",
|
1163 |
+
"guitar",
|
1164 |
+
"coast",
|
1165 |
+
"apple",
|
1166 |
+
"campaign",
|
1167 |
+
"field",
|
1168 |
+
"jacket",
|
1169 |
+
"sense",
|
1170 |
+
"way",
|
1171 |
+
"visual",
|
1172 |
+
"remove",
|
1173 |
+
"weather",
|
1174 |
+
"trash",
|
1175 |
+
"cable",
|
1176 |
+
"regret",
|
1177 |
+
"buddy",
|
1178 |
+
"beach",
|
1179 |
+
"historian",
|
1180 |
+
"courage",
|
1181 |
+
"sympathy",
|
1182 |
+
"truck",
|
1183 |
+
"tension",
|
1184 |
+
"permit",
|
1185 |
+
"nose",
|
1186 |
+
"bed",
|
1187 |
+
"son",
|
1188 |
+
"person",
|
1189 |
+
"base",
|
1190 |
+
"meat",
|
1191 |
+
"usual",
|
1192 |
+
"air",
|
1193 |
+
"meeting",
|
1194 |
+
"worth",
|
1195 |
+
"game",
|
1196 |
+
"independence",
|
1197 |
+
"physical",
|
1198 |
+
"brief",
|
1199 |
+
"play",
|
1200 |
+
"raise",
|
1201 |
+
"board",
|
1202 |
+
"she",
|
1203 |
+
"key",
|
1204 |
+
"writing",
|
1205 |
+
"pick",
|
1206 |
+
"command",
|
1207 |
+
"party",
|
1208 |
+
"yesterday",
|
1209 |
+
"spring",
|
1210 |
+
"candidate",
|
1211 |
+
"physics",
|
1212 |
+
"university",
|
1213 |
+
"concern",
|
1214 |
+
"development",
|
1215 |
+
"change",
|
1216 |
+
"string",
|
1217 |
+
"target",
|
1218 |
+
"instance",
|
1219 |
+
"room",
|
1220 |
+
"bitter",
|
1221 |
+
"bird",
|
1222 |
+
"football",
|
1223 |
+
"normal",
|
1224 |
+
"split",
|
1225 |
+
"impression",
|
1226 |
+
"wood",
|
1227 |
+
"long",
|
1228 |
+
"meaning",
|
1229 |
+
"stock",
|
1230 |
+
"cap",
|
1231 |
+
"leadership",
|
1232 |
+
"media",
|
1233 |
+
"ambition",
|
1234 |
+
"fishing",
|
1235 |
+
"essay",
|
1236 |
+
"salad",
|
1237 |
+
"repair",
|
1238 |
+
"today",
|
1239 |
+
"designer",
|
1240 |
+
"night",
|
1241 |
+
"bank",
|
1242 |
+
"drawing",
|
1243 |
+
"inevitable",
|
1244 |
+
"phase",
|
1245 |
+
"vast",
|
1246 |
+
"chip",
|
1247 |
+
"anger",
|
1248 |
+
"switch",
|
1249 |
+
"cry",
|
1250 |
+
"twist",
|
1251 |
+
"personality",
|
1252 |
+
"attempt",
|
1253 |
+
"storage",
|
1254 |
+
"being",
|
1255 |
+
"preparation",
|
1256 |
+
"bat",
|
1257 |
+
"selection",
|
1258 |
+
"white",
|
1259 |
+
"technology",
|
1260 |
+
"contract",
|
1261 |
+
"side",
|
1262 |
+
"section",
|
1263 |
+
"station",
|
1264 |
+
"till",
|
1265 |
+
"structure",
|
1266 |
+
"tongue",
|
1267 |
+
"taste",
|
1268 |
+
"truth",
|
1269 |
+
"difficulty",
|
1270 |
+
"group",
|
1271 |
+
"limit",
|
1272 |
+
"main",
|
1273 |
+
"move",
|
1274 |
+
"feeling",
|
1275 |
+
"light",
|
1276 |
+
"example",
|
1277 |
+
"mission",
|
1278 |
+
"might",
|
1279 |
+
"wait",
|
1280 |
+
"wheel",
|
1281 |
+
"shop",
|
1282 |
+
"host",
|
1283 |
+
"classic",
|
1284 |
+
"alternative",
|
1285 |
+
"cause",
|
1286 |
+
"agent",
|
1287 |
+
"consist",
|
1288 |
+
"table",
|
1289 |
+
"airline",
|
1290 |
+
"text",
|
1291 |
+
"pool",
|
1292 |
+
"craft",
|
1293 |
+
"range",
|
1294 |
+
"fuel",
|
1295 |
+
"tool",
|
1296 |
+
"partner",
|
1297 |
+
"load",
|
1298 |
+
"entrance",
|
1299 |
+
"deposit",
|
1300 |
+
"hate",
|
1301 |
+
"article",
|
1302 |
+
"video",
|
1303 |
+
"summer",
|
1304 |
+
"feature",
|
1305 |
+
"extreme",
|
1306 |
+
"mobile",
|
1307 |
+
"hospital",
|
1308 |
+
"flight",
|
1309 |
+
"fall",
|
1310 |
+
"pension",
|
1311 |
+
"piano",
|
1312 |
+
"fail",
|
1313 |
+
"result",
|
1314 |
+
"rub",
|
1315 |
+
"gap",
|
1316 |
+
"system",
|
1317 |
+
"report",
|
1318 |
+
"suck",
|
1319 |
+
"ordinary",
|
1320 |
+
"wind",
|
1321 |
+
"nerve",
|
1322 |
+
"ask",
|
1323 |
+
"shine",
|
1324 |
+
"note",
|
1325 |
+
"line",
|
1326 |
+
"mom",
|
1327 |
+
"perception",
|
1328 |
+
"brother",
|
1329 |
+
"reference",
|
1330 |
+
"bend",
|
1331 |
+
"charge",
|
1332 |
+
"treat",
|
1333 |
+
"trick",
|
1334 |
+
"term",
|
1335 |
+
"homework",
|
1336 |
+
"bake",
|
1337 |
+
"bid",
|
1338 |
+
"status",
|
1339 |
+
"project",
|
1340 |
+
"strategy",
|
1341 |
+
"orange",
|
1342 |
+
"let",
|
1343 |
+
"enthusiasm",
|
1344 |
+
"parent",
|
1345 |
+
"concentrate",
|
1346 |
+
"device",
|
1347 |
+
"travel",
|
1348 |
+
"poetry",
|
1349 |
+
"business",
|
1350 |
+
"society",
|
1351 |
+
"kiss",
|
1352 |
+
"end",
|
1353 |
+
"vegetable",
|
1354 |
+
"employ",
|
1355 |
+
"schedule",
|
1356 |
+
"hour",
|
1357 |
+
"brave",
|
1358 |
+
"focus",
|
1359 |
+
"process",
|
1360 |
+
"movie",
|
1361 |
+
"illegal",
|
1362 |
+
"general",
|
1363 |
+
"coffee",
|
1364 |
+
"ad",
|
1365 |
+
"highway",
|
1366 |
+
"chemistry",
|
1367 |
+
"psychology",
|
1368 |
+
"hire",
|
1369 |
+
"bell",
|
1370 |
+
"conference",
|
1371 |
+
"relief",
|
1372 |
+
"show",
|
1373 |
+
"neat",
|
1374 |
+
"funny",
|
1375 |
+
"weight",
|
1376 |
+
"quality",
|
1377 |
+
"club",
|
1378 |
+
"daughter",
|
1379 |
+
"zone",
|
1380 |
+
"touch",
|
1381 |
+
"tonight",
|
1382 |
+
"shock",
|
1383 |
+
"burn",
|
1384 |
+
"excuse",
|
1385 |
+
"name",
|
1386 |
+
"survey",
|
1387 |
+
"landscape",
|
1388 |
+
"advance",
|
1389 |
+
"satisfaction",
|
1390 |
+
"bread",
|
1391 |
+
"disaster",
|
1392 |
+
"item",
|
1393 |
+
"hat",
|
1394 |
+
"prior",
|
1395 |
+
"shopping",
|
1396 |
+
"visit",
|
1397 |
+
"east",
|
1398 |
+
"photo",
|
1399 |
+
"home",
|
1400 |
+
"idea",
|
1401 |
+
"father",
|
1402 |
+
"comparison",
|
1403 |
+
"cat",
|
1404 |
+
"pipe",
|
1405 |
+
"winner",
|
1406 |
+
"count",
|
1407 |
+
"lake",
|
1408 |
+
"fight",
|
1409 |
+
"prize",
|
1410 |
+
"foundation",
|
1411 |
+
"dog",
|
1412 |
+
"keep",
|
1413 |
+
"ideal",
|
1414 |
+
"fan",
|
1415 |
+
"struggle",
|
1416 |
+
"peak",
|
1417 |
+
"safety",
|
1418 |
+
"solution",
|
1419 |
+
"hell",
|
1420 |
+
"conclusion",
|
1421 |
+
"population",
|
1422 |
+
"strain",
|
1423 |
+
"alarm",
|
1424 |
+
"measurement",
|
1425 |
+
"second",
|
1426 |
+
"train",
|
1427 |
+
"race",
|
1428 |
+
"due",
|
1429 |
+
"insurance",
|
1430 |
+
"boss",
|
1431 |
+
"tree",
|
1432 |
+
"monitor",
|
1433 |
+
"sick",
|
1434 |
+
"course",
|
1435 |
+
"drag",
|
1436 |
+
"appointment",
|
1437 |
+
"slice",
|
1438 |
+
"still",
|
1439 |
+
"care",
|
1440 |
+
"patience",
|
1441 |
+
"rich",
|
1442 |
+
"escape",
|
1443 |
+
"emotion",
|
1444 |
+
"royal",
|
1445 |
+
"female",
|
1446 |
+
"childhood",
|
1447 |
+
"government",
|
1448 |
+
"picture",
|
1449 |
+
"will",
|
1450 |
+
"sock",
|
1451 |
+
"big",
|
1452 |
+
"gate",
|
1453 |
+
"oil",
|
1454 |
+
"cross",
|
1455 |
+
"pin",
|
1456 |
+
"improvement",
|
1457 |
+
"championship",
|
1458 |
+
"silly",
|
1459 |
+
"help",
|
1460 |
+
"sky",
|
1461 |
+
"pitch",
|
1462 |
+
"man",
|
1463 |
+
"diamond",
|
1464 |
+
"most",
|
1465 |
+
"transition",
|
1466 |
+
"work",
|
1467 |
+
"science",
|
1468 |
+
"committee",
|
1469 |
+
"moment",
|
1470 |
+
"fix",
|
1471 |
+
"teaching",
|
1472 |
+
"dig",
|
1473 |
+
"specialist",
|
1474 |
+
"complex",
|
1475 |
+
"guide",
|
1476 |
+
"people",
|
1477 |
+
"dead",
|
1478 |
+
"voice",
|
1479 |
+
"original",
|
1480 |
+
"break",
|
1481 |
+
"topic",
|
1482 |
+
"data",
|
1483 |
+
"degree",
|
1484 |
+
"reading",
|
1485 |
+
"recording",
|
1486 |
+
"bunch",
|
1487 |
+
"reach",
|
1488 |
+
"judgment",
|
1489 |
+
"lie",
|
1490 |
+
"regular",
|
1491 |
+
"set",
|
1492 |
+
"painting",
|
1493 |
+
"mode",
|
1494 |
+
"list",
|
1495 |
+
"player",
|
1496 |
+
"bear",
|
1497 |
+
"north",
|
1498 |
+
"wonder",
|
1499 |
+
"carpet",
|
1500 |
+
"heavy",
|
1501 |
+
"officer",
|
1502 |
+
"negative",
|
1503 |
+
"clock",
|
1504 |
+
"unique",
|
1505 |
+
"baby",
|
1506 |
+
"pain",
|
1507 |
+
"assumption",
|
1508 |
+
"disk",
|
1509 |
+
"iron",
|
1510 |
+
"bill",
|
1511 |
+
"drawer",
|
1512 |
+
"look",
|
1513 |
+
"double",
|
1514 |
+
"mistake",
|
1515 |
+
"finish",
|
1516 |
+
"future",
|
1517 |
+
"brilliant",
|
1518 |
+
"contact",
|
1519 |
+
"math",
|
1520 |
+
"rice",
|
1521 |
+
"leave",
|
1522 |
+
"restaurant",
|
1523 |
+
"discount",
|
1524 |
+
"sex",
|
1525 |
+
"virus",
|
1526 |
+
"bit",
|
1527 |
+
"trust",
|
1528 |
+
"event",
|
1529 |
+
"wear",
|
1530 |
+
"juice",
|
1531 |
+
"failure",
|
1532 |
+
"bug",
|
1533 |
+
"context",
|
1534 |
+
"mud",
|
1535 |
+
"whole",
|
1536 |
+
"wrap",
|
1537 |
+
"intention",
|
1538 |
+
"draft",
|
1539 |
+
"pressure",
|
1540 |
+
"cake",
|
1541 |
+
"dark",
|
1542 |
+
"explanation",
|
1543 |
+
"space",
|
1544 |
+
"angle",
|
1545 |
+
"word",
|
1546 |
+
"efficiency",
|
1547 |
+
"management",
|
1548 |
+
"habit",
|
1549 |
+
"star",
|
1550 |
+
"chance",
|
1551 |
+
"finding",
|
1552 |
+
"transportation",
|
1553 |
+
"stand",
|
1554 |
+
"criticism",
|
1555 |
+
"flow",
|
1556 |
+
"door",
|
1557 |
+
"injury",
|
1558 |
+
"insect",
|
1559 |
+
"surprise",
|
1560 |
+
"apartment",
|
1561 |
+
] # pylint: disable=line-too-long
|
1562 |
+
|
1563 |
+
# ISO 639-1 codes to language names.
# Wrapped in immutabledict so callers cannot mutate the shared mapping
# at runtime; lookups behave exactly like a regular dict.
LANGUAGE_CODES = immutabledict.immutabledict(
    {
        "en": "English",
        "es": "Spanish",
        "pt": "Portuguese",
        "ar": "Arabic",
        "hi": "Hindi",
        "fr": "French",
        "ru": "Russian",
        "de": "German",
        "ja": "Japanese",
        "it": "Italian",
        "bn": "Bengali",
        "uk": "Ukrainian",
        "th": "Thai",
        "ur": "Urdu",
        "ta": "Tamil",
        "te": "Telugu",
        "bg": "Bulgarian",
        "ko": "Korean",
        "pl": "Polish",
        "he": "Hebrew",
        "fa": "Persian",
        "vi": "Vietnamese",
        "ne": "Nepali",
        "sw": "Swahili",
        "kn": "Kannada",
        "mr": "Marathi",
        "gu": "Gujarati",
        "pa": "Punjabi",
        "ml": "Malayalam",
        "fi": "Finnish",
    }
)
|
1598 |
+
|
1599 |
+
# Regex fragments used by split_into_sentences() below. The splitter first
# rewrites periods that do NOT end a sentence to a "<prd>" placeholder using
# these patterns, then splits on the remaining sentence-final punctuation.
_ALPHABETS = "([A-Za-z])"  # one ASCII letter (captured for back-reference)
_PREFIXES = "(Mr|St|Mrs|Ms|Dr)[.]"  # honorifics whose trailing dot is not a stop
_SUFFIXES = "(Inc|Ltd|Jr|Sr|Co)"  # name/company suffixes often followed by a dot
# Words/pronouns that commonly begin a new sentence; used to decide that the
# dot preceding them really was a sentence boundary.
_STARTERS = r"(Mr|Mrs|Ms|Dr|Prof|Capt|Cpt|Lt|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
_ACRONYMS = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"  # dotted acronyms, e.g. "U.S." / "U.S.A."
_WEBSITES = "[.](com|net|org|io|gov|edu|me)"  # dot before a common TLD (part of a URL)
_DIGITS = "([0-9])"  # one digit; guards decimal points like "3.14"
_MULTIPLE_DOTS = r"\.{2,}"  # runs of two or more dots (ellipses)
|
1607 |
+
|
1608 |
+
|
1609 |
+
def split_into_sentences(text):
    """Split the text into sentences.

    Strategy: periods that do not end a sentence (abbreviations, acronyms,
    decimals, domain names) are temporarily rewritten to the marker "<prd>",
    genuine sentence boundaries are marked with "<stop>", and the text is
    finally split on "<stop>". The substitution order below is significant.

    Args:
      text: A string that consists of more than or equal to one sentences.

    Returns:
      A list of strings where each string is a sentence.
    """
    text = " " + text + " "  # pad so boundary patterns can match at the ends
    text = text.replace("\n", " ")
    # Protect dots in honorifics ("Dr."), website TLDs and decimal numbers.
    text = re.sub(_PREFIXES, "\\1<prd>", text)
    text = re.sub(_WEBSITES, "<prd>\\1", text)
    text = re.sub(_DIGITS + "[.]" + _DIGITS, "\\1<prd>\\2", text)
    # An ellipsis ends a sentence, but its individual dots must not split it.
    text = re.sub(
        _MULTIPLE_DOTS,
        lambda match: "<prd>" * len(match.group(0)) + "<stop>",
        text,
    )
    if "Ph.D" in text:
        text = text.replace("Ph.D.", "Ph<prd>D<prd>")
    # Protect single-letter abbreviations and multi-letter acronyms.
    text = re.sub(r"\s" + _ALPHABETS + "[.] ", " \\1<prd> ", text)
    text = re.sub(_ACRONYMS + " " + _STARTERS, "\\1<stop> \\2", text)
    text = re.sub(
        _ALPHABETS + "[.]" + _ALPHABETS + "[.]" + _ALPHABETS + "[.]",
        "\\1<prd>\\2<prd>\\3<prd>",
        text,
    )
    text = re.sub(_ALPHABETS + "[.]" + _ALPHABETS + "[.]", "\\1<prd>\\2<prd>", text)
    text = re.sub(" " + _SUFFIXES + "[.] " + _STARTERS, " \\1<stop> \\2", text)
    text = re.sub(" " + _SUFFIXES + "[.]", " \\1<prd>", text)
    text = re.sub(" " + _ALPHABETS + "[.]", " \\1<prd>", text)
    # Move terminal punctuation outside closing quotes so the quote character
    # stays attached to its sentence.
    if "”" in text:
        text = text.replace(".”", "”.")
    if '"' in text:
        text = text.replace('."', '".')
    if "!" in text:
        text = text.replace('!"', '"!')
    if "?" in text:
        text = text.replace('?"', '"?')
    # Every remaining ".", "?" or "!" now marks a real sentence terminator.
    text = text.replace(".", ".<stop>")
    text = text.replace("?", "?<stop>")
    text = text.replace("!", "!<stop>")
    text = text.replace("<prd>", ".")  # restore the protected periods
    sentences = text.split("<stop>")
    sentences = [s.strip() for s in sentences]
    if sentences and not sentences[-1]:
        sentences = sentences[:-1]  # drop the empty trailing fragment
    return sentences
|
1658 |
+
|
1659 |
+
|
1660 |
+
def count_words(text):
    r"""Count the words in *text*, where a word is a maximal run of ``\w`` characters."""
    word_tokenizer = nltk.tokenize.RegexpTokenizer(r"\w+")
    return len(word_tokenizer.tokenize(text))
|
1666 |
+
|
1667 |
+
|
1668 |
+
@functools.lru_cache(maxsize=None)
def _get_sentence_tokenizer():
    # Loading the punkt pickle from disk is expensive, so the tokenizer is
    # cached for the lifetime of the process.
    return nltk.data.load("nltk:tokenizers/punkt/english.pickle")
|
1671 |
+
|
1672 |
+
|
1673 |
+
def count_sentences(text):
    """Return how many sentences the cached punkt tokenizer finds in *text*."""
    return len(_get_sentence_tokenizer().tokenize(text))
|
1678 |
+
|
1679 |
+
|
1680 |
+
def generate_keywords(num_keywords):
    """Return *num_keywords* distinct keywords sampled at random from WORD_LIST."""
    sampled = random.sample(WORD_LIST, k=num_keywords)
    return sampled
|
lm-evaluation/lm_eval/tasks/ifeval/utils.py
ADDED
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import dataclasses
|
2 |
+
from typing import Dict, Optional, Union
|
3 |
+
|
4 |
+
from lm_eval.tasks.ifeval import instructions_registry
|
5 |
+
from lm_eval.utils import eval_logger
|
6 |
+
|
7 |
+
|
8 |
+
@dataclasses.dataclass
class InputExample:
    """One IFEval prompt together with the instructions it must satisfy."""

    key: int  # unique id of the example
    instruction_id_list: list[str]  # keys into instructions_registry.INSTRUCTION_DICT
    prompt: str
    # One kwargs dict per instruction id, forwarded to build_description();
    # None values are filtered out before the call.
    kwargs: list[Dict[str, Optional[Union[str, int]]]]
|
14 |
+
|
15 |
+
|
16 |
+
@dataclasses.dataclass
class OutputExample:
    """Evaluation outcome for one model response."""

    instruction_id_list: list[str]
    prompt: str
    response: str
    follow_all_instructions: bool  # True iff every instruction was followed
    follow_instruction_list: list[bool]  # per-instruction pass/fail, aligned with instruction_id_list
|
23 |
+
|
24 |
+
|
25 |
+
def test_instruction_following_strict(
    inp,
    response,
):
    """Tests response to see if instructions are followed.

    Args:
      inp: InputExample holding the prompt, instruction ids and their kwargs.
      response: Model response string, checked verbatim (strict mode).

    Returns:
      OutputExample with per-instruction pass/fail results and their
      conjunction in ``follow_all_instructions``.
    """
    instruction_list = inp.instruction_id_list
    is_following_list = []

    for index, instruction_id in enumerate(instruction_list):
        instruction_cls = instructions_registry.INSTRUCTION_DICT[instruction_id]
        instruction = instruction_cls(instruction_id)

        # Remove None values from kwargs to avoid unexpected keyword argument
        # errors in build_description. Only None is filtered: falsy-but-valid
        # values such as 0, "" or False must still be forwarded (the previous
        # `if v` filter silently dropped them).
        kwargs = {k: v for k, v in inp.kwargs[index].items() if v is not None}
        instruction.build_description(**kwargs)
        args = instruction.get_instruction_args()
        if args and "prompt" in args:
            # Some instructions are parameterized by the original prompt.
            instruction.build_description(prompt=inp.prompt)

        # An empty response can never satisfy an instruction.
        if response.strip() and instruction.check_following(response):
            is_following_list.append(True)
        else:
            is_following_list.append(False)

    return OutputExample(
        instruction_id_list=inp.instruction_id_list,
        prompt=inp.prompt,
        response=response,
        follow_all_instructions=all(is_following_list),
        follow_instruction_list=is_following_list,
    )
|
56 |
+
|
57 |
+
|
58 |
+
def test_instruction_following_loose(
    inp,
    response,
):
    """Tests response for an upper bound for following instructions.

    Several relaxed variants of the response are generated (first/last line
    removed, asterisks stripped, and combinations thereof); an instruction
    counts as followed if ANY variant satisfies it.

    Args:
      inp: InputExample holding the prompt, instruction ids and their kwargs.
      response: Model response string to check.

    Returns:
      OutputExample with per-instruction pass/fail results and their
      conjunction in ``follow_all_instructions``.
    """
    r = response.split("\n")
    response_remove_first = "\n".join(r[1:]).strip()
    response_remove_last = "\n".join(r[:-1]).strip()
    response_remove_both = "\n".join(r[1:-1]).strip()
    revised_response = response.replace("*", "")
    revised_response_remove_first = response_remove_first.replace("*", "")
    revised_response_remove_last = response_remove_last.replace("*", "")
    revised_response_remove_both = response_remove_both.replace("*", "")
    all_responses = [
        response,
        revised_response,
        response_remove_first,
        response_remove_last,
        response_remove_both,
        revised_response_remove_first,
        revised_response_remove_last,
        revised_response_remove_both,
    ]
    instruction_list = inp.instruction_id_list
    is_following_list = []

    for index, instruction_id in enumerate(instruction_list):
        instruction_cls = instructions_registry.INSTRUCTION_DICT[instruction_id]
        instruction = instruction_cls(instruction_id)

        # Remove None values from kwargs to avoid unexpected keyword argument
        # errors in build_description. Only None is filtered: falsy-but-valid
        # values such as 0, "" or False must still be forwarded (the previous
        # `if v` filter silently dropped them).
        kwargs = {k: v for k, v in inp.kwargs[index].items() if v is not None}
        instruction.build_description(**kwargs)
        args = instruction.get_instruction_args()
        if args and "prompt" in args:
            # Some instructions are parameterized by the original prompt.
            instruction.build_description(prompt=inp.prompt)

        is_following = False
        for r in all_responses:
            if r.strip() and instruction.check_following(r):
                is_following = True
                break

        is_following_list.append(is_following)

    return OutputExample(
        instruction_id_list=inp.instruction_id_list,
        prompt=inp.prompt,
        response=response,
        follow_all_instructions=all(is_following_list),
        follow_instruction_list=is_following_list,
    )
|
110 |
+
|
111 |
+
|
112 |
+
def process_results(doc, results):
    """Score one IFEval document: strict and loose, prompt- and instruction-level.

    Args:
      doc: Dataset row with "key", "instruction_id_list", "prompt", "kwargs".
      results: List whose first element is the model's generated response.

    Returns:
      Dict of the four accuracy fields consumed by the task's aggregations.
    """
    eval_logger.warning(
        "This task is meant for chat-finetuned models, and may not give meaningful results for models other than `openai` or `anthropic` if `doc_to_text` in its YAML is not wrapped in the appropriate chat template string. This warning will be removed when chat templating support is added natively to local models"
    )

    example = InputExample(
        key=doc["key"],
        instruction_id_list=doc["instruction_id_list"],
        prompt=doc["prompt"],
        kwargs=doc["kwargs"],
    )
    model_response = results[0]

    strict_out = test_instruction_following_strict(example, model_response)
    loose_out = test_instruction_following_loose(example, model_response)

    return {
        "prompt_level_strict_acc": strict_out.follow_all_instructions,
        "inst_level_strict_acc": strict_out.follow_instruction_list,
        "prompt_level_loose_acc": loose_out.follow_all_instructions,
        "inst_level_loose_acc": loose_out.follow_instruction_list,
    }
|
134 |
+
|
135 |
+
|
136 |
+
def agg_inst_level_acc(items):
    """Mean over all instruction-level booleans, flattened across prompts."""
    total = 0
    correct = 0
    for per_prompt_flags in items:
        for followed in per_prompt_flags:
            total += 1
            correct += followed  # bools count as 0/1
    return correct / total
|
lm-evaluation/lm_eval/tasks/tmmluplus/README.md
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# TMMLU+
|
2 |
+
|
3 |
+
### Paper
|
4 |
+
|
5 |
+
Title: `An Improved Traditional Chinese Evaluation Suite for Foundation Model`
|
6 |
+
|
7 |
+
Abstract: `We present TMMLU+, a comprehensive dataset designed for the Traditional Chinese massive multitask language understanding dataset. TMMLU+ is a multiple-choice question-answering dataset with 66 subjects from elementary to professional level. Compared to its predecessor, TMMLU, TMMLU+ is six times larger and boasts a more balanced subject distribution. We included benchmark results in TMMLU+ from closed-source models and 24 open-weight Chinese large language models of parameters ranging from 1.8B to 72B. Our findings reveal that Traditional Chinese models still trail behind their Simplified Chinese counterparts. Additionally, current large language models have yet to outperform human performance in average scores. We publicly release our dataset and the corresponding benchmark source code.`
|
8 |
+
|
9 |
+
|
10 |
+
Homepage: [https://huggingface.co/datasets/ikala/tmmluplus](https://huggingface.co/datasets/ikala/tmmluplus)
|
11 |
+
|
12 |
+
|
13 |
+
### Citation
|
14 |
+
|
15 |
+
```
|
16 |
+
@article{ikala2024improved,
|
17 |
+
title={An Improved Traditional Chinese Evaluation Suite for Foundation Model},
|
18 |
+
author={Tam, Zhi-Rui and Pai, Ya-Ting and Lee, Yen-Wei and Cheng, Sega and Shuai, Hong-Han},
|
19 |
+
journal={arXiv preprint arXiv:2403.01858},
|
20 |
+
year={2024}
|
21 |
+
}
|
22 |
+
```
|
23 |
+
|
24 |
+
### Groups and Tasks
|
25 |
+
|
26 |
+
#### Groups
|
27 |
+
|
28 |
+
* `tmmluplus`: `The dataset comprises 22,690 multiple-choice questions from 66 subjects, ranging from primary to professional level.`
|
29 |
+
|
30 |
+
#### Tasks
|
31 |
+
|
32 |
+
The following tasks evaluate subjects in the TMMLU+ dataset using loglikelihood-based multiple-choice scoring:
|
33 |
+
|
34 |
+
* `tmmluplus_{subject_english}`
|
35 |
+
|
36 |
+
### Checklist
|
37 |
+
|
38 |
+
For adding novel benchmarks/datasets to the library:
|
39 |
+
* [x] Is the task an existing benchmark in the literature?
|
40 |
+
* [x] Have you referenced the original paper that introduced the task?
|
41 |
+
* [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
|
42 |
+
|
43 |
+
|
44 |
+
If other tasks on this dataset are already supported:
|
45 |
+
* [x] Is the "Main" variant of this task clearly denoted?
|
46 |
+
* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
|
47 |
+
* [x] Have you noted which, if any, published evaluation setups are matched by this variant?
|
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_accounting.yaml
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"dataset_name": "accounting"
|
2 |
+
"description": "以下為會計學的單選題,請提供正確答案的選項。\n\n"
|
3 |
+
"group": "tmmluplus_other"
|
4 |
+
"group_alias": "other"
|
5 |
+
"include": "_default_template_yaml"
|
6 |
+
"task": "tmmluplus_accounting"
|
7 |
+
"task_alias": "accounting"
|
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_advance_chemistry.yaml
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"dataset_name": "advance_chemistry"
|
2 |
+
"description": "以下為化學的單選題,請提供正確答案的選項。\n\n"
|
3 |
+
"group": "tmmluplus_STEM"
|
4 |
+
"group_alias": "STEM"
|
5 |
+
"include": "_default_template_yaml"
|
6 |
+
"task": "tmmluplus_advance_chemistry"
|
7 |
+
"task_alias": "advance chemistry"
|
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_basic_medical_science.yaml
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"dataset_name": "basic_medical_science"
|
2 |
+
"description": "以下為基礎醫學的單選題,請提供正確答案的選項。\n\n"
|
3 |
+
"group": "tmmluplus_STEM"
|
4 |
+
"group_alias": "STEM"
|
5 |
+
"include": "_default_template_yaml"
|
6 |
+
"task": "tmmluplus_basic_medical_science"
|
7 |
+
"task_alias": "basic medical science"
|
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_chinese_language_and_literature.yaml
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"dataset_name": "chinese_language_and_literature"
|
2 |
+
"description": "以下為國文的單選題,請提供正確答案的選項。\n\n"
|
3 |
+
"group": "tmmluplus_social_sciences"
|
4 |
+
"group_alias": "social sciences"
|
5 |
+
"include": "_default_template_yaml"
|
6 |
+
"task": "tmmluplus_chinese_language_and_literature"
|
7 |
+
"task_alias": "chinese language and literature"
|
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_computer_science.yaml
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"dataset_name": "computer_science"
|
2 |
+
"description": "以下為資訊工程的單選題,請提供正確答案的選項。\n\n"
|
3 |
+
"group": "tmmluplus_STEM"
|
4 |
+
"group_alias": "STEM"
|
5 |
+
"include": "_default_template_yaml"
|
6 |
+
"task": "tmmluplus_computer_science"
|
7 |
+
"task_alias": "computer science"
|
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_educational_psychology.yaml
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"dataset_name": "educational_psychology"
|
2 |
+
"description": "以下為教育心理的單選題,請提供正確答案的選項。\n\n"
|
3 |
+
"group": "tmmluplus_social_sciences"
|
4 |
+
"group_alias": "social sciences"
|
5 |
+
"include": "_default_template_yaml"
|
6 |
+
"task": "tmmluplus_educational_psychology"
|
7 |
+
"task_alias": "educational psychology"
|
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_engineering_math.yaml
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"dataset_name": "engineering_math"
|
2 |
+
"description": "以下為工程數學的單選題,請提供正確答案的選項。\n\n"
|
3 |
+
"group": "tmmluplus_STEM"
|
4 |
+
"group_alias": "STEM"
|
5 |
+
"include": "_default_template_yaml"
|
6 |
+
"task": "tmmluplus_engineering_math"
|
7 |
+
"task_alias": "engineering math"
|
lm-evaluation/lm_eval/tasks/tmmluplus/default/tmmluplus_fire_science.yaml
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"dataset_name": "fire_science"
|
2 |
+
"description": "以下為火災學的單選題,請提供正確答案的選項。\n\n"
|
3 |
+
"group": "tmmluplus_other"
|
4 |
+
"group_alias": "other"
|
5 |
+
"include": "_default_template_yaml"
|
6 |
+
"task": "tmmluplus_fire_science"
|
7 |
+
"task_alias": "fire science"
|