applied-ai-018 committed on
Commit 9605203 · verified · 1 Parent(s): d1396f0

Add files using upload-large-folder tool

Files changed (44)
  1. data/catalogue/catalogue-gptneo-jsonl-to-meg-ds.slurm +50 -0
  2. data/catalogue/catalogue-jsonl-to-meg-ds.slurm +50 -0
  3. data/catalogue/convert_to_jsonl.slurm +31 -0
  4. data/catalogue/get_sizes.py +58 -0
  5. data/catalogue/get_sizes.slurm +34 -0
  6. data/catalogue/load_ratios_meg_ds_format.py +54 -0
  7. data/catalogue/merge_dataset_per_language.backup.py +76 -0
  8. data/catalogue/merge_dataset_per_languages.backup.slurm +60 -0
  9. data/catalogue/merge_dataset_per_languages.slurm +38 -0
  10. data/catalogue/merge_dataset_per_languages_v3.slurm +38 -0
  11. data/catalogue/merge_nigercongo.slurm +38 -0
  12. data/catalogue/oscar-piiv2-jsonl-to-meg-ds.slurm +50 -0
  13. data/catalogue/sample_and_convert_to_jsonl.py +650 -0
  14. data/catalogue/training_dataset_ratios.json +198 -0
  15. data/catalogue/training_dataset_ratios_batch_0.json +0 -0
  16. data/catalogue/training_dataset_ratios_merged_nigercongo.json +114 -0
  17. data/catalogue/training_dataset_ratios_merged_nigercongo_v3.json +114 -0
  18. data/mc4/README.md +26 -0
  19. data/openwebtext/openwebtext-to-jsonl.py +28 -0
  20. data/oscar-multilingual/README.md +218 -0
  21. data/oscar-multilingual/download-oscars.py +50 -0
  22. data/oscar-multilingual/download-oscars.slurm +18 -0
  23. data/oscar-multilingual/oscar-fast-shuffle.slurm +27 -0
  24. data/oscar-multilingual/oscar-jsonl-to-meg.sh +7 -0
  25. data/oscar-multilingual/oscar-jsonl-to-meg.slurm +30 -0
  26. data/oscar-multilingual/oscar-meg-gpt2-merge.slurm +24 -0
  27. data/oscar-multilingual/oscar-multilingual-to-jsonl.py +106 -0
  28. data/oscar-multilingual/oscar-to-backup-tgz.slurm +26 -0
  29. data/p3/prepare_p3.py +366 -0
  30. data/p3/prepare_p3.slurm +19 -0
  31. data/sampling_probs/calc_iterator_prob.py +132 -0
  32. data/sampling_probs/calc_iterator_prob.sh +12 -0
  33. data/sampling_probs/new_to_old_format_data_path.py +70 -0
  34. data/xp3/download_all_datasets.py +162 -0
  35. data/xp3/p3_jsonl_to_meg_bos.slurm +66 -0
  36. data/xp3/p3_jsonl_to_meg_eos.slurm +66 -0
  37. data/xp3/prepare_xp3_train.py +1194 -0
  38. data/xp3/prepare_xp3_train.slurm +18 -0
  39. data/xp3/update_jsonls.py +21 -0
  40. data/xp3/xp3_jsonl_to_meg.slurm +150 -0
  41. data/xp3/xp3cappedmixed_jsonl_to_meg.slurm +104 -0
  42. data/xp3/xp3mixed_jsonl_to_meg.slurm +102 -0
  43. inference/README.md +15 -0
  44. inference/modeling_gpt2_alibi_prefix_lm.py +1750 -0
data/catalogue/catalogue-gptneo-jsonl-to-meg-ds.slurm ADDED
@@ -0,0 +1,50 @@
+ #!/bin/bash
+ #SBATCH --job-name=catalogue-gptneo-jsonl-to-meg-ds # job name
+ #SBATCH --ntasks=1 # number of MP tasks
+ #SBATCH --nodes=1
+ #SBATCH --cpus-per-task=40 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=logs/catalogue-gptneo-jsonl-to-meg-ds/%x-%j.out # output file name
+ #SBATCH --account=six@cpu
+ #SBATCH --array=0-43
+ #SBATCH --partition=cpu_p1
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-prod
+ # we need transformers on master as of writing this to get the Neo20b tokenizer
+ conda activate teven-tests
+
+ # ======= Generate meg-ds file ======
+
+ DATASET_PATHS=($(ls -d $six_ALL_CCFRWORK/bigscience-training/jsonls/jsonl_v2/en/*.jsonl))
+ DATASET_PATH=${DATASET_PATHS[$SLURM_ARRAY_TASK_ID]}
+
+ TOKENIZER_NAME_OR_PATH=EleutherAI/gpt-neox-20b
+
+ DATASET_NAME_WITH_JSONL=$(basename $DATASET_PATH)
+ DATASET_NAME=${DATASET_NAME_WITH_JSONL:0:-6}
+ LANG=$(basename $(dirname $DATASET_PATH))
+ SAVE_MEG_DS_DATASET=$six_ALL_CCFRSCRATCH/bigscience-datasets/gptneo-tokenizations/$LANG/"$DATASET_NAME"/meg_ds_"${TOKENIZER_NAME_OR_PATH//\//_}"
+
+ mkdir -p $(dirname $SAVE_MEG_DS_DATASET)
+
+ if [[ -f "$SAVE_MEG_DS_DATASET"_text_document.bin ]];
+ then
+ echo "$SAVE_MEG_DS_DATASET exists."
+ exit 0
+ fi
+
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ cd $six_ALL_CCFRWORK/code/Megatron-DeepSpeed
+ /usr/bin/time -v python tools/preprocess_data_many_cores.py \
+ --input $DATASET_PATH \
+ --output-prefix $SAVE_MEG_DS_DATASET \
+ --dataset-impl mmap \
+ --tokenizer-type PretrainedFromHF \
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
+ --append-eod \
+ --workers 40
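Note on the path derivation above: `${DATASET_NAME_WITH_JSONL:0:-6}` drops the trailing `.jsonl` (6 characters) and `${TOKENIZER_NAME_OR_PATH//\//_}` replaces every `/` with `_` so the tokenizer name can be embedded in a directory name. A minimal Python sketch of the same derivation, with a hypothetical input path:

# Sketch only: mirrors the bash parameter expansions above; paths are hypothetical.
from pathlib import Path

dataset_path = Path("/jsonls/jsonl_v2/en/example.jsonl")  # hypothetical input
tokenizer_name = "EleutherAI/gpt-neox-20b"

dataset_name = dataset_path.name[:-len(".jsonl")]  # ${DATASET_NAME_WITH_JSONL:0:-6}
lang = dataset_path.parent.name                    # $(basename $(dirname $DATASET_PATH))
save_prefix = f"gptneo-tokenizations/{lang}/{dataset_name}/meg_ds_{tokenizer_name.replace('/', '_')}"
print(save_prefix)  # gptneo-tokenizations/en/example/meg_ds_EleutherAI_gpt-neox-20b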
data/catalogue/catalogue-jsonl-to-meg-ds.slurm ADDED
@@ -0,0 +1,50 @@
+ #!/bin/bash
+ #SBATCH --job-name=catalogue-jsonl-to-meg-ds # job name
+ #SBATCH --ntasks=1 # number of MP tasks
+ #SBATCH --nodes=1
+ #SBATCH --cpus-per-task=40 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=logs/catalogue-jsonl-to-meg-ds/%x-%j.out # output file name
+ #SBATCH --account=six@cpu
+ #SBATCH --array=0-497
+ #SBATCH --partition=cpu_p1
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-prod
+ # We need a specific installation of tokenizers so that it works with bytefallback
+ conda activate thomas_data_tooling
+
+ # ======= Generate meg-ds file ======
+
+ DATASET_PATHS=($(ls -d $six_ALL_CCFRWORK/bigscience-training/jsonls/**/**/*.jsonl))
+ DATASET_PATH=${DATASET_PATHS[$SLURM_ARRAY_TASK_ID]}
+
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
+
+ DATASET_NAME_WITH_JSONL=$(basename $DATASET_PATH)
+ DATASET_NAME=${DATASET_NAME_WITH_JSONL:0:-6}
+ LANG=$(basename $(dirname $DATASET_PATH))
+ SAVE_MEG_DS_DATASET=$six_ALL_CCFRSCRATCH/bigscience-datasets/re-tokenizations/$LANG/"$DATASET_NAME"/meg_ds_"${TOKENIZER_NAME_OR_PATH//\//_}"
+
+ mkdir -p $(dirname $SAVE_MEG_DS_DATASET)
+
+ if [[ -f "$SAVE_MEG_DS_DATASET"_text_document.bin ]];
+ then
+ echo "$SAVE_MEG_DS_DATASET exists."
+ exit 0
+ fi
+
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ cd $six_ALL_CCFRWORK/code/Megatron-DeepSpeed
+ /usr/bin/time -v python tools/preprocess_data_many_cores.py \
+ --input $DATASET_PATH \
+ --output-prefix $SAVE_MEG_DS_DATASET \
+ --dataset-impl mmap \
+ --tokenizer-type PretrainedFromHF \
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
+ --append-eod \
+ --workers 40
data/catalogue/convert_to_jsonl.slurm ADDED
@@ -0,0 +1,31 @@
+ #!/bin/bash
+ #SBATCH --job-name=convert_datasets_to_jsonl # job name
+ #SBATCH --ntasks=1 # number of MP tasks
+ #SBATCH --nodes=1
+ #SBATCH --cpus-per-task=40 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=logs/convert_to_jsonl/%x-%j.out # output file name
+ #SBATCH --array=0-501
+ #SBATCH --account=six@cpu
+ #SBATCH --partition=cpu_p1
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-prod
+ conda activate thomas_data_tooling
+
+ # ======= Generate json file ======
+
+ DATASET_PATHS=($(ls -d $six_ALL_CCFRSCRATCH/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/* /gpfsscratch/rech/six/commun/bigscience-datasets/oscar_dedup/*))
+ DATASET_PATH=${DATASET_PATHS[$SLURM_ARRAY_TASK_ID]}
+
+ BIGSCIENCE_REPO=$WORK/code/big_science/bigscience
+ SAVE_JSON_DATASET_PATH_PREFIX=$six_ALL_CCFRSCRATCH/bigscience-datasets/jsonl_v2
+ mkdir -p $SAVE_JSON_DATASET_PATH_PREFIX
+
+ python $BIGSCIENCE_REPO/data/catalogue/sample_and_convert_to_jsonl.py \
+ --dataset-path $DATASET_PATH \
+ --save-jsonl-dataset-path-prefix $SAVE_JSON_DATASET_PATH_PREFIX \
+ --num-proc 10 \
+ --batch-size 10000
data/catalogue/get_sizes.py ADDED
@@ -0,0 +1,58 @@
+ import argparse
+ import os
+ from typing import List, Dict
+ from datasets import Dataset, load_dataset
+ from multiprocessing import cpu_count
+
+
+ def get_size_per_example(texts: List[str]) -> Dict:
+     size_values = [len(text.encode()) for text in texts]
+     examples = {"bytes_len": size_values}
+     return examples
+
+
+ def full_size_estimation(
+     ds: Dataset,
+     batch_size: int,
+     content_key: str = "text",
+     num_proc: int = cpu_count(),
+ ) -> int:
+     if len(ds) == 0:
+         return 0
+
+     ds_with_size = ds.map(
+         get_size_per_example,
+         batched=True,
+         num_proc=num_proc,
+         batch_size=batch_size,
+         input_columns=[content_key],
+         remove_columns=ds.column_names,
+     )
+     len_bytes = sum(ds_with_size["bytes_len"])
+     return len_bytes
+
+
+ def get_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "--input-path",
+         type=str,
+         required=True,
+         help="path to jsonl file containing the data",
+     )
+     parser.add_argument(
+         "--output-folder",
+         type=str,
+         required=True,
+         help="folder where the computed byte size is written",
+     )
+     return parser.parse_args()
+
+
+ if __name__ == "__main__":
+     args = get_args()
+     ds = load_dataset("json", data_files=args.input_path, split="train")
+     size = full_size_estimation(ds, batch_size=32)
+     dataset_name = os.path.basename(args.input_path)[:-6]
+     with open(os.path.join(args.output_folder, dataset_name), "w") as f:
+         f.write(str(size))
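get_sizes.py estimates a dataset's size as the sum of UTF-8 byte lengths of its `text` column, computed in parallel batches via `Dataset.map`. A quick sanity check of the counting rule on a toy in-memory dataset (hypothetical data):

# Sketch only: verifies the byte-counting rule used by get_size_per_example.
from datasets import Dataset

ds = Dataset.from_dict({"text": ["héllo", "world"]})  # hypothetical rows
# "héllo" is 6 UTF-8 bytes ("é" takes 2), "world" is 5, so the total is 11.
assert sum(len(t.encode()) for t in ds["text"]) == 11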
data/catalogue/get_sizes.slurm ADDED
@@ -0,0 +1,34 @@
+ #!/bin/bash
+ #SBATCH --job-name=get_sizes # job name
+ #SBATCH --ntasks=1 # number of MP tasks
+ #SBATCH --nodes=1
+ #SBATCH --cpus-per-task=40 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=logs/get_sizes/%x-%j.out # output file name
+ #SBATCH --account=six@cpu
+ #SBATCH --array=0-497
+ #SBATCH --partition=cpu_p1
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-prod
+
+ # common repo
+ BIGSCIENCE_REPO=/gpfswork/rech/six/commun/code/bigscience
+ cd $BIGSCIENCE_REPO
+ OUTPUT_FOLDER=$BIGSCIENCE_REPO/sizes_per_dataset/
+ mkdir -p $OUTPUT_FOLDER
+
+ DATASET_PATHS=($(ls -d $six_ALL_CCFRWORK/bigscience-training/jsonls/**/**/*.jsonl))
+ DATASET_PATH=${DATASET_PATHS[$SLURM_ARRAY_TASK_ID]}
+
+ DATASET_NAME_WITH_JSONL=$(basename $DATASET_PATH)
+ DATASET_NAME=${DATASET_NAME_WITH_JSONL:0:-6}
+
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ /usr/bin/time -v python data/catalogue/get_sizes.py \
+ --input-path $DATASET_PATH \
+ --output-folder $OUTPUT_FOLDER
data/catalogue/load_ratios_meg_ds_format.py ADDED
@@ -0,0 +1,54 @@
+ import argparse
+ import json
+
+
+ def get_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "--dataset-ratios-path",
+         type=str,
+         required=True,
+         help="path to JSON file containing input dataset ratios. Values are dictionaries: {'dataset_path': str, 'ratio': float}",
+     )
+     parser.add_argument(
+         "--split",
+         choices=["train", "valid", "test"]
+     )
+     parser.add_argument(
+         "--output-meg-ds-ratio-file",
+         type=str,
+         required=True,
+         help="path to output the language ratio file",
+     )
+     return parser.parse_args()
+
+ TOKEN_RANGES = {
+     "train": "0:0.950",
+     "valid": "0.950:1.0",
+ }
+
+ def main():
+     args = get_args()
+
+     token_range = TOKEN_RANGES[args.split]
+
+     with open(args.dataset_ratios_path, "r") as fi:
+         ds_ratios = json.load(fi)
+
+     main_dataset = [f"{ds_ratio['ratio']} {token_range} {ds_ratio['dataset_path']}" for ds_ratio in ds_ratios]
+     if args.split == "train":
+         final_string = f"\"{args.split}: " + ", ".join(main_dataset) + "\"\n"
+     elif args.split == "valid":
+         main_dataset_string = f"\"{args.split}: " + ", ".join(main_dataset) + "\""
+         additional_datasets = [f"\"valid_{ds_ratio['dataset_path'].split('/')[-2]}: 1 {token_range} {ds_ratio['dataset_path']}\"" for ds_ratio in ds_ratios]
+         final_string = main_dataset_string + " " + " ".join(additional_datasets) + "\n"
+     else:
+         raise ValueError(f"unknown split string {args.split}")
+
+
+     # TODO: you can add some extra dataset names for validation/test
+     with open(args.output_meg_ds_ratio_file, "w") as fi:
+         fi.write(final_string)
+
+ if __name__ == "__main__":
+     main()
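To make the output format concrete: for `--split train`, the script joins one `ratio token_range path` triple per dataset into a single quoted Meg-DS data string. A sketch with hypothetical paths and ratios:

# Sketch only: reproduces the train-split string construction with made-up inputs.
ds_ratios = [
    {"dataset_path": "/data/meg/en_text_document", "ratio": 0.6},
    {"dataset_path": "/data/meg/fr_text_document", "ratio": 0.4},
]
token_range = "0:0.950"  # TOKEN_RANGES["train"]
main_dataset = [f"{d['ratio']} {token_range} {d['dataset_path']}" for d in ds_ratios]
print('"train: ' + ", ".join(main_dataset) + '"')
# "train: 0.6 0:0.950 /data/meg/en_text_document, 0.4 0:0.950 /data/meg/fr_text_document"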
data/catalogue/merge_dataset_per_language.backup.py ADDED
@@ -0,0 +1,76 @@
+ import argparse
+ import json
+ from collections import defaultdict
+
+ import regex as re
+
+ def get_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "--dataset-ratios-path",
+         type=str,
+         required=True,
+         help="path to JSON file containing input dataset ratios. Values are dictionaries: {'dataset_path': str, 'ratio': float}",
+     )
+     parser.add_argument(
+         "--split",
+         choices=["train", "valid", "test"]
+     )
+     parser.add_argument(
+         "--meg-ds-dataset-prefix",
+         type=str,
+         required=True,
+         help="We add `lang` to that prefix in order to designate the path for a language-specific dataset."
+     )
+     parser.add_argument(
+         "--output-ratio-file",
+         type=str,
+         required=True,
+         help="path to output the language ratio file",
+     )
+     return parser.parse_args()
+
+
+ def main():
+     args = get_args()
+
+     # load training datasets
+     with open(args.dataset_ratios_path, "r") as fi:
+         ds_ratios = json.load(fi)
+
+     # get all individual languages
+     r = re.compile(r"^.*bigscience-catalogue-lm-data/lm_([^_]+)_.*$")
+     datasets_per_language = defaultdict(lambda: [])
+     for ds_ratio in ds_ratios:
+         candidate_lang = r.match(ds_ratio["dataset_path"]).group(1)
+         if candidate_lang == "hi":
+             ds_ratio["lang"] = "indic-hi"
+         else:
+             ds_ratio["lang"] = candidate_lang
+
+         merged_language = ds_ratio["lang"].split("-")[0]
+         # Merge zh languages
+         if candidate_lang in ["zhs", "zht"]:
+             merged_language = "zh"
+
+         datasets_per_language[merged_language].append(ds_ratio)
+
+     # save ratio result into a file (in json format, you can use `load_ratios_meg_ds_format` to get the meg_ds format)
+     language_ds_ratios = [
+         {
+             "ratio": sum([elt["ratio"] for elt in datasets]),
+             "dataset_path": args.meg_ds_dataset_prefix.format(lang=lang),
+             # Additional field to store in case we want to know what's in there.
+             "original_datasets": [
+                 dataset["dataset_path"]
+                 for dataset in datasets
+             ]
+         }
+         for lang, datasets in datasets_per_language.items()
+     ]
+
+     with open(args.output_ratio_file, "w") as fi:
+         json.dump(language_ds_ratios, fi, indent=2)
+
+ if __name__ == "__main__":
+     main()
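The grouping logic hinges on the `lm_<lang>_<name>` naming convention: the regex captures everything between `lm_` and the next underscore, `hi` is first rewritten to `indic-hi`, and `split("-")[0]` then collapses e.g. all `indic-*` datasets into one `indic` group (with `zhs`/`zht` special-cased into `zh`). A sketch with hypothetical paths:

# Sketch only: shows what the language regex extracts from catalogue-style paths.
import re  # the script uses the third-party `regex` module; stdlib `re` behaves identically here

r = re.compile(r"^.*bigscience-catalogue-lm-data/lm_([^_]+)_.*$")
print(r.match("/x/bigscience-catalogue-lm-data/lm_indic-bn_mkb").group(1))   # indic-bn -> merged group "indic"
print(r.match("/x/bigscience-catalogue-lm-data/lm_zhs_qedcorpus").group(1))  # zhs -> merged group "zh"
print(r.match("/x/bigscience-catalogue-lm-data/lm_ca_opus100").group(1))     # ca -> merged group "ca"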
data/catalogue/merge_dataset_per_languages.backup.slurm ADDED
@@ -0,0 +1,60 @@
+ #!/bin/bash
+ #SBATCH --job-name=catalogue-jsonl-to-meg-ds # job name
+ #SBATCH --ntasks=1 # number of MP tasks
+ #SBATCH --nodes=1
+ #SBATCH --cpus-per-task=10 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=logs/merge-meg-ds/%x-%j.out # output file name
+ #SBATCH --account=six@cpu
+ #SBATCH --partition=compil
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-prod
+ # We need a specific installation of tokenizers so that it works with bytefallback
+ conda activate thomas_data_tooling
+
+ BATCH_ID=0
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v2-dedup-lines-articles
+
+ # ======= Generate language ratio file ======
+
+ BIGSCIENCE_REPO=$six_ALL_CCFRWORK/code/bigscience
+
+ LANGUAGE_RATIOS_PATH=$BIGSCIENCE_REPO/data/catalogue/training_dataset_ratios_batch_${BATCH_ID}_per_language.json
+ MEG_DS_DATASET_PREFIX=$six_ALL_CCFRSCRATCH/bigscience-datasets/catalogue/meg-ds-per-lang/{lang}/"${TOKENIZER_NAME_OR_PATH//\//_}"_batch_${BATCH_ID}_text_document
+
+ mkdir -p $(dirname $MEG_DS_DATASET_PREFIX)
+
+ python $BIGSCIENCE_REPO/data/catalogue/merge_dataset_per_language.py \
+ --dataset-ratios-path $BIGSCIENCE_REPO/data/catalogue/training_dataset_ratios_batch_$BATCH_ID.json \
+ --split train \
+ --meg-ds-dataset-prefix $MEG_DS_DATASET_PREFIX \
+ --output-ratio-file $LANGUAGE_RATIOS_PATH
+
+ # ======= Generate merged files ======
+
+ MEG_DS_REPO=$six_ALL_CCFRWORK/code/Megatron-DeepSpeed
+ pushd $MEG_DS_REPO
+
+ readarray -t MERGE_ARGUMENTS < <(python -c "
+ import json
+ from pathlib import Path
+
+ with open(\"$LANGUAGE_RATIOS_PATH\", \"r\") as fi:
+     data = json.load(fi)
+
+ for elt in data:
+     Path(elt['dataset_path']).parent.mkdir(parents=True, exist_ok=True)
+
+ print('\n'.join([f\"--datasets {' '.join(elt['original_datasets'])} --output-prefix {elt['dataset_path']}\" for elt in data]))
+ ")
+
+ echo "${MERGE_ARGUMENTS[@]}"
+
+ for MERGE_ARGUMENT in "${MERGE_ARGUMENTS[@]}"
+ do
+ /usr/bin/time -v python -m tools.merge_preprocessed_data \
+ $MERGE_ARGUMENT
+ done
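Each element of `MERGE_ARGUMENTS` is one line printed by the embedded Python, i.e. a ready-made argument string for `tools.merge_preprocessed_data`. A sketch of what one such line looks like for a hypothetical ratios entry:

# Sketch only: builds one merge-argument line from a made-up ratios entry.
elt = {
    "dataset_path": "/merged/en/tokenizer_batch_0_text_document",
    "original_datasets": ["/meg/en/a_text_document", "/meg/en/b_text_document"],
}
line = f"--datasets {' '.join(elt['original_datasets'])} --output-prefix {elt['dataset_path']}"
print(line)
# --datasets /meg/en/a_text_document /meg/en/b_text_document --output-prefix /merged/en/tokenizer_batch_0_text_document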
data/catalogue/merge_dataset_per_languages.slurm ADDED
@@ -0,0 +1,38 @@
+ #!/bin/bash
+ #SBATCH --job-name=catalogue-jsonl-to-meg-ds # job name
+ #SBATCH --ntasks=1 # number of MP tasks
+ #SBATCH --nodes=1
+ #SBATCH --cpus-per-task=10 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=logs/merge-meg-ds/%x-%j.out # output file name
+ #SBATCH --account=six@cpu
+ #SBATCH --array=0-48
+ #SBATCH --partition=cpu_p1
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-prod
+ # We need a specific installation of tokenizers so that it works with bytefallback
+ conda activate thomas_data_tooling
+
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
+
+ # ======= Generate merged files ======
+
+ MEG_DS_REPO=$six_ALL_CCFRWORK/code/Megatron-DeepSpeed
+ pushd $MEG_DS_REPO
+
+ BASE_PATH=$six_ALL_CCFRSCRATCH/bigscience-datasets/meg-ds_v2
+ LANGUAGES=($(ls $BASE_PATH))
+ LANG=${LANGUAGES[$SLURM_ARRAY_TASK_ID]}
+
+ SAVE_PATH=$six_ALL_CCFRSCRATCH/bigscience-datasets/merged-meg-ds_v2/$LANG/"${TOKENIZER_NAME_OR_PATH//\//_}"_${LANG}_text_document
+ # fancy way of collecting all datasets within a folder
+ DATASETS=$(ls $six_ALL_CCFRSCRATCH/bigscience-datasets/meg-ds_v2/$LANG/**/*.bin | xargs -I {} python -c "print('{}'[:-4])")
+
+ mkdir -p $(dirname $SAVE_PATH)
+
+ /usr/bin/time -v python -m tools.merge_preprocessed_data \
+ --datasets $DATASETS \
+ --output-prefix $SAVE_PATH
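The `ls ... | xargs -I {} python -c "print('{}'[:-4])"` pipeline just strips the `.bin` extension from each file so `merge_preprocessed_data` receives dataset prefixes (each prefix names a `.bin`/`.idx` pair). A pathlib equivalent with hypothetical paths:

# Sketch only: same suffix-stripping as the xargs one-liner above.
from pathlib import Path

bins = ["/meg-ds_v2/en/a/x_text_document.bin", "/meg-ds_v2/en/b/y_text_document.bin"]  # hypothetical
prefixes = [str(Path(p).with_suffix("")) for p in bins]
print(prefixes)
# ['/meg-ds_v2/en/a/x_text_document', '/meg-ds_v2/en/b/y_text_document']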
data/catalogue/merge_dataset_per_languages_v3.slurm ADDED
@@ -0,0 +1,38 @@
+ #!/bin/bash
+ #SBATCH --job-name=catalogue-jsonl-to-meg-ds # job name
+ #SBATCH --ntasks=1 # number of MP tasks
+ #SBATCH --nodes=1
+ #SBATCH --cpus-per-task=10 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=logs/merge-meg-ds/%x-%j.out # output file name
+ #SBATCH --account=six@cpu
+ #SBATCH --array=0-48
+ #SBATCH --partition=cpu_p1
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-prod
+ # We need a specific installation of tokenizers so that it works with bytefallback
+ conda activate thomas_data_tooling
+
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
+
+ # ======= Generate merged files ======
+
+ MEG_DS_REPO=$six_ALL_CCFRWORK/code/Megatron-DeepSpeed
+ pushd $MEG_DS_REPO
+
+ BASE_PATH=$six_ALL_CCFRSCRATCH/bigscience-datasets/meg-ds_v2
+ LANGUAGES=($(ls $BASE_PATH))
+ LANG=${LANGUAGES[$SLURM_ARRAY_TASK_ID]}
+
+ SAVE_PATH=$six_ALL_CCFRSCRATCH/bigscience-datasets/merged-meg-ds_v3_pii/$LANG/"${TOKENIZER_NAME_OR_PATH//\//_}"_${LANG}_text_document
+ # fancy way of collecting all datasets within a folder
+ DATASETS=$(ls $six_ALL_CCFRSCRATCH/bigscience-datasets/meg-ds_v2/$LANG/**/*.bin | xargs -I {} python -c "print('{}'[:-4])")
+
+ mkdir -p $(dirname $SAVE_PATH)
+
+ /usr/bin/time -v python -m tools.merge_preprocessed_data \
+ --datasets $DATASETS \
+ --output-prefix $SAVE_PATH
data/catalogue/merge_nigercongo.slurm ADDED
@@ -0,0 +1,38 @@
+ #!/bin/bash
+ #SBATCH --job-name=catalogue-jsonl-to-meg-ds # job name
+ #SBATCH --ntasks=1 # number of MP tasks
+ #SBATCH --nodes=1
+ #SBATCH --cpus-per-task=10 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=logs/merge-meg-ds/%x-%j.out # output file name
+ #SBATCH --account=six@cpu
+ #SBATCH --array=0-0
+ #SBATCH --partition=cpu_p1
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-prod
+ # We need a specific installation of tokenizers so that it works with bytefallback
+ conda activate thomas_data_tooling
+
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
+
+ # ======= Generate merged files ======
+
+ MEG_DS_REPO=$six_ALL_CCFRWORK/code/Megatron-DeepSpeed
+ pushd $MEG_DS_REPO
+
+ BASE_PATH=$six_ALL_CCFRSCRATCH/bigscience-datasets/nigercongo_fusion
+ LANGUAGES=($(ls $BASE_PATH))
+ LANG=${LANGUAGES[$SLURM_ARRAY_TASK_ID]}
+
+ SAVE_PATH=$six_ALL_CCFRSCRATCH/bigscience-datasets/merged-meg-ds_v3_pii/$LANG/"${TOKENIZER_NAME_OR_PATH//\//_}"_${LANG}_text_document
+ # fancy way of collecting all datasets within a folder
+ DATASETS=$(ls $six_ALL_CCFRSCRATCH/bigscience-datasets/nigercongo_fusion/$LANG/**/*.bin | xargs -I {} python -c "print('{}'[:-4])")
+
+ mkdir -p $(dirname $SAVE_PATH)
+
+ /usr/bin/time -v python -m tools.merge_preprocessed_data \
+ --datasets $DATASETS \
+ --output-prefix $SAVE_PATH
data/catalogue/oscar-piiv2-jsonl-to-meg-ds.slurm ADDED
@@ -0,0 +1,50 @@
+ #!/bin/bash
+ #SBATCH --job-name=catalogue-jsonl-to-meg-ds # job name
+ #SBATCH --ntasks=1 # number of MP tasks
+ #SBATCH --nodes=1
+ #SBATCH --cpus-per-task=40 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=logs/catalogue-jsonl-to-meg-ds/%x-%j.out # output file name
+ #SBATCH --account=six@cpu
+ #SBATCH --array=0-11
+ #SBATCH --partition=cpu_p1
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-prod
+ # We need a specific installation of tokenizers so that it works with bytefallback
+ conda activate thomas_data_tooling
+
+ # ======= Generate meg-ds file ======
+
+ DATASET_PATHS=($(ls -d /gpfsscratch/rech/six/commun/bigscience-datasets/pii_no_id_no_num/post/*.jsonl))
+ DATASET_PATH=${DATASET_PATHS[$SLURM_ARRAY_TASK_ID]}
+
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
+
+ DATASET_NAME_WITH_JSONL=$(basename $DATASET_PATH)
+ DATASET_NAME=${DATASET_NAME_WITH_JSONL:0:-6}
+ LANG=$(basename $(dirname $DATASET_PATH))
+ SAVE_MEG_DS_DATASET=$six_ALL_CCFRSCRATCH/bigscience-datasets/oscar_pii_v2/$LANG/"$DATASET_NAME"/meg_ds_"${TOKENIZER_NAME_OR_PATH//\//_}"
+
+ mkdir -p $(dirname $SAVE_MEG_DS_DATASET)
+
+ if [[ -f "$SAVE_MEG_DS_DATASET"_text_document.bin ]];
+ then
+ echo "$SAVE_MEG_DS_DATASET exists."
+ exit 0
+ fi
+
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ cd $six_ALL_CCFRWORK/code/Megatron-DeepSpeed
+ /usr/bin/time -v python tools/preprocess_data_many_cores.py \
+ --input $DATASET_PATH \
+ --output-prefix $SAVE_MEG_DS_DATASET \
+ --dataset-impl mmap \
+ --tokenizer-type PretrainedFromHF \
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
+ --append-eod \
+ --workers 40
data/catalogue/sample_and_convert_to_jsonl.py ADDED
@@ -0,0 +1,650 @@
+ import argparse
+ import logging
+ import re
+ from pathlib import Path
+
+ from datasets import Dataset, load_from_disk
+ from datasets.utils.logging import set_verbosity_info
+ from numpy.random import default_rng
+
+ set_verbosity_info()
+ logger = logging.getLogger(__name__)
+ rng = default_rng(42)
+
+ CATALOGUE_DATASETS = {
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ca_enriched_conllu_ancora_for_ml_training": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ca_parlament_parla": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-pa_ted_talks_iwslt": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_odiencorp": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-as_ted_talks_iwslt": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-as_wiktionary_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_fr_book_dash_books": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_vi_uit_vsmec": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ur_mkb": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_indo4b_talpco": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_book_dash_books": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_vi_vietnamese_students_feedback": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ca_xquad_ca": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-hi_wikimedia_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-mr_opus100": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_401_www_elperiodicodemexico_com": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_indonesian_frog_storytelling_corpus": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-pa_wikibooks_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ca_opus100": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-or_mkb": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-te_opus100": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_nigercongo-tum_aggregated": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_vi_opus100": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-kn_opus100": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-pa_opus100": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-kn_ted_talks_iwslt": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_indonli": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_nigercongo-bm_aggregated": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_nigercongo-ki_aggregated": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_eu_ted_talks_iwslt": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-te_mkb": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-hi_mkb": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-gu_ted_talks_iwslt": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ur_wikiquote_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-mr_wikiquote_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ca_viquiquad": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-bn_mkb": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_eu_wikiquote_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-gu_mkb": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-mr_mkb": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-te_ted_talks_iwslt": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_nigercongo-ak_aggregated": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_nigercongo-ts_aggregated": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_nigercongo-st_aggregated": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ml_ted_talks_iwslt": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ml_mkb": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ca_vilaquad": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-or_opus100": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-gu_opus100": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-bn_bangla_sentiment_classification_datasets": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-gu_wiktionary_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-hi_wikiversity_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_nigercongo-ny_aggregated": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_pt_wikimedia_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_nigercongo-tw_aggregated": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-kn_wikiquote_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ta_mkb": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-mr_wiktionary_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ur_ted_talks_iwslt": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-gu_wikiquote_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_nigercongo-tn_aggregated": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ur_open_subtitles": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-te_wikiquote_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_wikiquote_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ur_wiktionary_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ca_wikimedia_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ml_wikibooks_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_nigercongo-ln_aggregated": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-hi_wikivoyage_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-bn_ted_talks_iwslt": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_opus100": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-te_open_subtitles": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-bn_wikibooks_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-te_wikibooks_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_nigercongo-nso_aggregated": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_vi_vinbigdata_asr_vlsp_2020": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-mr_ted_talks_iwslt": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ta_ted_talks_iwslt": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-mr_wikibooks_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_wikimedia_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_scielo": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ta_open_subtitles": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ml_wikiquote_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_nigercongo-rn_aggregated": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_vi_vinbigdata_mt_vlsp_2020": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_indo4b_parallel": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_indo4b_bppt": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ca_wiktionary_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_nigercongo-wo_aggregated": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_wikiversity_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ca_ted_talks_iwslt": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ta_opus100": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_vi_wikiquote_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_zht_qedcorpus": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ta_wikibooks_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-hi_qedcorpus": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ur_wikibooks_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-as_opus100": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_nigercongo-lg_aggregated": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_pseudocrawl-filtered_502_www_ricemedia_co": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_zhs_qedcorpus": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-hi_opus100": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-hi_wikiquote_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-bn_wiktionary_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-hi_ted_talks_iwslt": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_nigercongo-fon_aggregated": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_recibrew": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ur_opus100": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-bn_wikivoyage_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_wiktionary_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ta_wikiquote_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ca_wikiquote_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_nigercongo-sn_aggregated": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-kn_wiktionary_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_arabench": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_fr_qedcorpus": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_pt_opus100": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_30_www_radiocable_com": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-hi_open_subtitles": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_eu_wiktionary_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ml_opus100": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_vi_wikivoyage_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ca_wikibooks_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ca_wikinews_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_qedcorpus": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_nigercongo-zu_aggregated": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_pt_wikivoyage_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_wikiquote_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_wiktionary_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-as_samanantar": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_eu_wikibooks_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_ted_talks_iwslt": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_zh_wikiquote_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_zh_pseudocrawl-filtered_674_ai_baidu_com": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_wikibooks_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_fr_ester": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ml_wiktionary_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_zh_wikiversity_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-pa_wiktionary_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_nigercongo-ig_aggregated": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_pt_qedcorpus": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_zh_wikinews_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_zh_wikivoyage_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_wikibooks_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_pseudocrawl-filtered_492_www_vivawoman_net": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_wikiquote_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ml_pib": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-te_pib": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_vi_wikibooks_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ta_wikinews_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-gu_pib": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_wikiversity_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-bn_bengali_question_answering": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_244_www_df_cl": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ca_open_subtitles": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_pt_wikiquote_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_scielo": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_zh_ted_talks_iwslt": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_pseudocrawl-filtered_485_blog_moneysmart_sg": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_vi_ted_talks_iwslt": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_pseudocrawl-filtered_488_dailyvanity_sg": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_pseudocrawl-filtered_696_www_oercommons_org": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_vi_wiktionary_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_wikinews_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_ted_talks_iwslt": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_pseudocrawl-filtered_483_alvinology_com": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_indo4b_kompas": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_qedcorpus": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_nigercongo-xh_aggregated": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-or_odiencorp": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_ted_talks_iwslt": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_opus100": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_eu_open_subtitles": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_habibi": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_pt_wikiversity_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_zh_wikibooks_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-bn_pib": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-pa_pib": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-bn_opus100": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_fr_wikiquote_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_fr_ted_talks_iwslt": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_eu_wikisource_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_pt_pseudocrawl-filtered_672_pt_globalvoices_org": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_labr": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_pseudocrawl-filtered_339_www_actasanitaria_com": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-te_wiktionary_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_ted_talks_iwslt": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-mr_pib": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_wikinews_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-or_pib": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-bn_open_subtitles": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_fr_wikinews_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_fr_wikivoyage_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ur_pib": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_wikivoyage_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-hi_wikibooks_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_eu_pseudocrawl-filtered_563_ahotsak_eus": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_indo4b_tempo": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_32_www_elexpresso_com": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_indosum": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ml_open_subtitles": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_91_www_diario26_com": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_nigercongo-rw_aggregated": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_pt_ted_talks_iwslt": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_eu_opus100": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-hi_wiktionary_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_pseudocrawl-filtered_500_www_asiaone_com_singapore": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_fr_pseudocrawl-filtered_530_www_mediapart_fr": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ta_pib": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_pt_wikibooks_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_pseudocrawl-filtered_470_forums_hardwarezone_com_sg": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_223_www_eltambor_es": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_indo4b_jw300": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_219_www_aguasresiduales_info": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_376_www_elpopular_com_ar": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_62_www_lapagina_com_sv": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-or_wikisource_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_vi_data_on_covid_19_news_coverage_in_vietnam": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_pseudocrawl-filtered_548_remezcla_com": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_pt_wiktionary_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_pseudocrawl-filtered_499_www_today_com_news": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_wikinews_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_vi_vntq_corpus_big": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_pseudocrawl-filtered_545_www_detik_com": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-or_wiktionary_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-or_wikipedia": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_eu_pseudocrawl-filtered_637_www_argia_eus": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_pseudocrawl-filtered_501_theindependent_sg": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_430_www_eldiario_ec": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_420_www_retema_es": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-as_wikipedia": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_kalimat": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_pseudocrawl-filtered_487_thesmartlocal_com": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ca_wikisource_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_153_financialfood_es": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_pt_wikinews_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_wikisource_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_wikibooks_filtered": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-gu_samanantar": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_158_www_diariodeleon_es": 1.,
+     "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_fr_pseudocrawl-filtered_599_fr_globalvoices_org": 1.,
256
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_392_www_muypymes_com": 1.,
257
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_eu_pseudocrawl-filtered_506_goiena_eus": 1.,
258
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_fr_wikibooks_filtered": 1.,
259
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-hi_pib": 1.,
260
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_231_ojo_pe": 1.,
261
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_167_www_ambientum_com": 1.,
262
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_wiktionary_filtered": 1.,
263
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_vi_vietnamese_poetry": 1.,
264
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_118_www_elheraldo_hn": 1.,
265
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_233_www_dinero_com": 1.,
266
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_eu_pseudocrawl-filtered_635_www_berria_eus": 1.,
267
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_pseudocrawl-filtered_395_www_evwind_es": 1.,
268
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_182_correodelsur_com": 1.,
269
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_fr_wikiversity_filtered": 1.,
270
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_56_www_eluniverso_com": 1.,
271
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_pt_project_gutenberg": 1.,
272
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_250_www_cooperativa_cl": 1.,
273
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_374_www_talcualdigital_com": 1.,
274
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_nigercongo-yo_aggregated": 1.,
275
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_315_lasillavacia_com": 1.,
276
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_207_elimpulso_com": 1.,
277
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_276_radio_uchile_cl": 1.,
278
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_325_www_laprensa_hn": 1.,
279
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_294_www_laopinion_com_co": 1.,
280
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-gu_wikipedia": 1.,
281
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_34_www_losandes_com_ar": 1.,
282
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_90_peru_com": 1.,
283
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_23_www_elconfidencialdigital_com": 1.,
284
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_125_www_noticiasde_es": 1.,
285
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_359_www_efeverde_com": 1.,
286
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-mr_samanantar": 1.,
287
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-pa_wikisource_filtered": 1.,
288
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_qedcorpus": 1.,
289
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_381_www_cuartopoder_es": 1.,
290
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_518_www_elcolombiano_com": 1.,
291
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_280_salamancartvaldia_es": 1.,
292
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_341_es_cointelegraph_com": 1.,
293
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-or_samanantar": 1.,
294
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_165_www_ticbeat_com": 1.,
295
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-te_samanantar": 1.,
296
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_641_es_globalvoices_org": 1.,
297
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_vi_wikisource_filtered": 1.,
298
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_209_misionesonline_net": 1.,
299
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-pa_wikipedia": 1.,
300
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ta_wiktionary_filtered": 1.,
301
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_246_www_eldiarionuevodia_com_ar": 1.,
302
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_86_www_motorpasion_com": 1.,
303
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_257_www_diaridetarragona_com": 1.,
304
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-kn_samanantar": 1.,
305
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_53_www_expreso_ec": 1.,
306
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_367_elcorreoweb_es": 1.,
307
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_pseudocrawl-filtered_159_www_postcrescent_com": 1.,
308
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_422_www_formulatv_com": 1.,
309
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_zh_project_gutenberg": 1.,
310
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_zh_pseudocrawl-filtered_503_www_zaobao_com_sg": 1.,
311
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_pseudocrawl-filtered_638_globalvoices_org": 1.,
312
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_78_www_listindiario_com": 1.,
313
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_277_www_entornointeligente_com": 1.,
314
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_404_www_telam_com_ar": 1.,
315
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_431_www_elperiodicoextremadura_com": 1.,
316
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_254_diario_mx": 1.,
317
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_169_www_el_carabobeno_com": 1.,
318
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_417_www_radiolaprimerisima_com": 1.,
319
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_21_www_elperiodicodearagon_com": 1.,
320
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_vi_open_subtitles": 1.,
321
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_pseudocrawl-filtered_498_www_channelnewsasia_com": 1.,
322
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_pseudocrawl-filtered_304_www_semana_com": 1.,
323
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_386_www_prensalibre_com": 1.,
324
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-pa_samanantar": 1.,
325
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ur_urdu-monolingual-corpus": 1.,
326
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_286_www_nacion_com": 1.,
327
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_67_www_elpais_cr": 1.,
328
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_405_www_emol_com": 1.,
329
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_317_diariocorreo_pe": 1.,
330
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-as_wikisource_filtered": 1.,
331
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-gu_wikisource_filtered": 1.,
332
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-mr_wikipedia": 1.,
333
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_pseudocrawl-filtered_512_kumparan_com": 1.,
334
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_237_www_cronista_com": 1.,
335
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_116_www_latribuna_hn": 1.,
336
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_indonesian_news_corpus": 1.,
337
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_172_www_rionegro_com_ar": 1.,
338
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_226_www_ole_com_ar": 1.,
339
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_wikiversity_filtered": 1.,
340
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_287_www_cibercuba_com": 1.,
341
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_fr_wiktionary_filtered": 1.,
342
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_pseudocrawl-filtered_572_tirto_id": 1.,
343
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-bn_samanantar": 1.,
344
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_157_www_elsoldemexico_com_mx": 1.,
345
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_44_ladiaria_com_uy": 1.,
346
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ca_catalan_government_crawling": 1.,
347
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_wikivoyage_filtered": 1.,
348
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_213_www_hola_com": 1.,
349
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_324_gestion_pe": 1.,
350
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_28_www_fayerwayer_com": 1.,
351
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_nigercongo-sw_aggregated": 1.,
352
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ml_samanantar": 1.,
353
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_354_www_lagaceta_com_ar": 1.,
354
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_pt_wikisource_filtered": 1.,
355
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_pseudocrawl-filtered_534_www_nairaland_com": 1.,
356
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ta_samanantar": 1.,
357
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_253_www_debate_com_mx": 1.,
358
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_248_www_telesurtv_net": 1.,
359
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_406_www_americaeconomia_com": 1.,
360
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_130_www_elperiodicomediterraneo_com": 1.,
361
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_232_tn_com_ar": 1.,
362
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ur_leipzig_wortschatz_urdu_newscrawl_2016_sentences": 1.,
363
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_71_www_rtve_es": 1.,
364
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_220_www_vanguardia_com_mx": 1.,
365
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_pseudocrawl-filtered_549_www_cnnindonesia_com": 1.,
366
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_project_gutenberg": 1.,
367
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ur_wikipedia": 1.,
368
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_vi_vietai_sat": 1.,
369
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ca_tecla": 1.,
370
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_203_www_que_es": 1.,
371
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_320_www_paginasiete_bo": 1.,
372
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_181_noticiassin_com": 1.,
373
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_wikipedia": 1.,
374
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-mr_wikisource_filtered": 1.,
375
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_the_pile_europarl": 1.,
376
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_675_www_elespectador_com": 1.,
377
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_146_www_perfil_com": 1.,
378
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ml_wikisource_filtered": 1.,
379
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_the_pile_europarl": 1.,
380
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_pt_the_pile_europarl": 1.,
381
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_brad_2": 1.,
382
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_79_www_laopiniondemurcia_es": 1.,
383
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_229_www_expansion_com": 1.,
384
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-kn_wikipedia": 1.,
385
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_299_www_lne_es": 1.,
386
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_263_www_lasexta_com": 1.,
387
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_fr_the_pile_europarl": 1.,
388
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_wikiquote_filtered": 1.,
389
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_136_valenciaplaza_com": 1.,
390
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_256_www_laprovincia_es": 1.,
391
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_373_www_farodevigo_es": 1.,
392
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_royal_society_corpus": 1.,
393
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_245_www_noticiasdenavarra_com": 1.,
394
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-hi_pseudocrawl-filtered_515_www_aajtak_in": 1.,
395
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-kn_wikisource_filtered": 1.,
396
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_wiktionary_filtered": 1.,
397
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ml_wikipedia": 1.,
398
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_58_www_levante_emv_com": 1.,
399
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_198_www_eleconomista_es": 1.,
400
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_249_www_telecinco_es": 1.,
401
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_288_www_marca_com": 1.,
402
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_ksucca": 1.,
403
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_189_www_eleconomista_com_mx": 1.,
404
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_pt_scielo": 1.,
405
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_103_www_elmostrador_cl": 1.,
406
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_vi_wikipedia": 1.,
407
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_eu_wikipedia": 1.,
408
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ur_leipzig_wortschatz_urdu-pk_web_2019_sentences": 1.,
409
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_211_www_elcomercio_com": 1.,
410
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-te_wikisource_filtered": 1.,
411
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-bn_indic_nlp_corpus": 1.,
412
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_sanad": 1.,
413
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_wikibooks_filtered": 1.,
414
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_429_cadenaser_com": 1.,
415
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-hi_wikipedia": 1.,
416
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_open_subtitles": 1.,
417
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-te_wikipedia": 1.,
418
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_267_www_elperiodico_com_es": 1.,
419
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-hi_wikisource_filtered": 1.,
420
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_pseudocrawl-filtered_595_mawdoo3_com": 1.,
421
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_215_www_lainformacion_com": 1.,
422
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ta_wikipedia": 1.,
423
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-bn_wikipedia": 1.,
424
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_zh-tw_wikipedia": 1.,
425
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_zh-cn_wikipedia": 1.,
426
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_255_elcomercio_pe": 1.,
427
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-gu_indic_nlp_corpus": 1.,
428
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-or_indic_nlp_corpus": 1.,
429
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_wikisource_filtered": 1.,
430
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_id_indonesian_news_articles_2017": 1.,
431
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_409_www_proceso_com_mx": 1.,
432
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_zh_open_subtitles": 1.,
433
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_pseudocrawl-filtered_510_timesofindia_indiatimes_com": 1.,
434
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_349_www_eltiempo_com": 1.,
435
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-hi_samanantar": 1.,
436
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_424_www_lavanguardia_com": 1.,
437
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_100_www_aporrea_org": 1.,
438
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_vi_vinbigdata_monolingual_vlsp_2020": 1.,
439
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_eu_bsbasque": 1.,
440
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_pseudocrawl-filtered_497_www_straitstimes_com": 1.,
441
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_396_www_eldiario_es": 1.,
442
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_pt_wikipedia": 1.,
443
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-pa_indic_nlp_corpus": 1.,
444
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_tashkeela": 1.,
445
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-mr_indic_nlp_corpus": 1.,
446
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_zh_du_reader": 1.,
447
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_wikisource_filtered": 1.,
448
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_20_www_clarin_com": 1.,
449
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_pseudocrawl-filtered_689_www_abc_net_au": 1.,
450
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-hi_pseudocrawl-filtered_667_www_bhaskar_com": 1.,
451
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_63_www_lanacion_com_ar": 1.,
452
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-kn_indic_nlp_corpus": 1.,
453
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_pseudocrawl-filtered_333_www_elmundo_es": 1.,
454
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_fr_project_gutenberg": 1.,
455
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_fr_pseudocrawl-filtered_550_www_lemonde_fr": 1.,
456
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_zh_multi_un_2": 1.,
457
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-te_indic_nlp_corpus": 1.,
458
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ca_wikipedia": 1.,
459
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ta_wikisource_filtered": 1.,
460
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_zh_uncorpus": 1.,
461
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_multi_un_2": 1.,
462
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ca_catalan_general_crawling": 1.,
463
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_wikipedia": 1.,
464
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_multi_un_2": 1.,
465
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ne_unsupervised_cross_lingual_representation_learning_at_scale": 1.,
466
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ml_indic_nlp_corpus": 1.,
467
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_fr_multi_un_2": 1.,
468
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_multi_un_2": 1.,
469
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_wikipedia": 1.,
470
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-bn_wikisource_filtered": 1.,
471
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_pt_open_subtitles": 1.,
472
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_fr_wikipedia": 1.,
473
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_fr_open_subtitles": 1.,
474
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_open_subtitles": 1.,
475
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-hi_iitb_english_hindi_corpus": 1.,
476
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_uncorpus": 1.,
477
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_uncorpus": 1.,
478
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_no_code_stackexchange": 1.,
479
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_uncorpus": 1.,
480
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-ta_indic_nlp_corpus": 1.,
481
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_fr_uncorpus": 1.,
482
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_es_open_subtitles": 1.,
483
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-hi_indic_nlp_corpus": 1.,
484
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_vi_binhvq_news_corpus": 1.,
485
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ca_catalan_textual_corpus": 1.,
486
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_indic-bn_bangla_lm": 1.,
487
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_wikipedia": 1.,
488
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_open_subtitles": 1.,
489
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_fr_wikisource_filtered": 1.,
490
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_openiti_proc": 1.,
491
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_ar_arabic_billion_words": 1.,
492
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_pt_brwac": 1.,
493
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_project_gutenberg": 1.,
494
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_the_pile_uspto": 0.5441176470588235, # ((350 [expected] - ( 326 [catalogue_en] - 251 [s2orc] - 21 [uspto]) ) * 1/2 [catalogue_en_proportion]) / (251 [s2orc] + 21 [uspto])
495
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_code_stackexchange": 1.,
496
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_fr_hal_archives_ouvertes": 1.,
497
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_code_github-no-gpl": 1.,
498
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_zh_wudaocorpora": 1.,
499
+ "/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_en_s2orc_ai2_pdf_parses": 0.5441176470588235, # ((350 [expected] - ( 326 [catalogue_en] - 251 [s2orc] - 21 [uspto]) ) * 1/2 [catalogue_en_proportion]) / (251 [s2orc] + 21 [uspto])
500
+ }
501
OSCAR_DATASETS = {
    # oscar
    "/gpfsscratch/rech/six/commun/bigscience-datasets/oscar_dedup/ar": 1,
    "/gpfsscratch/rech/six/commun/bigscience-datasets/oscar_dedup/bn": 1,
    "/gpfsscratch/rech/six/commun/bigscience-datasets/oscar_dedup/ca": 1,
    "/gpfsscratch/rech/six/commun/bigscience-datasets/oscar_dedup/en": 0.13454545454545455,  # ((350 [expected] - (326 [catalogue_en] - 251 [s2orc] - 21 [uspto])) * 1/2 [oscar_en proportion]) / 1_100 [oscar_en]
    "/gpfsscratch/rech/six/commun/bigscience-datasets/oscar_dedup/es": 1,
    "/gpfsscratch/rech/six/commun/bigscience-datasets/oscar_dedup/eu": 1,
    "/gpfsscratch/rech/six/commun/bigscience-datasets/oscar_dedup/fr": 1,
    "/gpfsscratch/rech/six/commun/bigscience-datasets/oscar_dedup/hi": 1,
    "/gpfsscratch/rech/six/commun/bigscience-datasets/oscar_dedup/id": 1,
    "/gpfsscratch/rech/six/commun/bigscience-datasets/oscar_dedup/pt": 1,
    "/gpfsscratch/rech/six/commun/bigscience-datasets/oscar_dedup/ur": 1,
    "/gpfsscratch/rech/six/commun/bigscience-datasets/oscar_dedup/vi": 1,
    "/gpfsscratch/rech/six/commun/bigscience-datasets/oscar_dedup/zh": 1
}
assert set(OSCAR_DATASETS.keys()).isdisjoint(set(CATALOGUE_DATASETS.keys()))
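# The two tables must stay key-disjoint so that each --dataset-path argument
# resolves to exactly one sampling ratio.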
518

def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset-path", choices=list(set(CATALOGUE_DATASETS.keys()) | set(OSCAR_DATASETS.keys())), type=str, required=True,
        help="Dataset path."
    )
    parser.add_argument(
        "--save-jsonl-dataset-path-prefix", type=Path, required=True,
        help="Where to output the jsonl file. Files will be saved in `{args.save_jsonl_dataset_path_prefix}/{lang}/{dataset_name}`."
    )
    parser.add_argument(
        "--num-proc", type=int, default=1
    )
    parser.add_argument(
        "--batch-size", type=int
    )
    return parser.parse_args()
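# Hypothetical example invocation (output prefix shown for illustration only):
#   python sample_and_convert_to_jsonl.py \
#       --dataset-path /gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_fr_wikipedia \
#       --save-jsonl-dataset-path-prefix /path/to/jsonl/output \
#       --num-proc 40 \
#       --batch-size 1000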
536


catalogue_language_regex = re.compile(
    r"^/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/lm_([^_]+)_.*$"
)
normalise_catalogue_dataset_name_regex = re.compile(
    r"^/gpfsscratch/rech/six/commun/bigscience-datasets/catalogue/clean_v2/bigscience-catalogue-lm-data/(.*)$"
)
def get_catalogue_language(dataset_name: str) -> str:
    lang_candidate = catalogue_language_regex.match(dataset_name).group(1)

    # Normalise Chinese language codes so that we only consider simplified and traditional Chinese as the two Chinese languages
    if lang_candidate in ["zh", "zhs", "zh-cn"]:
        lang_candidate = "zhs"
    elif lang_candidate in ["zht", "zh-tw"]:
        lang_candidate = "zht"
    else:
        assert lang_candidate[:2] != "zh"

    return lang_candidate

oscar_to_bs_language = {
    "ar": "ar",
    "bn": "indic-bn",
    "ca": "ca",
    "en": "en",
    "es": "es",
    "eu": "eu",
    "fr": "fr",
    "hi": "indic-hi",
    "id": "id",
    "pt": "pt",
    "ur": "indic-ur",
    "vi": "vi",
    "zh": "zhs"
}
oscar_language_regex = re.compile(
    r"^/gpfsscratch/rech/six/commun/bigscience-datasets/oscar_dedup/(.*)$"
)
def get_oscar_language(dataset_name: str) -> str:
    return oscar_to_bs_language[oscar_language_regex.match(dataset_name).group(1)]


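# Subsampling helper: a ratio >= 1 keeps the dataset unchanged; below 1 we draw
# a uniform random subset without replacement via the module-level `rng`
# (presumably a numpy Generator seeded near the top of this file).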
579
def sample_dataset(dataset: Dataset, ratio: float) -> Dataset:
    logger.info(f"Ratio: {ratio}")
    if ratio >= 1:
        return dataset

    num_samples = int(len(dataset) * ratio)
    indices = rng.choice(len(dataset), size=num_samples, replace=False, shuffle=False)
    return dataset.select(indices)

588
def main():
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    args = get_args()
    logger.info(f"** The job is run with the following arguments: **\n{args}\n **** ")

    # Compute save path
    save_path: Path
    if args.dataset_path in CATALOGUE_DATASETS:
        lang = get_catalogue_language(args.dataset_path)
        filename = f"{normalise_catalogue_dataset_name_regex.match(args.dataset_path).group(1)}.jsonl"
        save_path = Path(args.save_jsonl_dataset_path_prefix) / lang / filename
    elif args.dataset_path in OSCAR_DATASETS:
        lang = get_oscar_language(args.dataset_path)
        save_path = Path(args.save_jsonl_dataset_path_prefix) / lang / f"lm_{lang}_oscar.jsonl"
    else:
        raise NotImplementedError

    # A dataset that has already been saved doesn't require us to re-run the pipeline
    if save_path.exists():
        logger.info(f"{save_path} already exists. Exiting early.")
        return

    # load_dataset
    logger.info(f"Loading {args.dataset_path}")
    if args.dataset_path in CATALOGUE_DATASETS:
        ds = load_from_disk(Path(args.dataset_path) / "final")
    elif args.dataset_path in OSCAR_DATASETS:
        ds = load_from_disk(args.dataset_path)
    else:
        raise NotImplementedError

    # remove all columns except text
    logger.info("Removing all columns except `text`")
    columns_to_remove = set(ds.column_names)
    columns_to_remove.remove("text")
    ds = ds.remove_columns(list(columns_to_remove))

    # sample dataset according to ratio
    logger.info("Sampling dataset according to the given ratio")
    if args.dataset_path in CATALOGUE_DATASETS:
        ds = sample_dataset(ds, CATALOGUE_DATASETS[args.dataset_path])
    elif args.dataset_path in OSCAR_DATASETS:
        ds = sample_dataset(ds, OSCAR_DATASETS[args.dataset_path])
    else:
        raise NotImplementedError

    # save to json
    logger.info(f"Saving to {save_path}")
640
    tmp_save_path = Path(save_path.parent, f"tmp-{save_path.name}")
    tmp_save_path.parent.mkdir(parents=True, exist_ok=True)
    ds.to_json(
        tmp_save_path,
        num_proc=args.num_proc,
        batch_size=args.batch_size
    )
    tmp_save_path.rename(save_path)

if __name__ == "__main__":
    main()
data/catalogue/training_dataset_ratios.json ADDED
@@ -0,0 +1,198 @@
1
+ [
2
+ {
3
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo-tum/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-tum_text_document",
4
+ "ratio": 1.6908712906621913e-07
5
+ },
6
+ {
7
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo-ki/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-ki_text_document",
8
+ "ratio": 3.825549510416119e-07
9
+ },
10
+ {
11
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo-bm/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-bm_text_document",
12
+ "ratio": 4.096353790340665e-07
13
+ },
14
+ {
15
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo-ak/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-ak_text_document",
16
+ "ratio": 6.938762268345196e-07
17
+ },
18
+ {
19
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo-ts/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-ts_text_document",
20
+ "ratio": 7.058099259657895e-07
21
+ },
22
+ {
23
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo-st/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-st_text_document",
24
+ "ratio": 7.211293983608406e-07
25
+ },
26
+ {
27
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo-ny/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-ny_text_document",
28
+ "ratio": 1.0802142612678056e-06
29
+ },
30
+ {
31
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo-tw/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-tw_text_document",
32
+ "ratio": 1.254210185754728e-06
33
+ },
34
+ {
35
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo-tn/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-tn_text_document",
36
+ "ratio": 1.4345976209947068e-06
37
+ },
38
+ {
39
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo-ln/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-ln_text_document",
40
+ "ratio": 1.5615957880343798e-06
41
+ },
42
+ {
43
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo-nso/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-nso_text_document",
44
+ "ratio": 1.5685487152725018e-06
45
+ },
46
+ {
47
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo-fon/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-fon_text_document",
48
+ "ratio": 2.4181733912998574e-06
49
+ },
50
+ {
51
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo-rn/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-rn_text_document",
52
+ "ratio": 2.6240721674330045e-06
53
+ },
54
+ {
55
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo-wo/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-wo_text_document",
56
+ "ratio": 3.788035372978134e-06
57
+ },
58
+ {
59
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo-lg/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-lg_text_document",
60
+ "ratio": 4.411728781061637e-06
61
+ },
62
+ {
63
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo-sn/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-sn_text_document",
64
+ "ratio": 5.462081443441739e-06
65
+ },
66
+ {
67
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo-zu/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-zu_text_document",
68
+ "ratio": 7.960494206222536e-06
69
+ },
70
+ {
71
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo-ig/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-ig_text_document",
72
+ "ratio": 1.1476452755243308e-05
73
+ },
74
+ {
75
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo-xh/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-xh_text_document",
76
+ "ratio": 1.420376509569156e-05
77
+ },
78
+ {
79
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo-rw/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-rw_text_document",
80
+ "ratio": 3.211033678381048e-05
81
+ },
82
+ {
83
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo-yo/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-yo_text_document",
84
+ "ratio": 5.92174680475419e-05
85
+ },
86
+ {
87
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-as/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-as_text_document",
88
+ "ratio": 0.00011018118094953408
89
+ },
90
+ {
91
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo-sw/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-sw_text_document",
92
+ "ratio": 0.00016215947420710153
93
+ },
94
+ {
95
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-or/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-or_text_document",
96
+ "ratio": 0.00035919185161466504
97
+ },
98
+ {
99
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-gu/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-gu_text_document",
100
+ "ratio": 0.0004020242586250887
101
+ },
102
+ {
103
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-mr/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-mr_text_document",
104
+ "ratio": 0.000501086367279419
105
+ },
106
+ {
107
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-pa/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-pa_text_document",
108
+ "ratio": 0.0005083234921794582
109
+ },
110
+ {
111
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/zht/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_zht_text_document",
112
+ "ratio": 0.0005175821828332756
113
+ },
114
+ {
115
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-kn/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-kn_text_document",
116
+ "ratio": 0.0006189102835836872
117
+ },
118
+ {
119
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-ne/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-ne_text_document",
120
+ "ratio": 0.0006671231017737995
121
+ },
122
+ {
123
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-te/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-te_text_document",
124
+ "ratio": 0.0009127953712102532
125
+ },
126
+ {
127
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-ml/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-ml_text_document",
128
+ "ratio": 0.0010332975205379394
129
+ },
130
+ {
131
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-ur/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-ur_text_document",
132
+ "ratio": 0.0012451803025486932
133
+ },
134
+ {
135
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/eu/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_eu_text_document",
136
+ "ratio": 0.0015592580929764492
137
+ },
138
+ {
139
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-ta/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-ta_text_document",
140
+ "ratio": 0.002113263681784288
141
+ },
142
+ {
143
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-bn/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-bn_text_document",
144
+ "ratio": 0.005491432072714383
145
+ },
146
+ {
147
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-hi/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-hi_text_document",
148
+ "ratio": 0.007468936340601538
149
+ },
150
+ {
151
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/id/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_id_text_document",
152
+ "ratio": 0.010918042445283764
153
+ },
154
+ {
155
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/ca/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_ca_text_document",
156
+ "ratio": 0.011240177446143296
157
+ },
158
+ {
159
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/vi/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_vi_text_document",
160
+ "ratio": 0.024618602569261978
161
+ },
162
+ {
163
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/ar/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_ar_text_document",
164
+ "ratio": 0.03306323144550797
165
+ },
166
+ {
167
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/pt/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_pt_text_document",
168
+ "ratio": 0.049537176530748885
169
+ },
170
+ {
171
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/oscar_zh/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_oscar_zh_text_document",
172
+ "ratio": 0.0553990566108835
173
+ },
174
+ {
175
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/oscar_en/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_oscar_en_text_document",
176
+ "ratio": 0.08138409077754236
177
+ },
178
+ {
179
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/es/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_es_text_document",
180
+ "ratio": 0.10667605099879819
181
+ },
182
+ {
183
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/zhs/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_zhs_text_document",
184
+ "ratio": 0.12089121520420677
185
+ },
186
+ {
187
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/code/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_code_text_document",
188
+ "ratio": 0.13024371838857235
189
+ },
190
+ {
191
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/fr/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_fr_text_document",
192
+ "ratio": 0.13054073905636543
193
+ },
194
+ {
195
+ "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/en/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_en_text_document",
196
+ "ratio": 0.2216634990836396
197
+ }
198
+ ]
data/catalogue/training_dataset_ratios_batch_0.json ADDED
The diff for this file is too large to render. See raw diff
 
data/catalogue/training_dataset_ratios_merged_nigercongo.json ADDED
@@ -0,0 +1,114 @@
+ [
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-as/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-as_text_document",
+         "ratio": 0.00011018118094953408
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-or/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-or_text_document",
+         "ratio": 0.00035919185161466504
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-gu/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-gu_text_document",
+         "ratio": 0.0004020242586250887
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-mr/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-mr_text_document",
+         "ratio": 0.000501086367279419
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-pa/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-pa_text_document",
+         "ratio": 0.0005083234921794582
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/zht/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_zht_text_document",
+         "ratio": 0.0005175821828332756
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-kn/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-kn_text_document",
+         "ratio": 0.0006189102835836872
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-ne/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-ne_text_document",
+         "ratio": 0.0006671231017737995
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-te/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-te_text_document",
+         "ratio": 0.0009127953712102532
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-ml/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-ml_text_document",
+         "ratio": 0.0010332975205379394
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-ur/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-ur_text_document",
+         "ratio": 0.0012451803025486932
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/eu/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_eu_text_document",
+         "ratio": 0.0015592580929764492
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-ta/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-ta_text_document",
+         "ratio": 0.002113263681784288
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-bn/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-bn_text_document",
+         "ratio": 0.005491432072714383
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/indic-hi/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-hi_text_document",
+         "ratio": 0.007468936340601538
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/id/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_id_text_document",
+         "ratio": 0.010918042445283764
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/ca/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_ca_text_document",
+         "ratio": 0.011240177446143296
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/vi/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_vi_text_document",
+         "ratio": 0.024618602569261978
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/ar/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_ar_text_document",
+         "ratio": 0.03306323144550797
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/pt/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_pt_text_document",
+         "ratio": 0.049537176530748885
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/oscar_zh/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_oscar_zh_text_document",
+         "ratio": 0.0553990566108835
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/oscar_en/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_oscar_en_text_document",
+         "ratio": 0.08138409077754236
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/es/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_es_text_document",
+         "ratio": 0.10667605099879819
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/zhs/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_zhs_text_document",
+         "ratio": 0.12089121520420677
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/code/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_code_text_document",
+         "ratio": 0.13024371838857235
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/fr/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_fr_text_document",
+         "ratio": 0.13054073905636543
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/en/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_en_text_document",
+         "ratio": 0.2216634990836396
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v2/nigercongo/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo_text_document",
+         "ratio": 0.00031581334183345284
+     }
+ ]
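The `ratio` fields in these files are the per-dataset sampling weights consumed by the training setup, so within one file they should presumably sum to ~1. A quick one-liner sanity check (a sketch, assuming it is run from `data/catalogue/`):

```
python -c "import json; d = json.load(open('training_dataset_ratios_merged_nigercongo.json')); \
print(len(d), 'datasets, ratio sum =', sum(x['ratio'] for x in d))"
```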
data/catalogue/training_dataset_ratios_merged_nigercongo_v3.json ADDED
@@ -0,0 +1,114 @@
+ [
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/ar/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_ar_text_document",
+         "ratio": 0.0330676168743166
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/ca/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_ca_text_document",
+         "ratio": 0.011242051312222764
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/code/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_code_text_document",
+         "ratio": 0.13027200903379185
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/en/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_en_text_document",
+         "ratio": 0.22171164529099704
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/es/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_es_text_document",
+         "ratio": 0.10667815627928671
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/eu/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_eu_text_document",
+         "ratio": 0.0015595123898173287
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/fr/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_fr_text_document",
+         "ratio": 0.13054018439603915
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/id/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_id_text_document",
+         "ratio": 0.01091803753667153
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/indic-as/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-as_text_document",
+         "ratio": 0.00011021422347108609
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/indic-bn/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-bn_text_document",
+         "ratio": 0.005492381453597748
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/indic-gu/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-gu_text_document",
+         "ratio": 0.0004021215011318779
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/indic-hi/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-hi_text_document",
+         "ratio": 0.007470068593492175
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/indic-kn/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-kn_text_document",
+         "ratio": 0.0006190467776576425
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/indic-ml/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-ml_text_document",
+         "ratio": 0.0010335296343329384
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/indic-mr/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-mr_text_document",
+         "ratio": 0.0005012010684646179
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/indic-ne/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-ne_text_document",
+         "ratio": 0.0006672772956128299
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/indic-or/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-or_text_document",
+         "ratio": 0.00035928138344705506
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/indic-pa/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-pa_text_document",
+         "ratio": 0.0005084433130291778
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/indic-ta/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-ta_text_document",
+         "ratio": 0.0021137328219915496
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/indic-te/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-te_text_document",
+         "ratio": 0.0009129946225980253
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/indic-ur/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_indic-ur_text_document",
+         "ratio": 0.0012454301613725426
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/nigercongo-all/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_nigercongo-all_text_document",
+         "ratio": 0.00031588689199263235
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/oscar-en/meg_ds_bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_text_document",
+         "ratio": 0.08137213783015229
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/oscar-zh/meg_ds_bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_text_document",
+         "ratio": 0.055293935695898196
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/pt/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_pt_text_document",
+         "ratio": 0.04954150576361177
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/vi/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_vi_text_document",
+         "ratio": 0.02461641286531197
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/zhs/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_zhs_text_document",
+         "ratio": 0.12091748245519074
+     },
+     {
+         "dataset_path": "/gpfswork/rech/six/commun/bigscience-training/merged-meg-ds_v3_pii/zht/bigscience-catalogue-data-dev_byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles_zht_text_document",
+         "ratio": 0.0005177025345001541
+     }
+ ]
data/mc4/README.md ADDED
@@ -0,0 +1,26 @@
+ # mc4
+
+ ## Megatron pre-processed files
+
+
+ These are the megatron-ready mc4 files:
+
+ - 1.3TB: `$six_ALL_CCFRWORK/datasets-custom/mc4/mc4_preprocessing`
+
+ Should something get corrupted there is a backup:
+
+ - 1.3TB: `$six_ALL_CCFRSTORE/datasets-custom/mc4/mc4_preprocessing`
+
+ If files need to be re-pre-processed, the original jsonl files are at:
+
+ - 186GB: `$six_ALL_CCFRSTORE/datasets-custom/mc4/mc4_sampled_raw`
+
+
+ ## How pre-processing was done
+
+ The pre-processing was done outside of JZ, and was downloaded from:
+
+ * [mc4_preprocessing](https://console.cloud.google.com/storage/browser/bigscience/mc4_preprocessing?pageState=(%22StorageObjectListTable%22:(%22f%22:%22%255B%255D%22)))
+ * [mc4_sampled_raw](https://console.cloud.google.com/storage/browser/bigscience/mc4_sampled_raw?pageState=(%22StorageObjectListTable%22:(%22f%22:%22%255B%255D%22)))
+
+ To download, one needs to activate the [google-cloud-sdk](../../jz/tools/google-cloud-sdk.md) already installed on JZ and then use `gsutil` as instructed in the `Download` tab at the links above.
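For reference, a minimal download sketch with `gsutil` (the `gs://bigscience/...` bucket paths are inferred from the console URLs above; the flags and destination directory are assumptions, not the exact commands that were run):

```
gsutil -m cp -r gs://bigscience/mc4_preprocessing $six_ALL_CCFRWORK/datasets-custom/mc4/
gsutil -m cp -r gs://bigscience/mc4_sampled_raw $six_ALL_CCFRSTORE/datasets-custom/mc4/
```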
data/openwebtext/openwebtext-to-jsonl.py ADDED
@@ -0,0 +1,28 @@
+ #!/usr/bin/env python
+
+ # generate jsonl version of dataset that can be fed to megatron-lm preprocessor
+ #
+ # full dataset
+ # ./openwebtext-to-jsonl.py
+ #
+ # 10k small dataset
+ # ./openwebtext-to-jsonl.py -10k
+
+ import sys
+ from datasets import load_dataset
+
+ if "-10k" in sys.argv:
+     dataset_name = "stas/openwebtext-10k"
+ else:
+     dataset_name = "openwebtext"
+
+ name = dataset_name.split('/')[-1]
+ ds = load_dataset(dataset_name, split='train')
+ ds.to_json(f"{name}.jsonl", orient="records", lines=True)
+
+ # subset to jsonlines
+ # n_samples = 1000
+ # ds = load_dataset(dataset_name, split='train')
+ # ds_small = ds.select(range(n_samples))
+ # path = f"{dataset_name}-{n_samples}.jsonl"
+ # ds_small.to_json(path, orient="records", lines=True)
data/oscar-multilingual/README.md ADDED
@@ -0,0 +1,218 @@
+ # OSCAR
+
+
+ ## Megatron pre-processed files
+
+ These are the megatron-ready OSCAR files:
+
+ - Full 300M version (529GB) : `$six_ALL_CCFRWORK/datasets-custom/oscar-en`
+ - Tiny 10K version (56M): `$six_ALL_CCFRWORK/datasets-custom/oscar-en-10k`
+
+ Each folder contains `meg-gpt2_text_document.bin` and `meg-gpt2_text_document.idx`, and the Megatron-LM training script expects the following argument:
+ ```
+ --data-path $six_ALL_CCFRWORK/datasets-custom/oscar-en/meg-gpt2_text_document
+ ```
+
+ Should something get corrupted there is a backup:
+
+ - Full 300M version (529GB) : `$six_ALL_CCFRSTORE/datasets-custom/oscar-en`
+ - Tiny 10K version (56M): `$six_ALL_CCFRSTORE/datasets-custom/oscar-en-10k`
+
+
+
+
+ ## How pre-processing was done
+
+ In general the process is to first generate a jsonl version of the dataset, while filtering out entries smaller than 1K, and then run that jsonl data through the Megatron-LM preprocessing tool.
+
+ The rest of this document is the step-by-step process of accomplishing that in an efficient way.
+
+ **Update: Now that we better understand Megatron-LM's dataloader we know that it concatenates all docs on the fly and delivers seqlen at a time as a single sample ([reference](https://github.com/NVIDIA/Megatron-LM/blob/90e0a0dd08159e1c95f4f9d99bb8687f327d36c3/megatron/data/gpt_dataset.py#L169-L185)), so we don't need to filter out docs that are shorter than seqlen. Therefore, in future runs we should adjust `oscar-to-jsonl.py` to remove the filtering.**
+
+ 1. Convert `datasets` to `jsonl` which is the format required by Megatron-LM
+
+ The main script is [oscar-to-jsonl.py](./oscar-multilingual-to-jsonl.py). Edit it to change the languages to use; initially we are using just English.
+
+ Note that since shuffling slows the writeout process by 5-7 times, we don't shuffle in the script, but post-process it externally. See step 3.
+
+ To launch: [oscar-to-jsonl.slurm](./oscar-multilingual-to-jsonl.slurm).
+
+ With "unshuffled_deduplicated_en", after filtering to keep only large entries (`>=1024` chars) we end up with 70754K examples out of 304230K total (about 1/4 of the full dataset).
+
+ The result is 5 files `oscar-[0-4].jsonl` of about 250GB each.
+
+ Runtime: 2-3h to download, ~2h to build, ~8h to filter, ~1.5h to write the shards out
+
+
+ 2. Concatenate
+
+ ```
+ cat oscar-[0-4].jsonl > oscar-en.jsonl
+ ```
+
+ This gives us a 1.2TB file.
+
+ Check:
+ ```
+ $ wc -l oscar-en.jsonl
+ 304230423 oscar-en.jsonl
+ ```
+
+ Runtime: a few minutes
+
+
+
+ 3. Shuffle
+
+ Megatron requires users to do their own shuffling of the jsonl input.
+
+ It was too slow to do inside the filtering script, so we are using a post-processing solution.
+ Using https://github.com/alexandres/terashuf and 150GB of RAM we shuffle the file in ~1.5h.
+
+ Important: note that the slurm job uses SCRATCH for `TMPDIR` and also sets the memory limit it can use to 150.0 (GB) (slightly under the 160GB available on this slurm allocation, to allow for other processes).
+
+ To launch: [oscar-fast-shuffle.slurm](./oscar-fast-shuffle.slurm)
+
+ `terashuf` is in `$six_ALL_CCFRWORK/bin/terashuf`
+
+ The result is `oscar-shuffled.jsonl`
+
+ Runtime: 2h
+
+
+
+ 4. Megatron-LM preprocess
+
+ **Update**: that was an error: we can actually run for 100h on `-p cpu_p1`, so the normal script could have completed without a problem; but as a result of this mistake we can now pre-process data much faster.
+
+ We only have 20h to do the processing, which is not enough for 300M records. Trying to do the whole thing in one preprocessing script took more than 24h and thus failed. Adding more than 16 workers didn't speed things up.
+
+ So we are splitting it in 4 chunks of ~80M records:
+
+ ```
+ split -l 77000000 oscar-en-shuffled.jsonl oscar
+ mv oscaraa oscar-en-shuffled-p1.jsonl
+ mv oscarab oscar-en-shuffled-p2.jsonl
+ mv oscarac oscar-en-shuffled-p3.jsonl
+ mv oscarad oscar-en-shuffled-p4.jsonl
+ ```
+
+ We do the pre-processing:
+
+ The main script to launch: [oscar-jsonl-to-meg-gpt2.slurm](./oscar-jsonl-to-meg.slurm), and we need to make copies of it for each chunk:
+
+ ```
+ cp oscar-jsonl-to-meg-gpt2.slurm oscar-jsonl-to-meg-gpt2-1.slurm
+ cp oscar-jsonl-to-meg-gpt2.slurm oscar-jsonl-to-meg-gpt2-2.slurm
+ cp oscar-jsonl-to-meg-gpt2.slurm oscar-jsonl-to-meg-gpt2-3.slurm
+ cp oscar-jsonl-to-meg-gpt2.slurm oscar-jsonl-to-meg-gpt2-4.slurm
+ perl -pi -e 's|p1|p1|' oscar-jsonl-to-meg-gpt2-1.slurm
+ perl -pi -e 's|p1|p2|' oscar-jsonl-to-meg-gpt2-2.slurm
+ perl -pi -e 's|p1|p3|' oscar-jsonl-to-meg-gpt2-3.slurm
+ perl -pi -e 's|p1|p4|' oscar-jsonl-to-meg-gpt2-4.slurm
+ ```
+
+ ```
+ sbatch oscar-jsonl-to-meg-gpt2-1.slurm
+ sbatch oscar-jsonl-to-meg-gpt2-2.slurm
+ sbatch oscar-jsonl-to-meg-gpt2-3.slurm
+ sbatch oscar-jsonl-to-meg-gpt2-4.slurm
+ ```
+
+ This took about 6h per chunk, but the chunks ran in parallel on different instances. The initial attempt to run it all in one chunk was projected to take 24 hours, which couldn't fit into the 20h cap, so with the split we finished the whole thing in 6 hours.
+
+ Outcome:
+
+ ```
+ $ ls -1sh meg-gpt2-p*
+ 131G meg-gpt2-p1_text_document.bin
+ 1.4G meg-gpt2-p1_text_document.idx
+ 131G meg-gpt2-p2_text_document.bin
+ 1.4G meg-gpt2-p2_text_document.idx
+ 131G meg-gpt2-p3_text_document.bin
+ 1.4G meg-gpt2-p3_text_document.idx
+ 138G meg-gpt2-p4_text_document.bin
+ 1.5G meg-gpt2-p4_text_document.idx
+ ```
+
+ Next, merging: [oscar-meg-gpt2-merge.slurm](./oscar-meg-gpt2-merge.slurm)
+
+ Runtime: 22min - needed 26GB RSS RAM
+
+ Outcome: 304_230_423 records
+
+ ```
+ $ ls -1sh meg-gpt2_text_document.*
+ 529G meg-gpt2_text_document.bin
+ 5.7G meg-gpt2_text_document.idx
+ ```
+
+ Total runtime: under 7h.
+
+ Let's also make a small 10k version for experiments:
+
+ ```
+ head -10000 oscar-shuffled.jsonl > oscar-shuffled-10k.jsonl
+ ```
+ and then process it with the same slurm script above, but changing the input to `oscar-shuffled-10k.jsonl`
+
+
+
+ 5. Final destination
+
+ We did all the processing on the SCRATCH partition, which gets wiped out every 30 days, so we need to move the files to where they will not be deleted.
+
+ Since at this moment we used just the English part of the OSCAR dataset, let's include that in the folder name to differentiate it from other builds that will be multi-lingual.
+
+ Make the final result, which will be used by the megatron training script, available on the persistent WORK partition:
+
+ ```
+ mkdir oscar-en
+ mv meg-gpt2_text_document.* oscar-en
+ cp -r oscar-en $six_ALL_CCFRWORK/datasets-custom
+ ```
+
+ Back it up to STORE:
+
+ It's already binary and just 2 files, so no need to tar (STORE has limited inodes)
+ ```
+ mkdir -p $six_ALL_CCFRSTORE/datasets-custom
+ cp -r oscar-en $six_ALL_CCFRSTORE/datasets-custom
+ ```
+
+ Also copy the small version for experiments to WORK and STORE:
+ ```
+ cp -r oscar-en-10k $six_ALL_CCFRWORK/datasets-custom
+ cp -r oscar-en-10k $six_ALL_CCFRSTORE/datasets-custom
+ ```
+
+ Tar/gz `oscar-shuffled.jsonl` and the dataset files to STORE (see the sketch after this README):
+
+ ```
+
+
+ ```
+
+ 6. Estimate total number of tokens
+
+ Make a 1GB slice:
+ ```
+ $ head -79000 oscar-en-shuffled.jsonl > oscar-1GB.jsonl
+ $ ls -sh oscar-1GB.jsonl
+ 1.0G oscar-1GB.jsonl
+ ```
+
+ Analyze it (low mem-footprint):
+ ```
+ $ python -c "import json, sys; \
+ from transformers import GPT2TokenizerFast; \
+ tokenizer = GPT2TokenizerFast.from_pretrained('gpt2'); \
+ print(sum(tokenizer(json.loads(l)['text'], return_length=True).length[0] for l in sys.stdin.readlines()))" < oscar-1GB.jsonl
+ 234260484
+ ```
+
+ Extrapolate:
+
+ Thus 234M tokens in 1GB, ~280B tokens in 1.2TB (`234*1200`)
+
+ Incidentally this coincides with @Yozh's `FILE_SIZE_IN_GBS/4.5` formula! (average 4.5 chars per word)
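The empty Tar/gz block in step 5 above corresponds to [oscar-to-backup-tgz.slurm](./oscar-to-backup-tgz.slurm), added further down in this commit; in short it does:

```
# plain text -> gz, moved to STORE
gzip oscar-en-shuffled.jsonl
mv oscar-en-shuffled.jsonl.gz $six_ALL_CCFRSTORE/datasets/

# the dataset cache is already binary -> plain tar (STORE has limited inodes)
tar -cvf $six_ALL_CCFRSTORE/datasets/oscar-en-cache.tar cache
```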
data/oscar-multilingual/download-oscars.py ADDED
@@ -0,0 +1,50 @@
+ #!/usr/bin/env python
+ #
+ # generate jsonl version of dataset that can be fed to megatron-lm pre-processor
+ #
+ # see various notes in the scripts for different options
+ #
+ # full dataset:
+ # ./oscar-multilingual-to-jsonl.py
+ # cat oscar-[0-4].jsonl > oscar.jsonl
+ #
+ # small dataset (0.1%):
+ # ./oscar-multilingual-to-jsonl.py -s
+ # cat oscar-[0-4].jsonl > oscar.jsonl
+
+ import logging
+ import os
+
+ import datasets
+
+ print(f"Using datasets=={datasets.__version__}")
+
+ DATASET_NAME = "oscar"
+ logging.getLogger("transformers.tokenization_utils_base").setLevel(logging.ERROR)
+
+ ### Build/Load Datasets
+
+ # Once this part of the process completes it gets cached, so on subsequent runs it'll be much faster
+
+ language_subsets = {
+     "unshuffled_deduplicated_ar",
+     "unshuffled_deduplicated_sw",
+     "unshuffled_deduplicated_zh",
+     # "unshuffled_deduplicated_en",
+     "unshuffled_deduplicated_fr",
+     "unshuffled_deduplicated_pt",
+     "unshuffled_deduplicated_es",
+     "unshuffled_deduplicated_ja",
+     "unshuffled_deduplicated_ru",
+     "unshuffled_deduplicated_hi",
+     "unshuffled_deduplicated_ur",
+     "unshuffled_deduplicated_bn",
+     "unshuffled_deduplicated_id",
+     "unshuffled_deduplicated_am",
+     "unshuffled_deduplicated_ca",
+ }
+
+ for language_subset in language_subsets:
+     builder = datasets.load_dataset_builder(DATASET_NAME, language_subset, cache_dir='cache')
+     if not os.path.isdir(builder.cache_dir):
+         builder.download_and_prepare(ignore_verifications=True)
data/oscar-multilingual/download-oscars.slurm ADDED
@@ -0,0 +1,18 @@
+ #!/bin/bash
+ #SBATCH --job-name=download-oscars # job name
+ #SBATCH --ntasks=1 # number of MP tasks
+ #SBATCH --nodes=1
+ #SBATCH --cpus-per-task=40 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@cpu
+ #SBATCH --partition=compil
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-prod
+
+ # use SCRATCH for building as it's much faster
+ cd $six_ALL_CCFRSCRATCH/datasets/oscar-multilingual
+ python $SCRATCH/bigscience/data/oscar-multilingual/download-oscars.py
data/oscar-multilingual/oscar-fast-shuffle.slurm ADDED
@@ -0,0 +1,27 @@
+ #!/bin/bash
+ #SBATCH --job-name=oscar-fast-shuffle # job name
+ #SBATCH --ntasks=1 # number of MP tasks
+ #SBATCH --nodes=1 # number of nodes
+ #SBATCH --cpus-per-task=40 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time=100:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@cpu # allocation account
+ #SBATCH --partition=cpu_p1
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-prod
+
+ # must set tmp to SCRATCH to be fast
+ export TMPDIR=$six_ALL_CCFRSCRATCH/tmp
+ mkdir -p $TMPDIR
+
+ # memory to use in GBs float
+ export MEMORY=150.0
+
+ input=oscar-en.jsonl
+ output=oscar-en-shuffled.jsonl
+
+ cd $six_ALL_CCFRSCRATCH/datasets/oscar-small
+ /usr/bin/time -v $six_ALL_CCFRWORK/bin/terashuf < $input > $output
data/oscar-multilingual/oscar-jsonl-to-meg.sh ADDED
@@ -0,0 +1,7 @@
+ #!/bin/bash
+ # the $1 argument can be "equal" or "alpha" to choose the corresponding tokenizer
+ # no en in this script; we've mostly processed it on GCP
+ for language in fr es zh hi ur bn id ca ar pt vi eu
+ do
+     sbatch $ALL_CCFRWORK/code/bigscience/data/oscar-multilingual/oscar-jsonl-to-meg.slurm $language $1
+ done
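A usage sketch (choosing `alpha` here is just an example; per the comment above, the only allowed values are `equal` and `alpha`):

```
# submits one oscar-jsonl-to-meg.slurm job per language with the alpha-weighted tokenizer
bash oscar-jsonl-to-meg.sh alpha
```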
data/oscar-multilingual/oscar-jsonl-to-meg.slurm ADDED
@@ -0,0 +1,30 @@
+ #!/bin/bash
+ #SBATCH --job-name=oscar-jsonl-to-meg-equal # job name
+ #SBATCH --ntasks=1 # number of MP tasks
+ #SBATCH --nodes=1
+ #SBATCH --cpus-per-task=40 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --qos=qos_cpu-t3
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@cpu
+ #SBATCH --partition=cpu_p1
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-prod
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ input=$six_ALL_CCFRWORK/datasets-custom/oscar-multilingual/oscar_${1}.jsonl
+ output=$six_ALL_CCFRWORK/datasets-custom/oscar-multilingual-${2}-tok/oscar_${1}_${2}
+
+ cd $ALL_CCFRWORK/code/Megatron-DeepSpeed
+ /usr/bin/time -v python tools/preprocess_data.py \
+     --input $input \
+     --output-prefix $output \
+     --dataset-impl mmap \
+     --tokenizer-type PretrainedFromHF \
+     --tokenizer-name-or-path bigscience/oscar_13_languages_${2}_weight \
+     --append-eod \
+     --workers 25
data/oscar-multilingual/oscar-meg-gpt2-merge.slurm ADDED
@@ -0,0 +1,24 @@
+ #!/bin/bash
+ #SBATCH --job-name=oscar-meg-gpt2-merge.slurm # job name
+ #SBATCH --ntasks=1 # number of MP tasks
+ #SBATCH --nodes=1
+ #SBATCH --cpus-per-task=10 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time=100:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@cpu
+ #SBATCH --partition=cpu_p1
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-prod
+
+ cd $six_ALL_CCFRWORK/code/Megatron-DeepSpeed
+ DATA=$six_ALL_CCFRSCRATCH/datasets/oscar-multilingual
+ /usr/bin/time -v python tools/merge_preprocessed_data.py \
+     --datasets \
+         $DATA/meg-gpt2-p1_text_document \
+         $DATA/meg-gpt2-p2_text_document \
+         $DATA/meg-gpt2-p3_text_document \
+         $DATA/meg-gpt2-p4_text_document \
+     --output-prefix $DATA/meg-gpt2_text_document
data/oscar-multilingual/oscar-multilingual-to-jsonl.py ADDED
@@ -0,0 +1,106 @@
+ #!/usr/bin/env python
+ #
+ # generate jsonl version of dataset that can be fed to megatron-lm pre-processor
+ #
+ # see various notes in the scripts for different options
+ #
+ # full dataset:
+ # ./oscar-multilingual-to-jsonl.py
+ # cat oscar-[0-4].jsonl > oscar.jsonl
+ #
+ # small dataset (0.1%):
+ # ./oscar-multilingual-to-jsonl.py -s
+ # cat oscar-[0-4].jsonl > oscar.jsonl
+
+ import logging
+ from argparse import ArgumentParser
+ from multiprocessing import Process, Queue
+
+ from datasets import load_dataset, ReadInstruction
+
+ import datasets
+
+ print(f"Using datasets=={datasets.__version__}")
+
+ DATASET_NAME = "oscar"
+
+ logging.getLogger("transformers.tokenization_utils_base").setLevel(logging.ERROR)
+
+ parser = ArgumentParser()
+ parser.add_argument('-s', '--subset', action='store_true', help='Process and save a subset (0.1%) of data')
+ args = parser.parse_args()
+
+ # Once this part of the process runs it gets cached, so on subsequent runs it'll be much faster
+
+ split = ReadInstruction("train", to=0.1 if args.subset else 100, unit="%")
+
+ ### Build/Load Datasets
+
+ # Once this part of the process completes it gets cached, so on subsequent runs it'll be much faster
+
+ language_subsets = {
+     "unshuffled_deduplicated_hi",
+     "unshuffled_deduplicated_ur",
+     "unshuffled_deduplicated_bn",
+     "unshuffled_deduplicated_id",
+     "unshuffled_deduplicated_ca",
+     "unshuffled_deduplicated_eu",
+     "unshuffled_deduplicated_ar",
+     "unshuffled_deduplicated_sw",
+     "unshuffled_deduplicated_zh",
+     "unshuffled_deduplicated_en",
+     "unshuffled_deduplicated_fr",
+     "unshuffled_deduplicated_pt",
+     "unshuffled_deduplicated_es",
+     "unshuffled_deduplicated_vi",
+ }
+ sharded_languages = {
+     "unshuffled_deduplicated_en",
+     "unshuffled_deduplicated_ru",
+     "unshuffled_deduplicated_de",
+     "unshuffled_deduplicated_es",
+     "unshuffled_deduplicated_fr",
+     "unshuffled_deduplicated_ja",
+     "unshuffled_deduplicated_zh",
+ }
+
+ ### Save jsonl
+
+ # important: shuffling makes the process 5-7 times slower! best to shuffle the end jsonl file using
+ # https://github.com/alexandres/terashuf (should take ~1h to shuffle a 900GB file with 70M records
+ # using 150GB RAM)
+
+ # version 1: one writer - quite slow
+ #shuffled_dataset = filtered_dataset.shuffle()
+ #shuffled_dataset = filtered_dataset
+ #shuffled_dataset.to_json(f"{DATASET_NAME}.jsonl", orient="records", lines=True, force_ascii=False)
+
+ # version 2: multiple parallel sharded writes
+ # much faster, but will require concatenation at the end
+ # 10 shards proved too much for the instance and 3 processes were killed; 5 worked well
+ # took about 1.5h per shard
+
+ N_SHARDS = 5
+ def process_shard(dataset, n_shards, idx, language_subset):
+     if n_shards > 1:
+         print(f"Sharding {idx}")
+         ds_shard = dataset.shard(n_shards, idx, contiguous=True)
+         # shuffle will make things much much slower
+         #ds_shard = ds_shard.shuffle() # remove contiguous=True above if shuffling
+     else:
+         ds_shard = dataset
+     print(f"Saving {DATASET_NAME}-{language_subset}-{idx}.jsonl")
+     export_filename = f"{DATASET_NAME}-{language_subset}-{idx}.jsonl" if n_shards > 1 else \
+         f"{DATASET_NAME}-{language_subset}.jsonl"
+     ds_shard.to_json(export_filename, orient="records", lines=True, force_ascii=False)
+
+ for language_subset in language_subsets:
+     dataset = load_dataset(DATASET_NAME, language_subset, split=split, keep_in_memory=False, ignore_verifications=True)
+     n_shards = N_SHARDS if language_subset in sharded_languages else 1
+     queue = Queue()
+     processes = [Process(target=process_shard, args=(dataset, n_shards, idx, language_subset,)) for idx in range(n_shards)]
+     for p in processes:
+         p.start()
+
+     for p in processes:
+         p.join()
data/oscar-multilingual/oscar-to-backup-tgz.slurm ADDED
@@ -0,0 +1,26 @@
+ #!/bin/bash
+ #SBATCH --job-name=oscar-to-backup-tgz # job name
+ #SBATCH --ntasks=1 # number of MP tasks
+ #SBATCH --nodes=1
+ #SBATCH --cpus-per-task=10 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time=100:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --partition=cpu_p1
+ #SBATCH --account=six@cpu
+ #SBATCH --qos=qos_cpu-t4
+
+ # 20h is not enough to gzip a 1.2TB file, so have to use the other allocation
+ ##SBATCH --partition=archive
+
+
+ set -x -e
+
+ cd $six_ALL_CCFRSCRATCH/datasets/oscar-small
+
+ # plain text -> gz
+ gzip oscar-en-shuffled.jsonl
+ mv oscar-en-shuffled.jsonl.gz $six_ALL_CCFRSTORE/datasets/
+
+ # already binary -> tar
+ tar -cvf $six_ALL_CCFRSTORE/datasets/oscar-en-cache.tar cache
data/p3/prepare_p3.py ADDED
@@ -0,0 +1,366 @@
1
+ from functools import partial
2
+ import os
3
+ import multiprocessing
4
+ from datasets import load_dataset, load_from_disk
5
+ import jsonlines
6
+
7
+ """Get task list:
8
+ !git clone https://github.com/bigscience-workshop/t-zero.git
9
+ %cd t-zero
10
+ !pip install -e .[seqio_tasks]
11
+ !pip install -q py7zr
12
+
13
+ import t0.seqio_tasks
14
+ import seqio
15
+ tasks = [task.name for task in seqio.MixtureRegistry.get('t0_train').tasks]
16
+ print(tasks)
17
+
18
+
19
+
20
+ After running the script, merge train & validation jsonls separately into two big files:
21
+ cat folder_with_all_jsonl/*.jsonl > merged_file.jsonl
22
+ """
23
+ TZERO_TASK_LIST = [
24
+ 'adversarial_qa_dbert_answer_the_following_q',
25
+ 'adversarial_qa_dbert_based_on',
26
+ 'adversarial_qa_dbert_generate_question',
27
+ 'adversarial_qa_dbert_question_context_answer',
28
+ 'adversarial_qa_dbert_tell_what_it_is',
29
+ 'adversarial_qa_dbidaf_answer_the_following_q',
30
+ 'adversarial_qa_dbidaf_based_on',
31
+ 'adversarial_qa_dbidaf_generate_question',
32
+ 'adversarial_qa_dbidaf_question_context_answer',
33
+ 'adversarial_qa_dbidaf_tell_what_it_is',
34
+ 'adversarial_qa_droberta_answer_the_following_q',
35
+ 'adversarial_qa_droberta_based_on',
36
+ 'adversarial_qa_droberta_generate_question',
37
+ 'adversarial_qa_droberta_question_context_answer',
38
+ 'adversarial_qa_droberta_tell_what_it_is',
39
+ 'ag_news_classify',
40
+ 'ag_news_classify_question_first',
41
+ 'ag_news_classify_with_choices',
42
+ 'ag_news_classify_with_choices_question_first',
43
+ 'ag_news_recommend',
44
+ 'ag_news_which_section',
45
+ 'ag_news_which_section_choices',
46
+ 'amazon_polarity_Is_this_product_review_positive',
47
+ 'amazon_polarity_Is_this_review',
48
+ 'amazon_polarity_Is_this_review_negative',
49
+ 'amazon_polarity_User_recommend_this_product',
50
+ 'amazon_polarity_convey_negative_or_positive_sentiment',
51
+ 'amazon_polarity_flattering_or_not',
52
+ 'amazon_polarity_negative_or_positive_tone',
53
+ 'amazon_polarity_user_satisfied',
54
+ 'amazon_polarity_would_you_buy',
55
+ 'app_reviews_categorize_rating_using_review',
56
+ 'app_reviews_convert_to_rating',
57
+ 'app_reviews_convert_to_star_rating',
58
+ 'app_reviews_generate_review',
59
+ 'cnn_dailymail_3.0.0_2_or_3_sentences',
60
+ 'cnn_dailymail_3.0.0_generate_story',
61
+ 'cnn_dailymail_3.0.0_news_card_view',
62
+ 'cnn_dailymail_3.0.0_news_stock',
63
+ 'cnn_dailymail_3.0.0_news_summary',
64
+ 'cnn_dailymail_3.0.0_spice_up_story',
65
+ 'cnn_dailymail_3.0.0_sum_in_brief',
66
+ 'cnn_dailymail_3.0.0_tldr_summary',
67
+ 'cnn_dailymail_3.0.0_write_an_outline',
68
+ 'common_gen_Example_prompt',
69
+ 'common_gen_Given_concepts_type_1',
70
+ 'common_gen_Given_concepts_type_2',
71
+ 'common_gen_Put_together',
72
+ 'common_gen_choice_in_concept_centric_sentence_generation',
73
+ 'common_gen_random_task_template_prompt',
74
+ 'common_gen_sentence_to_concepts',
75
+ 'common_gen_topic_to_sentence',
76
+ 'common_gen_topics_from_the_sentence',
77
+ 'cos_e_v1.11_aligned_with_common_sense',
78
+ 'cos_e_v1.11_description_question_option_id',
79
+ 'cos_e_v1.11_description_question_option_text',
80
+ 'cos_e_v1.11_explain_why_human',
81
+ 'cos_e_v1.11_generate_explanation_given_text',
82
+ 'cos_e_v1.11_i_think',
83
+ 'cos_e_v1.11_question_description_option_id',
84
+ 'cos_e_v1.11_question_description_option_text',
85
+ 'cos_e_v1.11_question_option_description_id',
86
+ 'cos_e_v1.11_question_option_description_text',
87
+ 'cos_e_v1.11_rationale',
88
+ 'cosmos_qa_context_answer_to_question',
89
+ 'cosmos_qa_context_description_question_answer_id',
90
+ 'cosmos_qa_context_description_question_answer_text',
91
+ 'cosmos_qa_context_description_question_text',
92
+ 'cosmos_qa_context_question_description_answer_id',
93
+ 'cosmos_qa_context_question_description_answer_text',
94
+ 'cosmos_qa_context_question_description_text',
95
+ 'cosmos_qa_description_context_question_answer_id',
96
+ 'cosmos_qa_description_context_question_answer_text',
97
+ 'cosmos_qa_description_context_question_text',
98
+ 'cosmos_qa_no_prompt_id',
99
+ 'cosmos_qa_no_prompt_text',
100
+ 'cosmos_qa_only_question_answer',
101
+ 'dbpedia_14_given_a_choice_of_categories_',
102
+ 'dbpedia_14_given_a_list_of_category_what_does_the_title_belong_to',
103
+ 'dbpedia_14_given_list_what_category_does_the_paragraph_belong_to',
104
+ 'dbpedia_14_pick_one_category_for_the_following_text',
105
+ 'dream_answer_to_dialogue',
106
+ 'dream_baseline',
107
+ 'dream_generate_first_utterance',
108
+ 'dream_generate_last_utterance',
109
+ 'dream_read_the_following_conversation_and_answer_the_question',
110
+ 'duorc_ParaphraseRC_answer_question',
111
+ 'duorc_ParaphraseRC_build_story_around_qa',
112
+ 'duorc_ParaphraseRC_decide_worth_it',
113
+ 'duorc_ParaphraseRC_extract_answer',
114
+ 'duorc_ParaphraseRC_generate_question',
115
+ 'duorc_ParaphraseRC_generate_question_by_answer',
116
+ 'duorc_ParaphraseRC_movie_director',
117
+ 'duorc_ParaphraseRC_question_answering',
118
+ 'duorc_ParaphraseRC_title_generation',
119
+ 'duorc_SelfRC_answer_question',
120
+ 'duorc_SelfRC_build_story_around_qa',
121
+ 'duorc_SelfRC_decide_worth_it',
122
+ 'duorc_SelfRC_extract_answer',
123
+ 'duorc_SelfRC_generate_question',
124
+ 'duorc_SelfRC_generate_question_by_answer',
125
+ 'duorc_SelfRC_movie_director',
126
+ 'duorc_SelfRC_question_answering',
127
+ 'duorc_SelfRC_title_generation',
128
+ 'gigaword_TLDR',
129
+ 'gigaword_first_sentence_title',
130
+ 'gigaword_generate_summary_for_this',
131
+ 'gigaword_in_a_nutshell',
132
+ 'gigaword_make_a_title',
133
+ 'gigaword_reverse_writing',
134
+ 'gigaword_write_a_title_for_this_sentence',
135
+ 'gigaword_write_an_article',
136
+ 'gigaword_write_its_sentence',
137
+ 'glue_mrpc_equivalent',
138
+ 'glue_mrpc_generate_paraphrase',
139
+ 'glue_mrpc_generate_sentence',
140
+ 'glue_mrpc_paraphrase',
141
+ 'glue_mrpc_replace',
142
+ 'glue_mrpc_same_thing',
143
+ 'glue_mrpc_want_to_know',
144
+ 'glue_qqp_answer',
145
+ 'glue_qqp_duplicate',
146
+ 'glue_qqp_duplicate_or_not',
147
+ 'glue_qqp_meaning',
148
+ 'glue_qqp_quora',
149
+ 'glue_qqp_same_thing',
150
+ 'imdb_Movie_Expressed_Sentiment',
151
+ 'imdb_Movie_Expressed_Sentiment_2',
152
+ 'imdb_Negation_template_for_positive_and_negative',
153
+ 'imdb_Reviewer_Enjoyment',
154
+ 'imdb_Reviewer_Enjoyment_Yes_No',
155
+ 'imdb_Reviewer_Expressed_Sentiment',
156
+ 'imdb_Reviewer_Opinion_bad_good_choices',
157
+ 'imdb_Reviewer_Sentiment_Feeling',
158
+ 'imdb_Sentiment_with_choices_',
159
+ 'imdb_Text_Expressed_Sentiment',
160
+ 'imdb_Writer_Expressed_Sentiment',
161
+ 'kilt_tasks_hotpotqa_combining_facts',
162
+ 'kilt_tasks_hotpotqa_complex_question',
163
+ 'kilt_tasks_hotpotqa_final_exam',
164
+ 'kilt_tasks_hotpotqa_formulate',
165
+ 'kilt_tasks_hotpotqa_straighforward_qa',
166
+ 'multi_news_distill',
167
+ 'multi_news_expand_reverse_task_',
168
+ 'multi_news_summarize',
169
+ 'multi_news_summary_scenario',
170
+ 'multi_news_synthesize',
171
+ 'multi_news_what_are_the_key_points',
172
+ 'paws_labeled_final_Concatenation',
173
+ 'paws_labeled_final_Concatenation_no_label',
174
+ 'paws_labeled_final_Meaning',
175
+ 'paws_labeled_final_Meaning_no_label',
176
+ 'paws_labeled_final_PAWS_ANLI_GPT3',
177
+ 'paws_labeled_final_PAWS_ANLI_GPT3_no_label',
178
+ 'paws_labeled_final_Rewrite',
179
+ 'paws_labeled_final_Rewrite_no_label',
180
+ 'paws_labeled_final_context_question',
181
+ 'paws_labeled_final_context_question_no_label',
182
+ 'paws_labeled_final_paraphrase_task',
183
+ 'paws_labeled_final_task_description_no_label',
184
+ 'qasc_is_correct_1',
185
+ 'qasc_is_correct_2',
186
+ 'qasc_qa_with_combined_facts_1',
187
+ 'qasc_qa_with_separated_facts_1',
188
+ 'qasc_qa_with_separated_facts_2',
189
+ 'qasc_qa_with_separated_facts_3',
190
+ 'qasc_qa_with_separated_facts_4',
191
+ 'qasc_qa_with_separated_facts_5',
192
+ 'quail_context_description_question_answer_id',
193
+ 'quail_context_description_question_answer_text',
194
+ 'quail_context_description_question_text',
195
+ 'quail_context_question_answer_description_id',
196
+ 'quail_context_question_answer_description_text',
197
+ 'quail_context_question_description_answer_id',
198
+ 'quail_context_question_description_answer_text',
199
+ 'quail_context_question_description_text',
200
+ 'quail_description_context_question_answer_id',
201
+ 'quail_description_context_question_answer_text',
202
+ 'quail_description_context_question_text',
203
+ 'quail_no_prompt_id',
204
+ 'quail_no_prompt_text',
205
+ 'quarel_choose_between',
206
+ 'quarel_do_not_use',
207
+ 'quarel_heres_a_story',
208
+ 'quarel_logic_test',
209
+ 'quarel_testing_students',
210
+ 'quartz_answer_question_based_on',
211
+ 'quartz_answer_question_below',
212
+ 'quartz_given_the_fact_answer_the_q',
213
+ 'quartz_having_read_above_passage',
214
+ 'quartz_paragraph_question_plain_concat',
215
+ 'quartz_read_passage_below_choose',
216
+ 'quartz_use_info_from_paragraph_question',
217
+ 'quartz_use_info_from_question_paragraph',
218
+ 'quoref_Answer_Friend_Question',
219
+ 'quoref_Answer_Question_Given_Context',
220
+ 'quoref_Answer_Test',
221
+ 'quoref_Context_Contains_Answer',
222
+ 'quoref_Find_Answer',
223
+ 'quoref_Found_Context_Online',
224
+ 'quoref_Given_Context_Answer_Question',
225
+ 'quoref_Guess_Answer',
226
+ 'quoref_Guess_Title_For_Context',
227
+ 'quoref_Read_And_Extract_',
228
+ 'quoref_What_Is_The_Answer',
229
+ 'ropes_background_new_situation_answer',
230
+ 'ropes_background_situation_middle',
231
+ 'ropes_given_background_situation',
232
+ 'ropes_new_situation_background_answer',
233
+ 'ropes_plain_background_situation',
234
+ 'ropes_plain_bottom_hint',
235
+ 'ropes_plain_no_background',
236
+ 'ropes_prompt_beginning',
237
+ 'ropes_prompt_bottom_hint_beginning',
238
+ 'ropes_prompt_bottom_no_hint',
239
+ 'ropes_prompt_mix',
240
+ 'ropes_read_background_situation',
241
+ 'rotten_tomatoes_Movie_Expressed_Sentiment',
242
+ 'rotten_tomatoes_Movie_Expressed_Sentiment_2',
243
+ 'rotten_tomatoes_Reviewer_Enjoyment',
244
+ 'rotten_tomatoes_Reviewer_Enjoyment_Yes_No',
245
+ 'rotten_tomatoes_Reviewer_Expressed_Sentiment',
246
+ 'rotten_tomatoes_Reviewer_Opinion_bad_good_choices',
247
+ 'rotten_tomatoes_Reviewer_Sentiment_Feeling',
248
+ 'rotten_tomatoes_Sentiment_with_choices_',
249
+ 'rotten_tomatoes_Text_Expressed_Sentiment',
250
+ 'rotten_tomatoes_Writer_Expressed_Sentiment',
251
+ 'samsum_Generate_a_summary_for_this_dialogue',
252
+ 'samsum_Given_the_above_dialogue_write_a_summary',
253
+ 'samsum_Sum_up_the_following_dialogue',
254
+ 'samsum_Summarize_',
255
+ 'samsum_Summarize_this_dialogue_',
256
+ 'samsum_To_sum_up_this_dialog',
257
+ 'samsum_Write_a_dialogue_that_match_this_summary',
258
+ 'sciq_Direct_Question',
259
+ 'sciq_Direct_Question_Closed_Book_',
260
+ 'sciq_Multiple_Choice',
261
+ 'sciq_Multiple_Choice_Closed_Book_',
262
+ 'sciq_Multiple_Choice_Question_First',
263
+ 'social_i_qa_Check_if_a_random_answer_is_valid_or_not',
264
+ 'social_i_qa_Generate_answer',
265
+ 'social_i_qa_Generate_the_question_from_the_answer',
266
+ 'social_i_qa_I_was_wondering',
267
+ 'social_i_qa_Show_choices_and_generate_answer',
268
+ 'social_i_qa_Show_choices_and_generate_index',
269
+ 'trec_fine_grained_ABBR',
270
+ 'trec_fine_grained_ABBR_context_first',
271
+ 'trec_fine_grained_DESC',
272
+ 'trec_fine_grained_DESC_context_first',
273
+ 'trec_fine_grained_ENTY',
274
+ 'trec_fine_grained_HUM',
275
+ 'trec_fine_grained_HUM_context_first',
276
+ 'trec_fine_grained_LOC',
277
+ 'trec_fine_grained_LOC_context_first',
278
+ 'trec_fine_grained_NUM',
279
+ 'trec_fine_grained_NUM_context_first',
280
+ 'trec_fine_grained_open',
281
+ 'trec_fine_grained_open_context_first',
282
+ 'trec_pick_the_best_descriptor',
283
+ 'trec_trec1',
284
+ 'trec_trec2',
285
+ 'trec_what_category_best_describe',
286
+ 'trec_which_category_best_describes',
287
+ 'wiki_bio_comprehension',
288
+ 'wiki_bio_guess_person',
289
+ 'wiki_bio_key_content',
290
+ 'wiki_bio_what_content',
291
+ 'wiki_bio_who',
292
+ 'wiki_hop_original_choose_best_object_affirmative_1',
293
+ 'wiki_hop_original_choose_best_object_affirmative_2',
294
+ 'wiki_hop_original_choose_best_object_affirmative_3',
295
+ 'wiki_hop_original_choose_best_object_interrogative_1',
296
+ 'wiki_hop_original_choose_best_object_interrogative_2',
297
+ 'wiki_hop_original_explain_relation',
298
+ 'wiki_hop_original_generate_object',
299
+ 'wiki_hop_original_generate_subject',
300
+ 'wiki_hop_original_generate_subject_and_object',
301
+ 'wiki_qa_Decide_good_answer',
302
+ 'wiki_qa_Direct_Answer_to_Question',
303
+ 'wiki_qa_Generate_Question_from_Topic',
304
+ 'wiki_qa_Is_This_True_',
305
+ 'wiki_qa_Jeopardy_style',
306
+ 'wiki_qa_Topic_Prediction_Answer_Only',
307
+ 'wiki_qa_Topic_Prediction_Question_Only',
308
+ 'wiki_qa_Topic_Prediction_Question_and_Answer_Pair',
309
+ 'wiki_qa_automatic_system',
310
+ 'wiki_qa_exercise',
311
+ 'wiki_qa_found_on_google',
312
+ 'wiqa_does_the_supposed_perturbation_have_an_effect',
313
+ 'wiqa_effect_with_label_answer',
314
+ 'wiqa_effect_with_string_answer',
315
+ 'wiqa_what_is_the_final_step_of_the_following_process',
316
+ 'wiqa_what_is_the_missing_first_step',
317
+ 'wiqa_what_might_be_the_first_step_of_the_process',
318
+ 'wiqa_what_might_be_the_last_step_of_the_process',
319
+ 'wiqa_which_of_the_following_is_the_supposed_perturbation',
320
+ 'xsum_DOC_boils_down_to_simple_idea_that',
321
+ 'xsum_DOC_given_above_write_one_sentence',
322
+ 'xsum_DOC_how_would_you_rephrase_few_words',
323
+ 'xsum_DOC_tldr',
324
+ 'xsum_DOC_write_summary_of_above',
325
+ 'xsum_article_DOC_summary',
326
+ 'xsum_college_roommate_asked_DOC_so_I_recap',
327
+ 'xsum_read_below_DOC_write_abstract',
328
+ 'xsum_summarize_DOC',
329
+ 'xsum_summarize_this_DOC_summary',
330
+ 'yelp_review_full_based_on_that',
331
+ 'yelp_review_full_format_rating',
332
+ 'yelp_review_full_format_score',
333
+ 'yelp_review_full_format_star',
334
+ 'yelp_review_full_on_a_scale',
335
+ 'yelp_review_full_so_i_would',
336
+ 'yelp_review_full_this_place'
337
+ ]
338
+
339
+ # Optionally, download everything first
340
+ # for task_name in TZERO_TASK_LIST:
341
+ # ds = load_dataset("bigscience/P3", task_name)
342
+
343
+ def write_to_jsonl_hub(task_name, split):
344
+ # Could also use ds.to_json()
345
+ ds = load_dataset("bigscience/P3", task_name)
346
+ if split in ds:
347
+ with jsonlines.open(f'p3_{task_name}_{split}.jsonl', mode='w') as writer:
348
+ for example in ds[split]:
349
+ writer.write({
350
+ "inputs": example["inputs_pretokenized"],
351
+ "targets": example["targets_pretokenized"]
352
+ })
353
+
354
+ def write_to_jsonl_disk(task_name, split):
355
+ ds = load_from_disk(f"{os.environ['six_ALL_CCFRSCRATCH']}/datasets/p3/{task_name}")
356
+ if split in ds:
357
+ with jsonlines.open(f'p3_{task_name}_{split}.jsonl', mode='w') as writer:
358
+ for example in ds[split]:
359
+ writer.write({
360
+ "inputs": example["inputs_pretokenized"],
361
+ "targets": example["targets_pretokenized"]
362
+ })
363
+
364
+ with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:
365
+ pool.map(partial(write_to_jsonl_disk, split="train"), TZERO_TASK_LIST)
366
+ pool.map(partial(write_to_jsonl_disk, split="validation"), TZERO_TASK_LIST)
data/p3/prepare_p3.slurm ADDED
@@ -0,0 +1,19 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=prepare-p3 # job name
3
+ #SBATCH --ntasks=1 # number of MP tasks
4
+ #SBATCH --nodes=1
5
+ #SBATCH --cpus-per-task=40 # number of cores per task
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
8
+ #SBATCH --output=%x-%j.out # output file name
9
+ #SBATCH --account=six@cpu
10
+ #SBATCH --partition=compil
11
+
12
+ set -x -e
13
+
14
+ source $six_ALL_CCFRWORK/start-prod
15
+
16
+ # use SCRATCH for building as it's much faster
17
+ cd $six_ALL_CCFRSCRATCH/datasets/bigscience___p3/
18
+ python $six_ALL_CCFRSCRATCH/datasets/bigscience___p3/prepare_p3.py
19
+
data/sampling_probs/calc_iterator_prob.py ADDED
@@ -0,0 +1,132 @@
1
+ import os
2
+ import math
3
+ import json
4
+ import argparse
5
+ import subprocess
6
+ from collections import OrderedDict
7
+ from new_to_old_format_data_path import output_sampling_probs_new_format
8
+
9
+ SPLIT = [0, 0.949, 0.999, 1.0]
10
+
11
+
12
+ def calc_multinomial_sampling_prob_with_penalty(dataset_size, alpha=.5):
13
+ """
14
+ Calculate multinomial probability distribution based on https://arxiv.org/pdf/1901.07291.pdf (section 3.1)
15
+ :dataset_size: A dictionary mapping each language (key) to its size (value).
16
+ """
17
+ tot_size = 0
18
+ probs = OrderedDict()
19
+ for lang, size in dataset_size.items():
20
+ tot_size += size
21
+ for lang, size in dataset_size.items():
22
+ probs[lang] = size / tot_size
23
+
24
+ pen_prob = OrderedDict()
25
+ tot_pen_prob = 0.0
26
+ for lang, prob in probs.items():
27
+ tot_pen_prob += (prob ** alpha)
28
+ sum_ = 0.0
29
+ for lang, prob in probs.items():
30
+ pen_prob[lang] = (prob ** alpha) / tot_pen_prob
31
+ sum_ += pen_prob[lang]
32
+ assert math.fabs(1 - sum_) < 1e-6
33
+ return pen_prob
34
+
35
+
36
+ def get_size_stats(args):
37
+ """
38
+ Calculate the size of each iterator's data file.
39
+ Recursively walks a directory for files matching the given name prefix and extension, and reports their sizes in the preferred unit.
40
+ """
41
+ lang_size_dict = {}
42
+ for (dirpath, dirnames, filenames) in os.walk(args.data_folder_path):
43
+ for filename in filenames:
44
+ if not (filename.startswith(args.name_prefix) and filename.endswith(args.extension_name)):
45
+ continue
46
+ full_file_path = os.path.join(dirpath, filename)
47
+ lang_size = subprocess.check_output("du -s {}".format(full_file_path), shell=True)
48
+ lang_size = int(lang_size.decode("utf-8").split("\t")[0])
49
+ if args.size_format == 'KB':
50
+ _conv = 1
51
+ elif args.size_format == 'MB':
52
+ _conv = 1024
53
+ elif args.size_format == 'GB':
54
+ _conv = 1024 * 1024
55
+ elif args.size_format == 'TB':
56
+ _conv = 1024 * 1024 * 1024
57
+ lang_size_ = round(lang_size / float(_conv), 2)
58
+ lang_size_dict[full_file_path] = lang_size_
59
+ return lang_size_dict
60
+
61
+
62
+ def print_stat(args, lang_size_dict, value_name='size'):
63
+ """
64
+ Print size statistics.
65
+ """
66
+ lang_list = sorted([(k, v) for k, v in lang_size_dict.items()], key=lambda tup: tup[1])
67
+ total_size = 0
68
+ print("\nLanguage : ({})".format(value_name))
69
+ print("-" * 20)
70
+ for lang, size in lang_list:
71
+ print("{} : {}".format(lang, size))
72
+ total_size += size
73
+ print("-" * 20)
74
+ print("Total size : {}".format(total_size))
75
+
76
+
77
+ def removesuffix(string, suffix):
78
+ if string.endswith(suffix):
79
+ string = string[:-len(suffix)]
80
+ return string
81
+
82
+
83
+ def main():
84
+ parser = argparse.ArgumentParser()
85
+ parser.add_argument('--data-folder-path', type=str, required=True,
86
+ help='Path to the data folder')
87
+ parser.add_argument('--size-format', type=str, required=True,
88
+ help='Report sizes in kilobytes, megabytes, gigabytes or terabytes',
89
+ choices=['KB', 'MB', 'GB', 'TB'])
90
+ parser.add_argument('--alpha', type=float, required=True,
91
+ help='Sampling penalty exponent alpha: 1 keeps raw size shares, 0 gives uniform sampling.')
92
+ parser.add_argument('--output-dir', type=str, required=True,
93
+ help='Output directory where sampling prob_dict will be saved.')
94
+ parser.add_argument('--name-prefix', type=str, required=True,
95
+ help='File name prefix to match. The combination of `--name-prefix` and `--extension-name` selects the files.')
96
+ parser.add_argument('--extension-name', type=str, required=True,
97
+ help='Extension of the file to match. The combination of `--name-prefix` and `--extension-name` selects the files.')
98
+ parser.add_argument('--old-format', action="store_true",
99
+ help='Legacy option')
100
+
101
+ args = parser.parse_args()
102
+ size_dict = get_size_stats(args)
103
+ print_stat(args, size_dict, value_name=args.size_format)
104
+ sampling_probability = calc_multinomial_sampling_prob_with_penalty(
105
+ size_dict, alpha=args.alpha
106
+ )
107
+ print_stat(args, sampling_probability, 'probability')
108
+ total_contrib = 0
109
+ print("\nLanguage : Per epoch contribution in {}".format(args.size_format))
110
+ print("-" * 50)
111
+ for lang, prob in sampling_probability.items():
112
+ sampling_probability[lang] = (prob, size_dict[lang])
113
+ lang_contrib_size = round(size_dict[lang] * prob, 2)
114
+ print("{} : {} ({} -> {})".format(lang, prob, size_dict[lang], lang_contrib_size))
115
+ total_contrib += lang_contrib_size
116
+ print("-" * 50)
117
+ print("Total size : {}".format(total_contrib))
118
+
119
+ open(os.path.join(args.output_dir, 'iterator_selection_prob.{}.json'.format(args.alpha)), "w").write(
120
+ json.dumps(sampling_probability, indent=4)
121
+ )
122
+
123
+ if args.old_format:
124
+ with open(os.path.join(args.output_dir, "dataset_probabilities.{}.txt".format(args.alpha)), "w") as fout:
125
+ fout.write(
126
+ " ".join([f"{prob[0]} {removesuffix(path, '.bin')}" for path, prob in sampling_probability.items()]))
128
+ else:
129
+ output_sampling_probs_new_format(sampling_probability, args.output_dir, args.alpha)
130
+
131
+ if __name__ == '__main__':
132
+ main()
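
A toy check of calc_multinomial_sampling_prob_with_penalty above (sizes are illustrative, in GB): raw shares of 0.80/0.15/0.05 are flattened towards the smaller languages at alpha=0.5:

sizes = {"en": 800.0, "fr": 150.0, "sw": 50.0}  # illustrative sizes
probs = calc_multinomial_sampling_prob_with_penalty(sizes, alpha=0.5)
# -> {'en': ~0.594, 'fr': ~0.257, 'sw': ~0.149}
# alpha=1 would reproduce the raw shares; alpha=0 would give a uniform 1/3 each.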
data/sampling_probs/calc_iterator_prob.sh ADDED
@@ -0,0 +1,12 @@
1
+ mkdir -p $1
2
+ echo $1
3
+ echo $2
4
+ for alpha in .1 .2 .3 .4 .5 .6 .7 .8 .9; do
5
+ python data/sampling_probs/calc_iterator_prob.py \
6
+ --data-folder-path $2/ \
7
+ --size-format GB \
8
+ --alpha $alpha \
9
+ --output-dir $1 \
10
+ --name-prefix 'train' \
11
+ --extension-name 'bin'
12
+ done
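
Typical invocation of the sweep above is `bash data/sampling_probs/calc_iterator_prob.sh <output_dir> <meg_ds_data_dir>`: for each alpha in 0.1-0.9 it writes one iterator_selection_prob.{alpha}.json plus the corresponding train/valid/test data strings into <output_dir>.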
data/sampling_probs/new_to_old_format_data_path.py ADDED
@@ -0,0 +1,70 @@
1
+ import argparse
2
+ import os.path
3
+ from glob import glob
4
+ import json
5
+
6
+ SPLIT = [0, 0.949, 0.999, 1.0]
7
+
8
+
9
+ def finalize_dataset_string(dataset_string):
10
+ # remove a trailing comma if present
11
+ # surround with quotes
12
+ if dataset_string.endswith(","):
13
+ dataset_string = dataset_string[:-1]
14
+ return '"' + dataset_string + '"'
15
+
16
+
17
+ def get_longest_prefix_and_suffix(file1, file2):
18
+ # we're assuming all filepaths have the same format
19
+ prefix = max([i for i in range(len(file1)) if file2.startswith(file1[:i])])
20
+ suffix = min([i for i in range(len(file1) - 1, -1, -1) if file2.endswith(file1[i:])])
21
+ return prefix, suffix
22
+
23
+
24
+ def output_sampling_probs_new_format(sampling_probs, input_dir, alpha):
25
+ file_weights = [(k[:-4], v[0]) for k, v in sampling_probs.items()]
26
+
27
+ prefix, suffix = get_longest_prefix_and_suffix(file_weights[0][0], file_weights[1][0])
28
+
29
+ train_split_string = f"{SPLIT[0]}:{SPLIT[1]}"
30
+ valid_split_string = f"{SPLIT[1]}:{SPLIT[2]}"
31
+ test_split_string = f"{SPLIT[2]}:{SPLIT[3]}"
32
+
33
+ train_string = f"train:"
34
+ for file, weight in file_weights:
35
+ train_string += f" {weight} {train_split_string} {file},"
36
+ train_string = finalize_dataset_string(train_string)
37
+ with open(os.path.join(input_dir, f"train_data_string.{alpha}.txt"), "w") as f:
38
+ f.write(train_string)
39
+
40
+ valid_strings = ["all_valid:"]
41
+ for file, weight in file_weights:
42
+ valid_strings[0] += f" {weight} {valid_split_string} {file},"
43
+ language_code = file[prefix:suffix]
44
+ valid_strings.append(f"valid_{language_code}: 1 {valid_split_string} {file}")
45
+ valid_string = " ".join([finalize_dataset_string(valid_string) for valid_string in valid_strings])
46
+ with open(os.path.join(input_dir, f"valid_data_string.{alpha}.txt"), "w") as f:
47
+ f.write(valid_string)
48
+
49
+ test_strings = ["all_test:"]
50
+ for file, weight in file_weights:
51
+ test_strings[0] += f" {weight} {test_split_string} {file},"
52
+ language_code = file[prefix:suffix]
53
+ test_strings.append(f"test_{language_code}: 1 {test_split_string} {file}")
54
+ test_string = " ".join([finalize_dataset_string(test_string) for test_string in test_strings])
55
+ with open(os.path.join(input_dir, f"test_data_string.{alpha}.txt"), "w") as f:
56
+ f.write(test_string)
57
+
58
+
59
+ if __name__ == "__main__":
60
+ parser = argparse.ArgumentParser()
61
+ parser.add_argument('--input-files-dir', type=str, required=True,
62
+ help='Path to the data folder')
63
+ args = parser.parse_args()
64
+
65
+ for filename in glob(f'{args.input_files_dir}/*.json'):
66
+ # assuming alpha is of the form 0.x, this could break
67
+ alpha = filename[-8:-5]
68
+ # output_sampling_probs_new_format strips the trailing .bin from each path
69
+ sampling_probs = json.load(open(filename))
70
+ output_sampling_probs_new_format(sampling_probs, args.input_files_dir, alpha)
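
For illustration, given two hypothetical indexed datasets weighted 0.6/0.4, output_sampling_probs_new_format writes data strings in the format Meg-DS expects (paths and weights below are made up; the real input is iterator_selection_prob.{alpha}.json):

probs = {
    "/data/meg_ds_en_text_document.bin": (0.6, 800.0),
    "/data/meg_ds_fr_text_document.bin": (0.4, 150.0),
}
output_sampling_probs_new_format(probs, "/tmp", 0.5)
# /tmp/train_data_string.0.5.txt then contains:
# "train: 0.6 0:0.949 /data/meg_ds_en_text_document, 0.4 0:0.949 /data/meg_ds_fr_text_document"
# and valid_data_string.0.5.txt additionally gets per-language entries such as
# "valid_en: 1 0.949:0.999 /data/meg_ds_en_text_document" over the validation slice.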
data/xp3/download_all_datasets.py ADDED
@@ -0,0 +1,162 @@
1
+ from multiprocessing import Pool, cpu_count
2
+ import datasets
3
+ from promptsource.utils import load_dataset
4
+
5
+ all_datasets = [
6
+ ('glue','mrpc'),
7
+ ('glue','qqp'),
8
+ ('paws','labeled_final'),
9
+ ('ai2_arc','ARC-Challenge'),
10
+ ('ai2_arc','ARC-Easy'),
11
+ ('kilt_tasks','hotpotqa'),
12
+ ('trivia_qa','unfiltered'),
13
+ ('web_questions',None),
14
+ ('wiki_qa',None),
15
+ ('adversarial_qa','dbidaf'),
16
+ ('adversarial_qa','dbert'),
17
+ ('adversarial_qa','droberta'),
18
+ ('duorc','SelfRC'),
19
+ ('duorc','ParaphraseRC'),
20
+ ('ropes',None),
21
+ ('squad_v2',None),
22
+ ('super_glue','record'),
23
+ ('quoref',None),
24
+ ('cos_e','v1.11'),
25
+ ('cosmos_qa',None),
26
+ ('dream',None),
27
+ ('openbookqa','main'),
28
+ ('qasc',None),
29
+ ('quail',None),
30
+ ('quarel',None),
31
+ ('quartz',None),
32
+ ('race','high'),
33
+ ('race','middle'),
34
+ ('sciq',None),
35
+ ('social_i_qa',None),
36
+ ('super_glue','boolq'),
37
+ ('super_glue','multirc'),
38
+ ('wiki_hop','original'),
39
+ ('wiqa',None),
40
+ ('piqa',None),
41
+ ('amazon_polarity',None),
42
+ ('app_reviews',None),
43
+ ('imdb',None),
44
+ ('rotten_tomatoes',None),
45
+ ('yelp_review_full',None),
46
+ ('common_gen',None),
47
+ ('wiki_bio',None),
48
+ ('cnn_dailymail','3.0.0'),
49
+ ('gigaword',None),
50
+ ('multi_news',None),
51
+ ('samsum',None),
52
+ ('xsum',None),
53
+ ('ag_news',None),
54
+ ('dbpedia_14',None),
55
+ ('trec',None),
56
+ # Multilingual
57
+ ('GEM/wiki_lingua', 'ar'),
58
+ ('GEM/wiki_lingua', 'en'),
59
+ ('GEM/wiki_lingua', 'es'),
60
+ ('GEM/wiki_lingua', 'fr'),
61
+ ('GEM/wiki_lingua', 'hi'),
62
+ ('GEM/wiki_lingua', 'id'),
63
+ ('GEM/wiki_lingua', 'pt'),
64
+ ('GEM/wiki_lingua', 'vi'),
65
+ ('GEM/wiki_lingua', 'zh'),
66
+ ('Helsinki-NLP/tatoeba_mt', 'ara-eng'),
67
+ ('Helsinki-NLP/tatoeba_mt', 'ara-fra'),
68
+ ('Helsinki-NLP/tatoeba_mt', 'ara-spa'),
69
+ ('Helsinki-NLP/tatoeba_mt', 'ben-eng'),
70
+ ('Helsinki-NLP/tatoeba_mt', 'cat-eng'),
71
+ ('Helsinki-NLP/tatoeba_mt', 'cat-fra'),
72
+ ('Helsinki-NLP/tatoeba_mt', 'cat-por'),
73
+ ('Helsinki-NLP/tatoeba_mt', 'cat-spa'),
74
+ ('Helsinki-NLP/tatoeba_mt', 'eng-cmn_Hans'),
75
+ ('Helsinki-NLP/tatoeba_mt', 'eng-cmn_Hant'),
76
+ ('Helsinki-NLP/tatoeba_mt', 'eng-eus'),
77
+ ('Helsinki-NLP/tatoeba_mt', 'eng-fra'),
78
+ ('Helsinki-NLP/tatoeba_mt', 'eng-hin'),
79
+ ('Helsinki-NLP/tatoeba_mt', 'eng-ind'),
80
+ ('Helsinki-NLP/tatoeba_mt', 'eng-mal'),
81
+ ('Helsinki-NLP/tatoeba_mt', 'eng-mar'),
82
+ ('Helsinki-NLP/tatoeba_mt', 'eng-por'),
83
+ ('Helsinki-NLP/tatoeba_mt', 'eng-run'),
84
+ ('Helsinki-NLP/tatoeba_mt', 'eng-spa'),
85
+ ('Helsinki-NLP/tatoeba_mt', 'eng-swa'),
86
+ ('Helsinki-NLP/tatoeba_mt', 'eng-tam'),
87
+ ('Helsinki-NLP/tatoeba_mt', 'eng-tel'),
88
+ ('Helsinki-NLP/tatoeba_mt', 'eng-urd'),
89
+ ('Helsinki-NLP/tatoeba_mt', 'eng-vie'),
90
+ ('Helsinki-NLP/tatoeba_mt', 'eng-zho'),
91
+ ('Helsinki-NLP/tatoeba_mt', 'eus-spa'),
92
+ ('Helsinki-NLP/tatoeba_mt', 'fra-cmn_Hans'),
93
+ ('Helsinki-NLP/tatoeba_mt', 'fra-cmn_Hant'),
94
+ ('Helsinki-NLP/tatoeba_mt', 'fra-ind'),
95
+ ('Helsinki-NLP/tatoeba_mt', 'fra-por'),
96
+ ('Helsinki-NLP/tatoeba_mt', 'fra-run'),
97
+ ('Helsinki-NLP/tatoeba_mt', 'fra-spa'),
98
+ ('Helsinki-NLP/tatoeba_mt', 'fra-vie'),
99
+ ('Helsinki-NLP/tatoeba_mt', 'fra-zho'),
100
+ ('Helsinki-NLP/tatoeba_mt', 'hin-urd'),
101
+ ('Helsinki-NLP/tatoeba_mt', 'hin-zho'),
102
+ ('Helsinki-NLP/tatoeba_mt', 'por-cmn_Hans'),
103
+ ('Helsinki-NLP/tatoeba_mt', 'por-cmn_Hant'),
104
+ ('Helsinki-NLP/tatoeba_mt', 'por-spa'),
105
+ ('Helsinki-NLP/tatoeba_mt', 'por-zho'),
106
+ ('Helsinki-NLP/tatoeba_mt', 'run-spa'),
107
+ ('Helsinki-NLP/tatoeba_mt', 'spa-cmn_Hans'),
108
+ ('Helsinki-NLP/tatoeba_mt', 'spa-cmn_Hant'),
109
+ ('Helsinki-NLP/tatoeba_mt', 'spa-vie'),
110
+ ('Helsinki-NLP/tatoeba_mt', 'spa-zho'),
111
+ ('Helsinki-NLP/tatoeba_mt', 'vie-cmn_Hans'),
112
+ ('Helsinki-NLP/tatoeba_mt', 'vie-zho'),
113
+ ('xquad', 'xquad.ar'),
114
+ ('xquad', 'xquad.zh'),
115
+ ('xquad', 'xquad.vi'),
116
+ ('xquad', 'xquad.en'),
117
+ ('xquad', 'xquad.es'),
118
+ ('xquad', 'xquad.hi'),
119
+ ('paws-x', 'en'),
120
+ ('paws-x', 'es'),
121
+ ('paws-x', 'fr'),
122
+ ('paws-x', 'zh'),
123
+ ('khalidalt/tydiqa-primary', 'arabic'),
124
+ ('khalidalt/tydiqa-primary', 'bengali'),
125
+ ('khalidalt/tydiqa-primary', 'english'),
126
+ ('khalidalt/tydiqa-primary', 'indonesian'),
127
+ ('khalidalt/tydiqa-primary', 'swahili'),
128
+ ('khalidalt/tydiqa-primary', 'telugu'),
129
+ ('khalidalt/tydiqa-goldp', 'arabic'),
130
+ ('khalidalt/tydiqa-goldp', 'bengali'),
131
+ ('khalidalt/tydiqa-goldp', 'english'),
132
+ ('khalidalt/tydiqa-goldp', 'indonesian'),
133
+ ('khalidalt/tydiqa-goldp', 'swahili'),
134
+ ('khalidalt/tydiqa-goldp', 'telugu'),
135
+ ('Muennighoff/mbpp', 'sanitized'),
136
+ ("openai_humaneval", None),
137
+ ("great_code", None),
138
+ ("neural_code_search", "evaluation_dataset"),
139
+ # flores200
140
+ ]
141
+
142
+ print(all_datasets)
143
+
144
+ def download(names):
145
+ d_name, conf_name = names
146
+ try:
147
+ if d_name == "Helsinki-NLP/tatoeba_mt":
148
+ # Fixes a bug when loading a ds where only test split exists
149
+ ds = datasets.load_dataset(d_name, conf_name, download_config=datasets.DownloadConfig(num_proc=1), ignore_verifications=True, revision="842eb26634a9775f504bb2f3f43cd4cc5f9314d8")
150
+ else:
151
+ ds = load_dataset(d_name, conf_name, download_config=datasets.DownloadConfig(num_proc=1))
152
+ except Exception as e:
153
+ print(f"--- ERROR Dataset {d_name} {conf_name}\n")
154
+ print(e)
155
+ return
156
+
157
+ with Pool(cpu_count()) as pool:
158
+ _ = pool.map(
159
+ download,
160
+ all_datasets,
161
+ )
162
+ print("ALL DONE")
data/xp3/p3_jsonl_to_meg_bos.slurm ADDED
@@ -0,0 +1,66 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3jsonl # job name
3
+ #SBATCH --ntasks=1 # number of MP tasks
4
+ #SBATCH --nodes=1
5
+ #SBATCH --cpus-per-task=40 # number of cores per task
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --time=100:00:00 # maximum execution time (HH:MM:SS)
8
+ #SBATCH --output=%x-%j.out # output file name
9
+ #SBATCH --account=six@cpu
10
+ #SBATCH --partition=cpu_p1
11
+
12
+ set -x -e
13
+
14
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
15
+ export HF_DATASETS_OFFLINE=1
16
+ export TRANSFORMERS_OFFLINE=1
17
+
18
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed
19
+ cd $MEGATRON_DEEPSPEED_REPO
20
+
21
+
22
+ DATA_PATH=/gpfswork/rech/six/commun/bigscience-training/jsonls/p31t0/p31t0_train.jsonl
23
+ OUTPUT=/gpfswork/rech/six/commun/bigscience-training/p31t0bos/p31t0_train
24
+ TOKENIZER_PATH="bigscience/tokenizer"
25
+ python tools/preprocess_data.py \
26
+ --input $DATA_PATH \
27
+ --output-prefix $OUTPUT \
28
+ --dataset-impl mmap \
29
+ --json-key inputs \
30
+ --tokenizer-type PretrainedFromHF \
31
+ --tokenizer-name-or-path $TOKENIZER_PATH \
32
+ --append-bos \
33
+ --workers 35
34
+ python tools/preprocess_data.py \
35
+ --input $DATA_PATH \
36
+ --output-prefix $OUTPUT \
37
+ --dataset-impl mmap \
38
+ --json-key targets \
39
+ --tokenizer-type PretrainedFromHF \
40
+ --tokenizer-name-or-path $TOKENIZER_PATH \
41
+ --append-eod \
42
+ --workers 35
43
+
44
+
45
+ DATA_PATH=/gpfswork/rech/six/commun/bigscience-training/jsonls/p31t0/p31t0_validation.jsonl
46
+ OUTPUT=/gpfswork/rech/six/commun/bigscience-training/p31t0bos/p31t0_validation
47
+ TOKENIZER_PATH="bigscience/tokenizer"
48
+
49
+ python tools/preprocess_data.py \
50
+ --input $DATA_PATH \
51
+ --output-prefix $OUTPUT \
52
+ --dataset-impl mmap \
53
+ --json-key inputs \
54
+ --tokenizer-type PretrainedFromHF \
55
+ --tokenizer-name-or-path $TOKENIZER_PATH \
56
+ --append-bos \
57
+ --workers 35
58
+ python tools/preprocess_data.py \
59
+ --input $DATA_PATH \
60
+ --output-prefix $OUTPUT \
61
+ --dataset-impl mmap \
62
+ --json-key targets \
63
+ --tokenizer-type PretrainedFromHF \
64
+ --tokenizer-name-or-path $TOKENIZER_PATH \
65
+ --append-eod \
66
+ --workers 35
data/xp3/p3_jsonl_to_meg_eos.slurm ADDED
@@ -0,0 +1,66 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=xp3jsonl # job name
3
+ #SBATCH --ntasks=1 # number of MP tasks
4
+ #SBATCH --nodes=1
5
+ #SBATCH --cpus-per-task=40 # number of cores per task
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --time=100:00:00 # maximum execution time (HH:MM:SS)
8
+ #SBATCH --output=%x-%j.out # output file name
9
+ #SBATCH --account=six@cpu
10
+ #SBATCH --partition=cpu_p1
11
+
12
+ set -x -e
13
+
14
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
15
+ export HF_DATASETS_OFFLINE=1
16
+ export TRANSFORMERS_OFFLINE=1
17
+
18
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed
19
+ cd $MEGATRON_DEEPSPEED_REPO
20
+
21
+
22
+ DATA_PATH=/gpfswork/rech/six/commun/bigscience-training/jsonls/p31t0/p31t0_train.jsonl
23
+ OUTPUT=/gpfswork/rech/six/commun/bigscience-training/p31t0eos/p31t0_train
24
+ TOKENIZER_PATH="bigscience/tokenizer"
25
+ python tools/preprocess_data.py \
26
+ --input $DATA_PATH \
27
+ --output-prefix $OUTPUT \
28
+ --dataset-impl mmap \
29
+ --json-key inputs \
30
+ --tokenizer-type PretrainedFromHF \
31
+ --tokenizer-name-or-path $TOKENIZER_PATH \
32
+ --append-eod \
33
+ --workers 35
34
+ python tools/preprocess_data.py \
35
+ --input $DATA_PATH \
36
+ --output-prefix $OUTPUT \
37
+ --dataset-impl mmap \
38
+ --json-key targets \
39
+ --tokenizer-type PretrainedFromHF \
40
+ --tokenizer-name-or-path $TOKENIZER_PATH \
41
+ --append-eod \
42
+ --workers 35
43
+
44
+
45
+ DATA_PATH=/gpfswork/rech/six/commun/bigscience-training/jsonls/p31t0/p31t0_validation.jsonl
46
+ OUTPUT=/gpfswork/rech/six/commun/bigscience-training/p31t0eos/p31t0_validation
47
+ TOKENIZER_PATH="bigscience/tokenizer"
48
+
49
+ python tools/preprocess_data.py \
50
+ --input $DATA_PATH \
51
+ --output-prefix $OUTPUT \
52
+ --dataset-impl mmap \
53
+ --json-key inputs \
54
+ --tokenizer-type PretrainedFromHF \
55
+ --tokenizer-name-or-path $TOKENIZER_PATH \
56
+ --append-eod \
57
+ --workers 35
58
+ python tools/preprocess_data.py \
59
+ --input $DATA_PATH \
60
+ --output-prefix $OUTPUT \
61
+ --dataset-impl mmap \
62
+ --json-key targets \
63
+ --tokenizer-type PretrainedFromHF \
64
+ --tokenizer-name-or-path $TOKENIZER_PATH \
65
+ --append-eod \
66
+ --workers 35
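
The bos and eos scripts above are identical except for the special token appended to the tokenized "inputs" key (--append-bos vs --append-eod); "targets" always get --append-eod. Assuming the usual Megatron-DeepSpeed output naming of {output_prefix}_{json_key}_document.{bin,idx}, each run should leave behind pairs such as:

p31t0_train_inputs_document.bin    p31t0_train_inputs_document.idx
p31t0_train_targets_document.bin   p31t0_train_targets_document.idx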
data/xp3/prepare_xp3_train.py ADDED
@@ -0,0 +1,1194 @@
1
+ from functools import partial
2
+ import json
3
+ import multiprocessing
4
+ import os
5
+ import random
6
+
7
+ from datasets import load_dataset
8
+ # pip install -q iso-639
9
+ from iso639 import languages
10
+ from promptsource.templates import DatasetTemplates
11
+
12
+ # Set to False to use multilingual prompts e.g. 'id' for xcopa/id instead of 'en'
13
+ USE_ENGLISH_PROMPTS = True
14
+
15
+ MAX_EXAMPLES_PER_DATASET_PROMPT = 100_000
16
+
17
+ STORY_CLOZE_DIR = "/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/story_cloze_data"
18
+ XSTORY_CLOZE_DIR = "/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0/xstory_cloze_data"
19
+
20
+ # Some datasets have test sets with hidden labels; their templates still render, but only into noise,
21
+ # e.g. piqa test labels are all [-1], which still works as a list index, resulting in
22
+ # noise samples where the label is always the same
23
+ SKIP_PROMPTS = {
24
+ "common_gen": {"test": ["all"]},
25
+ "piqa": {"test": ["all"]},
26
+ "qasc": {"test": ["all"]},
27
+ "imdb": {"unsupervised": ["all"]},
28
+ "glue/qqp": {"test": ["all"]},
30
+ "cosmos_qa": {"test": [
31
+ "description_context_question_answer_text",
32
+ "description_context_question_text",
33
+ "description_context_question_answer_id",
34
+ "context_answer_to_question",
35
+ "context_description_question_answer_text",
36
+ "context_description_question_answer_id",
37
+ "context_question_description_answer_id",
38
+ "context_description_question_text",
39
+ "context_question_description_answer_text",
40
+ "only_question_answer",
41
+ "no_prompt_id",
42
+ "context_question_description_text",
43
+ "no_prompt_text",
44
+ ]},
45
+ "clue/tnews": {"test": ["all"]},
46
+ "clue/csl": {"test": ["all"]},
47
+ "clue/cmrc2018": {"test": ["generate_question", "in_an_exam", "answer_in_the_passage", "answer_following_question", "xp3longcontinue"]},
48
+ "clue/drcd": {"test": ["generate_question", "in_an_exam", "answer_in_the_passage", "answer_following_question", "xp3longcontinue"]},
49
+ "hellaswag": {"test": ["complete_first_then", "Topic of the context", "Open-ended completion", "Randomized prompts template", "Appropriate continuation - Yes or No", "Predict ending with hint", "Open-ended start", "Reversed appropriate continuation - Yes or No", "how_ends", "if_begins_how_continues"]},
50
+ }
51
+
52
+ DS_TO_ENG_PROMPT = {
53
+ "xcopa": "en",
54
+ "Muennighoff/xstory_cloze": "en",
55
+ "Muennighoff/xwinograd": "en",
56
+ 'GEM/wiki_lingua': 'en_en', # Contains correct language names
57
+ 'xnli': 'en',
58
+ "paws-x": "en",
59
+ "mlqa": "mlqa.en.en",
60
+ "xquad": "xquad.en",
61
+ "khalidalt/tydiqa-primary": "english",
62
+ "khalidalt/tydiqa-goldp": "english",
63
+ "pasinit/xlwic": "en",
64
+ "GEM/xlsum": "english",
65
+ "GEM/BiSECT": "en",
66
+ }
67
+
68
+ BIAS_FAIRNESS = [
69
+ ('crows_pairs', None),
70
+ ('jigsaw_toxicity_pred', None),
71
+ ('super_glue','axg'),
72
+ ('wino_bias','type1_anti'),
73
+ ('wino_bias','type2_anti'),
74
+ ('wino_bias','type1_pro'),
75
+ ('wino_bias','type2_pro'),
76
+ ]
77
+
78
+ EVAL_DATASETS_L1 = [
79
+ # ('super_glue','wsc.fixed'), # Not used due to time constraints
80
+ ('winogrande','winogrande_xl'),
81
+ ('super_glue','cb'),
82
+ ('super_glue','rte'),
83
+ ('anli',None),
84
+ ('story_cloze', '2016'),
85
+ ('Muennighoff/xstory_cloze', 'ar'),
86
+ ('Muennighoff/xstory_cloze', 'es'),
87
+ ('Muennighoff/xstory_cloze', 'eu'),
88
+ ('Muennighoff/xstory_cloze', 'id'),
89
+ ('Muennighoff/xstory_cloze', 'hi'),
90
+ ('Muennighoff/xstory_cloze', 'te'),
91
+ ('Muennighoff/xstory_cloze', 'sw'),
92
+ ('Muennighoff/xstory_cloze', 'zh'),
93
+ # ('hellaswag', None), # Not used due to time constraints
94
+ ('super_glue', 'copa'),
95
+ # Multilingual
96
+ ('Muennighoff/xwinograd','en'),
97
+ ('Muennighoff/xwinograd','fr'),
98
+ ('Muennighoff/xwinograd','pt'),
99
+ ('Muennighoff/xwinograd','zh'),
100
+ # ('clue', 'cluewsc2020'), # Included in 'Muennighoff/xwinograd','zh'
101
+ ('xcopa','id'),
102
+ ('xcopa','ta'),
103
+ ('xcopa','sw'),
104
+ ('xcopa','vi'),
105
+ ('xcopa','zh'),
106
+ ("xnli", "ar"),
107
+ ("xnli", "en"),
108
+ ("xnli", "es"),
109
+ ("xnli", "fr"),
110
+ ("xnli", "hi"),
111
+ ("xnli", "sw"),
112
+ ("xnli", "ur"),
113
+ ("xnli", "vi"),
114
+ ("xnli", "zh"),
115
+ # ("openai_humaneval", None), # Used without prompts in evaluation
116
+ # ("multi_eurlex", "all_languages")
117
+ ]
118
+
119
+ ADD_TRAIN_DATASETS_L1_XP3ALL = [
120
+ ('super_glue','wsc.fixed'),
121
+ ('winogrande','winogrande_xl'),
122
+ ('story_cloze', '2016'),
123
+ ('Muennighoff/xstory_cloze', 'ar'),
124
+ ('Muennighoff/xstory_cloze', 'es'),
125
+ ('Muennighoff/xstory_cloze', 'eu'),
126
+ ('Muennighoff/xstory_cloze', 'id'),
127
+ ('Muennighoff/xstory_cloze', 'hi'),
128
+ ('Muennighoff/xstory_cloze', 'te'),
129
+ ('Muennighoff/xstory_cloze', 'sw'),
130
+ ('Muennighoff/xstory_cloze', 'zh'),
131
+ ('hellaswag', None),
132
+ ('super_glue', 'copa'),
133
+ # Multilingual
134
+ ('Muennighoff/xwinograd','en'),
135
+ ('Muennighoff/xwinograd','fr'),
136
+ ('Muennighoff/xwinograd','pt'),
137
+ ('Muennighoff/xwinograd','zh'),
138
+ ('clue', 'cluewsc2020'),
139
+ ('xcopa','id'),
140
+ ('xcopa','ta'),
141
+ ('xcopa','sw'),
142
+ ('xcopa','vi'),
143
+ ('xcopa','zh'),
144
+ ("multi_eurlex", "all_languages")
145
+ # ("openai_humaneval", None), # Low quality prompts
146
+ ]
147
+
148
+ EVAL_DATASETS_L2 = [
149
+ ('Muennighoff/xwinograd','jp'),
150
+ ('Muennighoff/xwinograd','ru'),
151
+ ('xcopa','et'),
152
+ ('xcopa','ht'),
153
+ ('xcopa','it'),
154
+ ('xcopa','qu'),
155
+ ('xcopa','th'),
156
+ ('xcopa','tr'),
157
+ ("xnli", "bg"),
158
+ ("xnli", "de"),
159
+ ("xnli", "el"),
160
+ ("xnli", "ru"),
161
+ ("xnli", "th"),
162
+ ("xnli", "tr"),
163
+ ]
164
+
165
+ TRAIN_DATASETS = [
166
+ # English-only
167
+ ('glue','mrpc'),
168
+ ('glue','qqp'),
169
+ ('paws','labeled_final'),
170
+ ('ai2_arc','ARC-Challenge'),
171
+ ('ai2_arc','ARC-Easy'),
172
+ ('kilt_tasks','hotpotqa'),
173
+ ('trivia_qa','unfiltered'),
174
+ ('web_questions',None),
175
+ ('wiki_qa',None),
176
+ ('adversarial_qa','dbidaf'),
177
+ ('adversarial_qa','dbert'),
178
+ ('adversarial_qa','droberta'),
179
+ ('duorc','SelfRC'),
180
+ ('duorc','ParaphraseRC'),
181
+ ('ropes',None),
182
+ ('squad_v2',None),
183
+ ('super_glue','record'),
184
+ ('quoref',None),
185
+ ('cos_e','v1.11'),
186
+ ('cosmos_qa',None),
187
+ ('dream',None),
188
+ ('openbookqa','main'),
189
+ ('qasc',None),
190
+ ('quail',None),
191
+ ('quarel',None),
192
+ ('quartz',None),
193
+ ('race','high'),
194
+ ('race','middle'),
195
+ ('sciq',None),
196
+ ('social_i_qa',None),
197
+ ('super_glue','boolq'),
198
+ ('super_glue','multirc'),
199
+ ('wiki_hop','original'),
200
+ ('wiqa',None),
201
+ ('piqa',None),
202
+ ('amazon_polarity',None),
203
+ ('app_reviews',None),
204
+ ('imdb',None),
205
+ ('rotten_tomatoes',None),
206
+ ('yelp_review_full',None),
207
+ ('common_gen',None),
208
+ ('wiki_bio',None),
209
+ ('cnn_dailymail','3.0.0'),
210
+ ('gigaword',None),
211
+ ('multi_news',None),
212
+ ('samsum',None),
213
+ ('xsum',None),
214
+ ('ag_news',None),
215
+ ('dbpedia_14',None),
216
+ ('trec',None),
217
+ # Multilingual
218
+ ('GEM/wiki_lingua', 'ar'),
219
+ ('GEM/wiki_lingua', 'en'),
220
+ ('GEM/wiki_lingua', 'es'),
221
+ ('GEM/wiki_lingua', 'fr'),
222
+ ('GEM/wiki_lingua', 'hi'),
223
+ ('GEM/wiki_lingua', 'id'),
224
+ ('GEM/wiki_lingua', 'pt'),
225
+ ('GEM/wiki_lingua', 'vi'),
226
+ ('GEM/wiki_lingua', 'zh'),
227
+ ('Helsinki-NLP/tatoeba_mt', 'ara-eng'),
228
+ ('Helsinki-NLP/tatoeba_mt', 'ara-fra'),
229
+ ('Helsinki-NLP/tatoeba_mt', 'ara-spa'),
230
+ ('Helsinki-NLP/tatoeba_mt', 'ben-eng'),
231
+ ('Helsinki-NLP/tatoeba_mt', 'cat-eng'),
232
+ ('Helsinki-NLP/tatoeba_mt', 'cat-fra'),
233
+ ('Helsinki-NLP/tatoeba_mt', 'cat-por'),
234
+ ('Helsinki-NLP/tatoeba_mt', 'cat-spa'),
235
+ ('Helsinki-NLP/tatoeba_mt', 'eng-cmn_Hans'),
236
+ ('Helsinki-NLP/tatoeba_mt', 'eng-cmn_Hant'),
237
+ ('Helsinki-NLP/tatoeba_mt', 'eng-eus'),
238
+ ('Helsinki-NLP/tatoeba_mt', 'eng-fra'),
239
+ ('Helsinki-NLP/tatoeba_mt', 'eng-hin'),
240
+ ('Helsinki-NLP/tatoeba_mt', 'eng-ind'),
241
+ ('Helsinki-NLP/tatoeba_mt', 'eng-mal'),
242
+ ('Helsinki-NLP/tatoeba_mt', 'eng-mar'),
243
+ ('Helsinki-NLP/tatoeba_mt', 'eng-por'),
244
+ ('Helsinki-NLP/tatoeba_mt', 'eng-run'),
245
+ ('Helsinki-NLP/tatoeba_mt', 'eng-spa'),
246
+ ('Helsinki-NLP/tatoeba_mt', 'eng-swa'),
247
+ ('Helsinki-NLP/tatoeba_mt', 'eng-tam'),
248
+ ('Helsinki-NLP/tatoeba_mt', 'eng-tel'),
249
+ ('Helsinki-NLP/tatoeba_mt', 'eng-urd'),
250
+ ('Helsinki-NLP/tatoeba_mt', 'eng-vie'),
251
+ ('Helsinki-NLP/tatoeba_mt', 'eng-zho'),
252
+ ('Helsinki-NLP/tatoeba_mt', 'eus-spa'),
253
+ ('Helsinki-NLP/tatoeba_mt', 'fra-cmn_Hans'),
254
+ ('Helsinki-NLP/tatoeba_mt', 'fra-cmn_Hant'),
255
+ ('Helsinki-NLP/tatoeba_mt', 'fra-ind'),
256
+ ('Helsinki-NLP/tatoeba_mt', 'fra-por'),
257
+ ('Helsinki-NLP/tatoeba_mt', 'fra-run'),
258
+ ('Helsinki-NLP/tatoeba_mt', 'fra-spa'),
259
+ ('Helsinki-NLP/tatoeba_mt', 'fra-vie'),
260
+ ('Helsinki-NLP/tatoeba_mt', 'fra-zho'),
261
+ ('Helsinki-NLP/tatoeba_mt', 'hin-urd'),
262
+ ('Helsinki-NLP/tatoeba_mt', 'hin-zho'),
263
+ ('Helsinki-NLP/tatoeba_mt', 'por-cmn_Hans'),
264
+ ('Helsinki-NLP/tatoeba_mt', 'por-cmn_Hant'),
265
+ ('Helsinki-NLP/tatoeba_mt', 'por-spa'),
266
+ ('Helsinki-NLP/tatoeba_mt', 'por-zho'),
267
+ ('Helsinki-NLP/tatoeba_mt', 'run-spa'),
268
+ ('Helsinki-NLP/tatoeba_mt', 'spa-cmn_Hans'),
269
+ ('Helsinki-NLP/tatoeba_mt', 'spa-cmn_Hant'),
270
+ ('Helsinki-NLP/tatoeba_mt', 'spa-vie'),
271
+ ('Helsinki-NLP/tatoeba_mt', 'spa-zho'),
272
+ ('Helsinki-NLP/tatoeba_mt', 'vie-cmn_Hans'),
273
+ ('Helsinki-NLP/tatoeba_mt', 'vie-zho'),
274
+ ('xquad', 'xquad.ar'),
275
+ ('xquad', 'xquad.zh'),
276
+ ('xquad', 'xquad.vi'),
277
+ ('xquad', 'xquad.en'),
278
+ ('xquad', 'xquad.es'),
279
+ ('xquad', 'xquad.hi'),
280
+ ('mlqa', 'mlqa.ar.ar'),
281
+ ('mlqa', 'mlqa.vi.vi'),
282
+ ('mlqa', 'mlqa.zh.zh'),
283
+ ('mlqa', 'mlqa.es.es'),
284
+ ('mlqa', 'mlqa.en.en'),
285
+ ('mlqa', 'mlqa.hi.hi'),
286
+
287
+ ('mlqa', 'mlqa.ar.vi'),
288
+ ('mlqa', 'mlqa.ar.zh'),
289
+ ('mlqa', 'mlqa.ar.es'),
290
+ ('mlqa', 'mlqa.ar.en'),
291
+ ('mlqa', 'mlqa.ar.hi'),
292
+
293
+ ('mlqa', 'mlqa.vi.ar'),
294
+ ('mlqa', 'mlqa.vi.zh'),
295
+ ('mlqa', 'mlqa.vi.es'),
296
+ ('mlqa', 'mlqa.vi.en'),
297
+ ('mlqa', 'mlqa.vi.hi'),
298
+
299
+ ('mlqa', 'mlqa.zh.ar'),
300
+ ('mlqa', 'mlqa.zh.vi'),
301
+ ('mlqa', 'mlqa.zh.es'),
302
+ ('mlqa', 'mlqa.zh.en'),
303
+ ('mlqa', 'mlqa.zh.hi'),
304
+
305
+ ('mlqa', 'mlqa.es.ar'),
306
+ ('mlqa', 'mlqa.es.vi'),
307
+ ('mlqa', 'mlqa.es.zh'),
308
+ ('mlqa', 'mlqa.es.en'),
309
+ ('mlqa', 'mlqa.es.hi'),
310
+
311
+ ('mlqa', 'mlqa.en.ar'),
312
+ ('mlqa', 'mlqa.es.vi'),
313
+ ('mlqa', 'mlqa.es.zh'),
314
+ ('mlqa', 'mlqa.es.es'),
315
+ ('mlqa', 'mlqa.es.hi'),
316
+
317
+ ('mlqa', 'mlqa.hi.ar'),
318
+ ('mlqa', 'mlqa.hi.vi'),
319
+ ('mlqa', 'mlqa.hi.zh'),
320
+ ('mlqa', 'mlqa.hi.es'),
321
+ ('mlqa', 'mlqa.hi.en'),
322
+
323
+ ('paws-x', 'en'),
324
+ ('paws-x', 'es'),
325
+ ('paws-x', 'fr'),
326
+ ('paws-x', 'zh'),
327
+ ('khalidalt/tydiqa-primary', 'arabic'),
328
+ ('khalidalt/tydiqa-primary', 'bengali'),
329
+ ('khalidalt/tydiqa-primary', 'english'),
330
+ ('khalidalt/tydiqa-primary', 'indonesian'),
331
+ ('khalidalt/tydiqa-primary', 'swahili'),
332
+ ('khalidalt/tydiqa-primary', 'telugu'),
333
+ ('khalidalt/tydiqa-goldp', 'arabic'),
334
+ ('khalidalt/tydiqa-goldp', 'bengali'),
335
+ ('khalidalt/tydiqa-goldp', 'english'),
336
+ ('khalidalt/tydiqa-goldp', 'indonesian'),
337
+ ('khalidalt/tydiqa-goldp', 'swahili'),
338
+ ('khalidalt/tydiqa-goldp', 'telugu'),
339
+ ('Muennighoff/mbpp', 'sanitized'),
340
+ ("great_code", None),
341
+ ("neural_code_search", "evaluation_dataset"),
342
+ ("codeparrot/codecomplex", "codeparrot--codecomplex"),
343
+ ("codeparrot/github-jupyter-text-code-pairs", None),
344
+ ("codeparrot/apps", "all"),
345
+ ("codeparrot/xlcost-text-to-code", "Python-program-level"),
346
+ ("codeparrot/xlcost-text-to-code", "C-program-level"),
347
+ ("codeparrot/xlcost-text-to-code", "C++-program-level"),
348
+ ("codeparrot/xlcost-text-to-code", "Csharp-program-level"),
349
+ ("codeparrot/xlcost-text-to-code", "Java-program-level"),
350
+ ("codeparrot/xlcost-text-to-code", "Javascript-program-level"),
351
+ ("codeparrot/xlcost-text-to-code", "PHP-program-level"),
352
+ ("teven/code_contests", None),
353
+ ("teven/code_docstring_corpus", "top_level"),
354
+ ("Fraser/python-state-changes", None),
355
+ ('clue', 'c3'),
356
+ ('clue', 'cmrc2018'),
357
+ ('clue', 'csl'),
358
+ ('clue', 'drcd'),
359
+ ('clue', 'tnews'),
360
+ ('super_glue', 'wic'),
361
+ ('pasinit/xlwic', "xlwic_en_zh"),
362
+ ('pasinit/xlwic', "xlwic_fr_fr"),
363
+ ('GEM/BiSECT', "en"),
364
+ ('GEM/BiSECT', "es"),
365
+ ('GEM/BiSECT', "fr"),
366
+ ('GEM/xlsum', "arabic"),
367
+ ('GEM/xlsum', "bengali"),
368
+ ('GEM/xlsum', "chinese_simplified"),
369
+ ('GEM/xlsum', "chinese_traditional"),
370
+ ('GEM/xlsum', "english"),
371
+ ('GEM/xlsum', "french"),
372
+ ('GEM/xlsum', "gujarati"),
373
+ ('GEM/xlsum', "hindi"),
374
+ ('GEM/xlsum', "igbo"),
375
+ ('GEM/xlsum', "indonesian"),
376
+ ('GEM/xlsum', "kirundi"),
377
+ ('GEM/xlsum', "marathi"),
378
+ ('GEM/xlsum', "nepali"),
379
+ ('GEM/xlsum', "portuguese"),
380
+ ('GEM/xlsum', "punjabi"),
381
+ ('GEM/xlsum', "spanish"),
382
+ ('GEM/xlsum', "swahili"),
383
+ ('GEM/xlsum', "tamil"),
384
+ ('GEM/xlsum', "telugu"),
385
+ ('GEM/xlsum', "urdu"),
386
+ ('GEM/xlsum', "vietnamese"),
387
+ ('GEM/xlsum', "yoruba"),
388
+ # flores200, wmt & more wikilingua added below
389
+ ]
390
+
391
+ FLORES_LANGS = [
392
+ ("Acehnese (Arabic script)", "ace_Arab"),
393
+ ("Acehnese (Latin script)", "ace_Latn"),
394
+ ("Mesopotamian Arabic", "acm_Arab"),
395
+ ("Ta’izzi-Adeni Arabic", "acq_Arab"),
396
+ ("Tunisian Arabic", "aeb_Arab"),
397
+ ("Afrikaans", "afr_Latn"),
398
+ ("South Levantine Arabic", "ajp_Arab"),
399
+ ("Akan", "aka_Latn"),
400
+ ("Amharic", "amh_Ethi"),
401
+ ("North Levantine Arabic", "apc_Arab"),
402
+ ("Modern Standard Arabic", "arb_Arab"),
403
+ ("Modern Standard Arabic (Romanized)", "arb_Latn"),
404
+ ("Najdi Arabic", "ars_Arab"),
405
+ ("Moroccan Arabic", "ary_Arab"),
406
+ ("Egyptian Arabic", "arz_Arab"),
407
+ ("Assamese", "asm_Beng"),
408
+ ("Asturian", "ast_Latn"),
409
+ ("Awadhi", "awa_Deva"),
410
+ ("Central Aymara", "ayr_Latn"),
411
+ ("South Azerbaijani", "azb_Arab"),
412
+ ("North Azerbaijani", "azj_Latn"),
413
+ ("Bashkir", "bak_Cyrl"),
414
+ ("Bambara", "bam_Latn"),
415
+ ("Balinese", "ban_Latn"),
416
+ ("Belarusian", "bel_Cyrl"),
417
+ ("Bemba", "bem_Latn"),
418
+ ("Bengali", "ben_Beng"),
419
+ ("Bhojpuri", "bho_Deva"),
420
+ ("Banjar (Arabic script)", "bjn_Arab"),
421
+ ("Banjar (Latin script)", "bjn_Latn"),
422
+ ("Standard Tibetan", "bod_Tibt"),
423
+ ("Bosnian", "bos_Latn"),
424
+ ("Buginese", "bug_Latn"),
425
+ ("Bulgarian", "bul_Cyrl"),
426
+ ("Catalan", "cat_Latn"),
427
+ ("Cebuano", "ceb_Latn"),
428
+ ("Czech", "ces_Latn"),
429
+ ("Chokwe", "cjk_Latn"),
430
+ ("Central Kurdish", "ckb_Arab"),
431
+ ("Crimean Tatar", "crh_Latn"),
432
+ ("Welsh", "cym_Latn"),
433
+ ("Danish", "dan_Latn"),
434
+ ("German", "deu_Latn"),
435
+ ("Southwestern Dinka", "dik_Latn"),
436
+ ("Dyula", "dyu_Latn"),
437
+ ("Dzongkha", "dzo_Tibt"),
438
+ ("Greek", "ell_Grek"),
439
+ ("English", "eng_Latn"),
440
+ ("Esperanto", "epo_Latn"),
441
+ ("Estonian", "est_Latn"),
442
+ ("Basque", "eus_Latn"),
443
+ ("Ewe", "ewe_Latn"),
444
+ ("Faroese", "fao_Latn"),
445
+ ("Fijian", "fij_Latn"),
446
+ ("Finnish", "fin_Latn"),
447
+ ("Fon", "fon_Latn"),
448
+ ("French", "fra_Latn"),
449
+ ("Friulian", "fur_Latn"),
450
+ ("Nigerian Fulfulde", "fuv_Latn"),
451
+ ("Scottish Gaelic", "gla_Latn"),
452
+ ("Irish", "gle_Latn"),
453
+ ("Galician", "glg_Latn"),
454
+ ("Guarani", "grn_Latn"),
455
+ ("Gujarati", "guj_Gujr"),
456
+ ("Haitian Creole", "hat_Latn"),
457
+ ("Hausa", "hau_Latn"),
458
+ ("Hebrew", "heb_Hebr"),
459
+ ("Hindi", "hin_Deva"),
460
+ ("Chhattisgarhi", "hne_Deva"),
461
+ ("Croatian", "hrv_Latn"),
462
+ ("Hungarian", "hun_Latn"),
463
+ ("Armenian", "hye_Armn"),
464
+ ("Igbo", "ibo_Latn"),
465
+ ("Ilocano", "ilo_Latn"),
466
+ ("Indonesian", "ind_Latn"),
467
+ ("Icelandic", "isl_Latn"),
468
+ ("Italian", "ita_Latn"),
469
+ ("Javanese", "jav_Latn"),
470
+ ("Japanese", "jpn_Jpan"),
471
+ ("Kabyle", "kab_Latn"),
472
+ ("Jingpho", "kac_Latn"),
473
+ ("Kamba", "kam_Latn"),
474
+ ("Kannada", "kan_Knda"),
475
+ ("Kashmiri (Arabic script)", "kas_Arab"),
476
+ ("Kashmiri (Devanagari script)", "kas_Deva"),
477
+ ("Georgian", "kat_Geor"),
478
+ ("Central Kanuri (Arabic script)", "knc_Arab"),
479
+ ("Central Kanuri (Latin script)", "knc_Latn"),
480
+ ("Kazakh", "kaz_Cyrl"),
481
+ ("Kabiyè", "kbp_Latn"),
482
+ ("Kabuverdianu", "kea_Latn"),
483
+ ("Khmer", "khm_Khmr"),
484
+ ("Kikuyu", "kik_Latn"),
485
+ ("Kinyarwanda", "kin_Latn"),
486
+ ("Kyrgyz", "kir_Cyrl"),
487
+ ("Kimbundu", "kmb_Latn"),
488
+ ("Northern Kurdish", "kmr_Latn"),
489
+ ("Kikongo", "kon_Latn"),
490
+ ("Korean", "kor_Hang"),
491
+ ("Lao", "lao_Laoo"),
492
+ ("Ligurian", "lij_Latn"),
493
+ ("Limburgish", "lim_Latn"),
494
+ ("Lingala", "lin_Latn"),
495
+ ("Lithuanian", "lit_Latn"),
496
+ ("Lombard", "lmo_Latn"),
497
+ ("Latgalian", "ltg_Latn"),
498
+ ("Luxembourgish", "ltz_Latn"),
499
+ ("Luba-Kasai", "lua_Latn"),
500
+ ("Ganda", "lug_Latn"),
501
+ ("Luo", "luo_Latn"),
502
+ ("Mizo", "lus_Latn"),
503
+ ("Standard Latvian", "lvs_Latn"),
504
+ ("Magahi", "mag_Deva"),
505
+ ("Maithili", "mai_Deva"),
506
+ ("Malayalam", "mal_Mlym"),
507
+ ("Marathi", "mar_Deva"),
508
+ ("Minangkabau (Arabic script)", "min_Arab"),
509
+ ("Minangkabau (Latin script)", "min_Latn"),
510
+ ("Macedonian", "mkd_Cyrl"),
511
+ ("Plateau Malagasy", "plt_Latn"),
512
+ ("Maltese", "mlt_Latn"),
513
+ ("Meitei (Bengali script)", "mni_Beng"),
514
+ ("Halh Mongolian", "khk_Cyrl"),
515
+ ("Mossi", "mos_Latn"),
516
+ ("Maori", "mri_Latn"),
517
+ ("Burmese", "mya_Mymr"),
518
+ ("Dutch", "nld_Latn"),
519
+ ("Norwegian Nynorsk", "nno_Latn"),
520
+ ("Norwegian Bokmål", "nob_Latn"),
521
+ ("Nepali", "npi_Deva"),
522
+ ("Northern Sotho", "nso_Latn"),
523
+ ("Nuer", "nus_Latn"),
524
+ ("Nyanja", "nya_Latn"),
525
+ ("Occitan", "oci_Latn"),
526
+ ("West Central Oromo", "gaz_Latn"),
527
+ ("Odia", "ory_Orya"),
528
+ ("Pangasinan", "pag_Latn"),
529
+ ("Eastern Panjabi", "pan_Guru"),
530
+ ("Papiamento", "pap_Latn"),
531
+ ("Western Persian", "pes_Arab"),
532
+ ("Polish", "pol_Latn"),
533
+ ("Portuguese", "por_Latn"),
534
+ ("Dari", "prs_Arab"),
535
+ ("Southern Pashto", "pbt_Arab"),
536
+ ("Ayacucho Quechua", "quy_Latn"),
537
+ ("Romanian", "ron_Latn"),
538
+ ("Rundi", "run_Latn"),
539
+ ("Russian", "rus_Cyrl"),
540
+ ("Sango", "sag_Latn"),
541
+ ("Sanskrit", "san_Deva"),
542
+ ("Santali", "sat_Olck"),
543
+ ("Sicilian", "scn_Latn"),
544
+ ("Shan", "shn_Mymr"),
545
+ ("Sinhala", "sin_Sinh"),
546
+ ("Slovak", "slk_Latn"),
547
+ ("Slovenian", "slv_Latn"),
548
+ ("Samoan", "smo_Latn"),
549
+ ("Shona", "sna_Latn"),
550
+ ("Sindhi", "snd_Arab"),
551
+ ("Somali", "som_Latn"),
552
+ ("Southern Sotho", "sot_Latn"),
553
+ ("Spanish", "spa_Latn"),
554
+ ("Tosk Albanian", "als_Latn"),
555
+ ("Sardinian", "srd_Latn"),
556
+ ("Serbian", "srp_Cyrl"),
557
+ ("Swati", "ssw_Latn"),
558
+ ("Sundanese", "sun_Latn"),
559
+ ("Swedish", "swe_Latn"),
560
+ ("Swahili", "swh_Latn"),
561
+ ("Silesian", "szl_Latn"),
562
+ ("Tamil", "tam_Taml"),
563
+ ("Tatar", "tat_Cyrl"),
564
+ ("Telugu", "tel_Telu"),
565
+ ("Tajik", "tgk_Cyrl"),
566
+ ("Tagalog", "tgl_Latn"),
567
+ ("Thai", "tha_Thai"),
568
+ ("Tigrinya", "tir_Ethi"),
569
+ ("Tamasheq (Latin script)", "taq_Latn"),
570
+ ("Tamasheq (Tifinagh script)", "taq_Tfng"),
571
+ ("Tok Pisin", "tpi_Latn"),
572
+ ("Tswana", "tsn_Latn"),
573
+ ("Tsonga", "tso_Latn"),
574
+ ("Turkmen", "tuk_Latn"),
575
+ ("Tumbuka", "tum_Latn"),
576
+ ("Turkish", "tur_Latn"),
577
+ ("Twi", "twi_Latn"),
578
+ ("Central Atlas Tamazight", "tzm_Tfng"),
579
+ ("Uyghur", "uig_Arab"),
580
+ ("Ukrainian", "ukr_Cyrl"),
581
+ ("Umbundu", "umb_Latn"),
582
+ ("Urdu", "urd_Arab"),
583
+ ("Northern Uzbek", "uzn_Latn"),
584
+ ("Venetian", "vec_Latn"),
585
+ ("Vietnamese", "vie_Latn"),
586
+ ("Waray", "war_Latn"),
587
+ ("Wolof", "wol_Latn"),
588
+ ("Xhosa", "xho_Latn"),
589
+ ("Eastern Yiddish", "ydd_Hebr"),
590
+ ("Yoruba", "yor_Latn"),
591
+ ("Yue Chinese", "yue_Hant"),
592
+ ("Chinese (Simplified)", "zho_Hans"),
593
+ ("Chinese (Traditional)", "zho_Hant"),
594
+ ("Standard Malay", "zsm_Latn"),
595
+ ("Zulu", "zul_Latn"),
596
+ ]
597
+
598
+ WMT22_LANGS = [
599
+ ("afr", "eng"),
600
+ ("afr", "som"),
601
+ ("amh", "eng"),
602
+ ("amh", "fra"),
603
+ ("amh", "nya"),
604
+ ("amh", "orm"),
605
+ ("amh", "sna"),
606
+ ("amh", "som"),
607
+ ("amh", "ssw"),
608
+ ("amh", "swh"),
609
+ ("amh", "tsn"),
610
+ ("amh", "tso"),
611
+ ("amh", "umb"),
612
+ ("amh", "xho"),
613
+ ("amh", "yor"),
614
+ ("amh", "zul"),
615
+ ("eng", "fuv"),
616
+ ("eng", "hau"),
617
+ ("eng", "ibo"),
618
+ ("eng", "kam"),
619
+ ("eng", "kin"),
620
+ ("eng", "lin"),
621
+ ("eng", "lug"),
622
+ ("eng", "luo"),
623
+ ("eng", "nso"),
624
+ ("eng", "nya"),
625
+ ("eng", "orm"),
626
+ ("eng", "sna"),
627
+ ("eng", "som"),
628
+ ("eng", "ssw"),
629
+ ("eng", "swh"),
630
+ ("eng", "tsn"),
631
+ ("eng", "tso"),
632
+ ("eng", "umb"),
633
+ ("eng", "wol"),
634
+ ("eng", "xho"),
635
+ ("eng", "yor"),
636
+ ("eng", "zul"),
637
+ ("fra", "hau"),
638
+ ("fra", "ibo"),
639
+ ("fra", "kam"),
640
+ ("fra", "kin"),
641
+ ("fra", "lin"),
642
+ ("fra", "lug"),
643
+ ("fra", "luo"),
644
+ ("fra", "nso"),
645
+ ("fra", "nya"),
646
+ ("fra", "orm"),
647
+ ("fra", "som"),
648
+ ("fra", "ssw"),
649
+ ("fra", "swh"),
650
+ ("fra", "tsn"),
651
+ ("fra", "tso"),
652
+ ("fra", "umb"),
653
+ ("fra", "wol"),
654
+ ("fra", "xho"),
655
+ ("fra", "zul"),
656
+ ("fuv", "hau"),
657
+ ("fuv", "ibo"),
658
+ ("fuv", "kam"),
659
+ ("fuv", "kin"),
660
+ ("fuv", "lug"),
661
+ ("fuv", "luo"),
662
+ ("fuv", "nso"),
663
+ ("fuv", "nya"),
664
+ ("fuv", "orm"),
665
+ ("fuv", "sna"),
666
+ ("fuv", "som"),
667
+ ("fuv", "ssw"),
668
+ ("fuv", "swh"),
669
+ ("fuv", "tsn"),
670
+ ("fuv", "tso"),
671
+ ("fuv", "umb"),
672
+ ("fuv", "xho"),
673
+ ("fuv", "yor"),
674
+ ("fuv", "zul"),
675
+ ("hau", "ibo"),
676
+ ("hau", "kam"),
677
+ ("hau", "kin"),
678
+ ("hau", "lug"),
679
+ ("hau", "luo"),
680
+ ("hau", "nso"),
681
+ ("hau", "nya"),
682
+ ("hau", "orm"),
683
+ ("hau", "sna"),
684
+ ("hau", "som"),
685
+ ("hau", "ssw"),
686
+ ("hau", "swh"),
687
+ ("hau", "tsn"),
688
+ ("hau", "tso"),
689
+ ("hau", "umb"),
690
+ ("hau", "xho"),
691
+ ("hau", "yor"),
692
+ ("hau", "zul"),
693
+ ("ibo", "kam"),
694
+ ("ibo", "kin"),
695
+ ("ibo", "lug"),
696
+ ("ibo", "luo"),
697
+ ("ibo", "nso"),
698
+ ("ibo", "nya"),
699
+ ("ibo", "orm"),
700
+ ("ibo", "sna"),
701
+ ("ibo", "som"),
702
+ ("ibo", "ssw"),
703
+ ("ibo", "swh"),
704
+ ("ibo", "tsn"),
705
+ ("ibo", "tso"),
706
+ ("ibo", "umb"),
707
+ ("ibo", "xho"),
708
+ ("ibo", "yor"),
709
+ ("ibo", "zul"),
710
+ ("kam", "kin"),
711
+ ("kam", "lug"),
712
+ ("kam", "luo"),
713
+ ("kam", "nso"),
714
+ ("kam", "nya"),
715
+ ("kam", "orm"),
716
+ ("kam", "sna"),
717
+ ("kam", "som"),
718
+ ("kam", "ssw"),
719
+ ("kam", "swh"),
720
+ ("kam", "tsn"),
721
+ ("kam", "tso"),
722
+ ("kam", "umb"),
723
+ ("kam", "xho"),
724
+ ("kam", "yor"),
725
+ ("kam", "zul"),
726
+ ("kin", "lug"),
727
+ ("kin", "luo"),
728
+ ("kin", "nso"),
729
+ ("kin", "nya"),
730
+ ("kin", "orm"),
731
+ ("kin", "sna"),
732
+ ("kin", "som"),
733
+ ("kin", "ssw"),
734
+ ("kin", "swh"),
735
+ ("kin", "tsn"),
736
+ ("kin", "tso"),
737
+ ("kin", "umb"),
738
+ ("kin", "xho"),
739
+ ("kin", "yor"),
740
+ ("kin", "zul"),
741
+ ("lug", "luo"),
742
+ ("lug", "nso"),
743
+ ("lug", "nya"),
744
+ ("lug", "orm"),
745
+ ("lug", "sna"),
746
+ ("lug", "som"),
747
+ ("lug", "ssw"),
748
+ ("lug", "swh"),
749
+ ("lug", "tsn"),
750
+ ("lug", "tso"),
751
+ ("lug", "umb"),
752
+ ("lug", "xho"),
753
+ ("lug", "yor"),
754
+ ("lug", "zul"),
755
+ ("luo", "nso"),
756
+ ("luo", "nya"),
757
+ ("luo", "orm"),
758
+ ("luo", "sna"),
759
+ ("luo", "som"),
760
+ ("luo", "ssw"),
761
+ ("luo", "swh"),
762
+ ("luo", "tsn"),
763
+ ("luo", "tso"),
764
+ ("luo", "umb"),
765
+ ("luo", "xho"),
766
+ ("luo", "yor"),
767
+ ("luo", "zul"),
768
+ ("nso", "nya"),
769
+ ("nso", "orm"),
770
+ ("nso", "sna"),
771
+ ("nso", "som"),
772
+ ("nso", "ssw"),
773
+ ("nso", "swh"),
774
+ ("nso", "tsn"),
775
+ ("nso", "tso"),
776
+ ("nso", "umb"),
777
+ ("nso", "xho"),
778
+ ("nso", "yor"),
779
+ ("nso", "zul"),
780
+ ("nya", "orm"),
781
+ ("nya", "sna"),
782
+ ("nya", "som"),
783
+ ("nya", "ssw"),
784
+ ("nya", "swh"),
785
+ ("nya", "tsn"),
786
+ ("nya", "tso"),
787
+ ("nya", "umb"),
788
+ ("nya", "xho"),
789
+ ("nya", "yor"),
790
+ ("nya", "zul"),
791
+ ("orm", "sna"),
792
+ ("orm", "som"),
793
+ ("orm", "ssw"),
794
+ ("orm", "swh"),
795
+ ("orm", "tsn"),
796
+ ("orm", "tso"),
797
+ ("orm", "umb"),
798
+ ("orm", "xho"),
799
+ ("orm", "yor"),
800
+ ("orm", "zul"),
801
+ ("sna", "som"),
802
+ ("sna", "ssw"),
803
+ ("sna", "swh"),
804
+ ("sna", "tsn"),
805
+ ("sna", "tso"),
806
+ ("sna", "umb"),
807
+ ("sna", "xho"),
808
+ ("sna", "yor"),
809
+ ("sna", "zul"),
810
+ ("som", "ssw"),
811
+ ("som", "swh"),
812
+ ("som", "tsn"),
813
+ ("som", "tso"),
814
+ ("som", "umb"),
815
+ ("som", "wol"),
816
+ ("som", "xho"),
817
+ ("som", "yor"),
818
+ ("som", "zul"),
819
+ ("ssw", "swh"),
820
+ ("ssw", "tsn"),
821
+ ("ssw", "tso"),
822
+ ("ssw", "umb"),
823
+ ("ssw", "xho"),
824
+ ("ssw", "yor"),
825
+ ("ssw", "zul"),
826
+ ("swh", "tsn"),
827
+ ("swh", "tso"),
828
+ ("swh", "umb"),
829
+ ("swh", "xho"),
830
+ ("swh", "yor"),
831
+ ("swh", "zul"),
832
+ ("tsn", "tso"),
833
+ ("tsn", "umb"),
834
+ ("tsn", "xho"),
835
+ ("tsn", "yor"),
836
+ ("tsn", "zul"),
837
+ ("tso", "umb"),
838
+ ("tso", "xho"),
839
+ ("tso", "yor"),
840
+ ("tso", "zul"),
841
+ ("umb", "xho"),
842
+ ("umb", "yor"),
843
+ ("umb", "zul"),
844
+ ("xho", "yor"),
845
+ ("xho", "zul"),
846
+ ("yor", "zul"),
847
+ ]
848
+
849
+ # Copied from metadata
850
+ BLOOM_LANGS = """
851
+ - ak
852
+ - ar
853
+ - as
854
+ - bm
855
+ - bn
856
+ - ca
857
+ - code
858
+ - en
859
+ - es
860
+ - eu
861
+ - fon
862
+ - fr
863
+ - gu
864
+ - hi
865
+ - id
866
+ - ig
867
+ - ki
868
+ - kn
869
+ - lg
870
+ - ln
871
+ - ml
872
+ - mr
873
+ - ne
874
+ - nso
875
+ - ny
876
+ - or
877
+ - pa
878
+ - pt
879
+ - rn
880
+ - rw
881
+ - sn
882
+ - st
883
+ - sw
884
+ - ta
885
+ - te
886
+ - tn
887
+ - ts
888
+ - tum
889
+ - tw
890
+ - ur
891
+ - vi
892
+ - wo
893
+ - xh
894
+ - yo
895
+ - zh
896
+ - zu
897
+ """
898
+
899
+ DS_TO_LANG = {
900
+ 'Muennighoff/mbpp': 'code',
901
+ 'openai_humaneval': 'code',
902
+ "great_code": "code",
903
+ "neural_code_search": "code",
904
+ "codeparrot/codecomplex": "code",
905
+ "codeparrot/github-jupyter-text-code-pairs": "code",
906
+ "codeparrot/apps": "code",
907
+ "Fraser/python-state-changes": "code",
908
+ "codeparrot/xlcost-text-to-code": "code",
909
+ "teven/code_contests": "code",
910
+ "teven/code_docstring_corpus": "code",
911
+ "clue": "zh",
912
+ "cmn": "zh", # == zho
913
+ "npi": "ne", # == npe
914
+ "ory": "or", # == ori
915
+ "swh": "sw", # == swa
916
+ "kirundi": "rn", # == rundi
917
+ "punjabi": "pa", # == panjabi
918
+ "chinese_simplified": "zh",
919
+ "chinese_traditional": "zh",
920
+ }
921
+
922
+
923
+
924
+ bloom_lang_codes_iso3 = []
925
+ bloom_lang_codes_iso2 = []
926
+ for lang in BLOOM_LANGS.split("\n")[1:-1]:
927
+ iso2 = lang.replace("- ", "")
928
+ DS_TO_LANG[iso2] = iso2
929
+ try:
930
+ name = languages.get(alpha2=iso2)
931
+ DS_TO_LANG[name.name.lower()] = iso2
932
+ # name may be e.g. 'swahili (macrolanguage)'; also add the plain 'swahili'
933
+ DS_TO_LANG[name.name.lower().split(" ")[0]] = iso2
934
+
935
+ iso3 = name.part3
936
+ DS_TO_LANG[iso3] = iso2
937
+ except KeyError:
938
+ print(f"Could not find iso3 code for {lang}.")
939
+
940
+ # Add GEM multilingual
941
+ WIKILINGUA_LANGS = ["ar", "en", "es", "fr", "hi", "id", "pt", "vi", "zh"]
942
+ for l1_code in WIKILINGUA_LANGS:
943
+ for l2_code in WIKILINGUA_LANGS:
944
+ if l1_code == l2_code:
945
+ continue
946
+ TRAIN_DATASETS.append(("GEM/wiki_lingua", f"{l1_code}_{l2_code}"))
947
+
948
+ # Add flores200
949
+ for (l1_name, l1_code) in FLORES_LANGS:
950
+ for (l2_name, l2_code) in FLORES_LANGS:
951
+ if l1_code.split("_")[0] not in DS_TO_LANG or l2_code.split("_")[0] not in DS_TO_LANG:
952
+ print(f"Skipping as {l1_name} or {l2_name} was not pre-trained on.")
953
+ continue
954
+ elif l1_name == l2_name:
955
+ continue
956
+ TRAIN_DATASETS.append(("facebook/flores", f"{l1_code}-{l2_code}"))
957
+
958
+ # Add wmt22
959
+ for (l1_code, l2_code) in WMT22_LANGS:
960
+ if l1_code not in DS_TO_LANG or l2_code not in DS_TO_LANG:
961
+ print(f"Skipping as {l1_code} or {l2_code} was not pre-trained on.")
962
+ continue
963
+ elif l1_code == l2_code:
964
+ continue
965
+ TRAIN_DATASETS.append(("allenai/wmt22_african", f"{l1_code}-{l2_code}"))
966
+
967
+
968
+ ### DATASET CREATION ###
969
+
970
+
971
+ # Copied from promptsource.utils
972
+ def removeHyphen(example):
973
+ example_clean = {}
974
+ for key in example.keys():
975
+ if "-" in key:
976
+ new_key = key.replace("-", "_")
977
+ example_clean[new_key] = example[key]
978
+ else:
979
+ example_clean[key] = example[key]
980
+ example = example_clean
981
+ return example
982
+
983
+ def apply_template(dataset, template, strip_connection=True):
984
+ def map_fn(ex):
985
+ ex = removeHyphen(ex)
986
+ try:
987
+ inputs_and_targets = template.apply(
988
+ ex,
989
+ strip_connection=strip_connection,
990
+ truncate=True,
991
+ )
992
+ # Skip ValueError("Prompt did not produce an input and at least one target.")
993
+ # which happens for some prompts with if else clauses based on inputs producing occasional
994
+ # empty targets
995
+ except ValueError:
996
+ return {"inputs": "", "targets": ""}
997
+ if len(inputs_and_targets) == 2:
998
+ # Note that the signature changed in promptsource
999
+ # In 0.1.0 template.apply returned two strings; in >0.3.0 it returns a str & a list
1000
+ inputs, targets = inputs_and_targets
1001
+ if len(targets) > 1:
1002
+ # Safer to skip, as could be a bug
1003
+ print(f"Found targets longer than 1. Inputs: {inputs} ; Targets {targets}. Skipping.")
1004
+ return {"inputs": "", "targets": ""}
1005
+ targets = targets[0]
1006
+ return {"inputs": inputs, "targets": targets}
1007
+ # When template results in an empty example, template.apply returns [""]
1008
+ # Also, if the template gets split wrong, len can be > 2
1009
+ # We will filter these out later
1010
+ else:
1011
+ # Fall back to empty str inputs & targets; these are filtered out below
1012
+ return {"inputs": "", "targets": ""}
1013
+
1014
+ def filter_fn(ex):
1015
+ return len(ex["inputs"]) > 0 and len(ex["targets"]) > 0
1016
+
1017
+ original_columns = dataset.column_names
1018
+ dataset = dataset.map(map_fn).filter(filter_fn)
1019
+ # map keeps original columns, remove them
1020
+ return dataset.remove_columns(set(original_columns) - {"inputs", "targets"})
1021
+
1022
+ def add_language_name_wikilingua(example):
1023
+ example["source_language_name"] = languages.get(alpha2=example["source_language"]).name
1024
+ example["target_language_name"] = languages.get(alpha2=example["target_language"]).name
1025
+ return example
1026
+
1027
+ def filter_l1_l2_wikilingua(example, l1, l2):
1028
+ return example["source_language"] == l1 and example["target_language"] == l2
1029
+
1030
+ def filter_empty_solution_apps(example):
1031
+ return bool(example["solutions"])
1032
+
1033
+ def add_solution_apps(example):
1034
+ example["solution"] = random.choice(json.loads(example["solutions"]))
1035
+ return example
1036
+
1037
+ def clean_code_xlcost(example):
1038
+ clean_lines = []
1039
+ cur_indent = 0
1040
+ for line in example["code"].split("NEW_LINE"):
1041
+ cur_indent += line.count("INDENT")
1042
+ cur_indent -= line.count("DEDENT")
1043
+ line = line.replace("INDENT", "").replace("DEDENT", "")
1044
+ line = line.replace("STRNEWLINE", "\n")
1045
+ line = line.replace("TABSYMBOL", "\t")
1046
+ clean_lines.append("\t" * cur_indent + line.strip())
1047
+ example["code_clean"] = "\n".join(clean_lines)
1048
+ return example
1049
+
1050
+ def write_to_jsonl_hub(ds, split="train"):
1051
+
1052
+ ### GET DATASET & LANGUAGE ###
1053
+
1054
+ ds_name, subset_name = ds
1055
+
1056
+ is_wikilingua_cross_lingual = (ds_name == "GEM/wiki_lingua") and ("_") in subset_name
1057
+
1058
+ lang_dir = DS_TO_LANG.get(ds_name, None)
1059
+ if lang_dir is None:
1060
+ lang_dir = DS_TO_LANG.get(subset_name, "en")
1061
+ if ds_name == "facebook/flores":
1062
+ lang_dir = DS_TO_LANG.get(subset_name.split("-")[-1].split("_")[0])
1063
+ elif is_wikilingua_cross_lingual or ds_name == "pasinit/xlwic":
1064
+ lang_dir = DS_TO_LANG.get(subset_name.split("_")[-1])
1065
+ elif ds_name == "xquad":
1066
+ lang_dir = DS_TO_LANG.get(subset_name.split(".")[1])
1067
+ elif ds_name == "mlqa":
1068
+ # Classify it by the target language for cross-lingual (i.e. what the loss is computed on)
1069
+ lang_dir = DS_TO_LANG.get(subset_name.split(".")[1])
1070
+ os.makedirs(lang_dir, exist_ok=True)
1071
+
1072
+ if ds_name == "Helsinki-NLP/tatoeba_mt":
1073
+ ds = load_dataset(ds_name, subset_name, ignore_verifications=True, revision="49aa20ac768eabc5a106a123549ea58053fc9b40")
1074
+ elif ds_name == "story_cloze":
1075
+ ds = load_dataset(ds_name, subset_name, data_dir=STORY_CLOZE_DIR)
1076
+ elif ds_name == "Muennighoff/xstory_cloze":
1077
+ ds = load_dataset(ds_name, subset_name, data_dir=XSTORY_CLOZE_DIR)
1078
+ else:
1079
+ ds = load_dataset(ds_name, subset_name)
1080
+
1081
+ if ds_name == "GEM/wiki_lingua":
1082
+ # Add names, e.g. Chinese for zh to use them in the jinja prompts
1083
+ ds = ds.map(add_language_name_wikilingua)
1084
+ if is_wikilingua_cross_lingual:
1085
+ # Keep only L1 -> L2 (L2 -> L1 will be a separate dataset)
1086
+ ds = ds.filter(partial(filter_l1_l2_wikilingua, l1=subset_name.split("_")[0], l2=subset_name.split("_")[1]))
1087
+ elif ds_name == "codeparrot/apps":
1088
+ ds = ds.filter(filter_empty_solution_apps).map(add_solution_apps)
1089
+ elif ds_name == "codeparrot/xlcost-text-to-code":
1090
+ ds = ds.map(clean_code_xlcost)
1091
+
1092
+ ### SELECT SPLITS ###
1093
+
1094
+ dataset_splits = list(ds.keys())
1095
+ if subset_name == "xlwic_en_zh":
1096
+ # Train set is en; val & test are zh
1097
+ dataset_splits.remove("train")
1098
+ elif ds_name == "teven/code_docstring_corpus":
1099
+ # Bad quality split
1100
+ dataset_splits.remove("class_level")
1101
+
1102
+ if split == "validation":
1103
+ if split not in dataset_splits or len(dataset_splits) == 1:
1104
+ print(f"Validation not found for {ds_name}")
1105
+ return
1106
+ dataset_splits = ["validation"]
1107
+ elif split == "train":
1108
+ # Use as much as possible
1109
+ # Would need to remove e.g. test datasets to benchmark same task performance
1110
+ if len(dataset_splits) > 1 and "validation" in dataset_splits:
1111
+ dataset_splits.remove("validation")
1112
+ # WikiLingua
1113
+ if "sampled_validation" in dataset_splits:
1114
+ dataset_splits.remove("sampled_validation")
1115
+ if "sampled_test" in dataset_splits:
1116
+ dataset_splits.remove("sampled_test")
1117
+
1118
+ ### SELECT PROMPTS ###
1119
+
1120
+ if subset_name is None:
1121
+ prompt_dataset_name = ds_name
1122
+ else:
1123
+ subset_name_prompt = subset_name
1124
+ if USE_ENGLISH_PROMPTS and ds_name in DS_TO_ENG_PROMPT:
1125
+ subset_name_prompt = DS_TO_ENG_PROMPT[ds_name]
1126
+ prompt_dataset_name = f"{ds_name}/{subset_name_prompt}"
1127
+
1128
+ prompts = DatasetTemplates(prompt_dataset_name)
1129
+
1130
+ ### PROCESS ###
1131
+
1132
+ for split in dataset_splits:
1133
+ for t_name in prompts.all_template_names:
1134
+ print(f"Running {ds_name}/{subset_name}/{split}/{t_name}")
1135
+ if SKIP_PROMPTS.get(prompt_dataset_name, {}).get(split, False):
1136
+ if ("all" in SKIP_PROMPTS[prompt_dataset_name][split]) or (t_name in SKIP_PROMPTS[prompt_dataset_name][split]):
1137
+ print(f"Skipping DS: {prompt_dataset_name} Split {split} Prompt {t_name}")
1138
+ continue
1139
+
1140
+ if ds_name == "Helsinki-NLP/tatoeba_mt":
1141
+ # E.g. translate-this-ara-eng, where eng is the target
1142
+ lang_dir = DS_TO_LANG.get(t_name.split("-")[-1].split("_")[0], "en")
1143
+ elif ds_name in ("allenai/wmt22_african", "multi_eurlex"):
1144
+ # One prompt in multi_eurlex has -source+target appended to the languages
1145
+ lang_dir = DS_TO_LANG.get(t_name.replace("-source+target", "").split("-")[-1])
1146
+
1147
+ out_path = os.path.join(
1148
+ lang_dir,
1149
+ f'xp3_{ds_name}_{subset_name}_{split}_{t_name}.jsonl'.replace("/", "_").replace(" ", "_")
1150
+ )
1151
+ if os.path.exists(out_path):
1152
+ print("Skipping as exists: ", out_path)
1153
+ continue
1154
+
1155
+ assert len(ds[split]) > 0, f"Got empty: {ds_name}"
1156
+
1157
+ try:
1158
+ if ds_name == "allenai/wmt22_african":
1159
+ # Sort by laser score, i.e. by increasing confidence & limit samples due to mediocre quality
1160
+ ds[split] = ds[split].sort("laser_score", reverse=True)
1161
+ max_range = min(len(ds[split]), MAX_EXAMPLES_PER_DATASET_PROMPT // 2)
1162
+ else:
1163
+ # Allow 5x buffer for empty examples
1164
+ max_range = min(len(ds[split]), MAX_EXAMPLES_PER_DATASET_PROMPT * 5)
1165
+ # Shuffle to avoid using the same subset
1166
+ # Leave \n in-between input & targets for code
1167
+ out_ds = apply_template(
1168
+ dataset=ds[split].shuffle().select(list(range(max_range))),
1169
+ template=prompts[t_name],
1170
+ strip_connection=False if lang_dir == "code" else True
1171
+ )
1172
+ # Keep X shortest examples
1173
+ max_range = min(len(out_ds), MAX_EXAMPLES_PER_DATASET_PROMPT)
1174
+ out_ds = out_ds.sort("inputs").select(list(range(max_range)))
1175
+ except Exception as e:
1176
+ print(f"Skipping due to {e}. DS: {ds_name}/{subset_name} Template: {t_name}")
1177
+ continue
1178
+ # Do not force ascii to allow chars like é
1179
+ if len(out_ds) > 0:
1180
+ out_ds.to_json(out_path, orient="records", lines=True, force_ascii=False)
1181
+
1182
+ # Testing:
1183
+ #TRAIN_DATASETS = [
1184
+ # ('xquad', 'xquad.ar'),
1185
+ #]
1186
+
1187
+ #for ds in TRAIN_DATASETS:
1188
+ # write_to_jsonl_hub(ds, split="train")
1189
+
1190
+ with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:
1191
+ pool.map(partial(write_to_jsonl_hub, split="train"), TRAIN_DATASETS)
1192
+ pool.map(partial(write_to_jsonl_hub, split="validation"), TRAIN_DATASETS)
1193
+ #pool.map(partial(write_to_jsonl_hub, split="train"), ADD_TRAIN_DATASETS_L1_XP3ALL)
1194
+ #pool.map(partial(write_to_jsonl_hub, split="validation"), ADD_TRAIN_DATASETS_L1_XP3ALL)
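For a concrete sense of what `apply_template` produces per example, here is a minimal sketch of running a single promptsource template over one record. The dataset/subset names mirror the commented-out test above; the `truncate=True` call matches the script, and the return shape follows the promptsource >=0.3 behavior the script's comments describe (an input string plus a list of targets). It is illustrative, not part of the pipeline:

```python
from datasets import load_dataset
from promptsource.templates import DatasetTemplates

# English prompts applied to the Arabic subset, as the USE_ENGLISH_PROMPTS path does
prompts = DatasetTemplates("xquad/xquad.en")
template = prompts[prompts.all_template_names[0]]

example = load_dataset("xquad", "xquad.ar", split="validation")[0]
inputs, targets = template.apply(example, truncate=True)
print({"inputs": inputs, "targets": targets[0]})  # one jsonl row as written by write_to_jsonl_hub
```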
data/xp3/prepare_xp3_train.slurm ADDED
@@ -0,0 +1,18 @@
+ #!/bin/bash
+ #SBATCH --job-name=prepare-xp3 # job name
+ #SBATCH --ntasks=1 # number of MP tasks
+ #SBATCH --nodes=1
+ #SBATCH --cpus-per-task=40 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@cpu
+ #SBATCH --partition=compil
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-prod
+ conda activate thomas_t_zero_evaluation
+
+ cd /gpfswork/rech/six/commun/bigscience-training/jsonls/xp3long/train/
+ python /gpfswork/rech/six/commun/bigscience-training/jsonls/xp3long/train/prepare_xp3.py
data/xp3/update_jsonls.py ADDED
@@ -0,0 +1,21 @@
+ import glob
+ import json
+ import os
+ import multiprocessing
+
+ jsonl_files = glob.glob("/gpfswork/rech/six/commun/bigscience-training/jsonls/xp3capped/*/*/*.jsonl")
+ print(jsonl_files)
+
+ #for path in jsonl_files:
+ def update_jsonl(path):
+     print(path)
+     # Rewrite each record so "targets" is a plain string instead of a single-element
+     # list, writing to a temp file and then replacing the original in place
+     with open(path, "r") as jsonl_file, open(path.replace(".jsonl", "tmp.jsonl"), "w") as jsonl_file_out:
+         for line in jsonl_file:
+             data = json.loads(line)
+             data["targets"] = data["targets"][0]
+             jsonl_file_out.write(json.dumps(data) + "\n")
+     os.rename(path.replace(".jsonl", "tmp.jsonl"), path)
+
+
+ with multiprocessing.Pool(processes=multiprocessing.cpu_count()-5) as pool:
+     pool.map(update_jsonl, jsonl_files)
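In isolation, the per-line rewrite above unwraps a single-element `targets` list into a plain string. A sketch with a made-up record:

```python
import json

line_in = '{"inputs": "Translate to English: chat", "targets": ["cat"]}'
record = json.loads(line_in)
record["targets"] = record["targets"][0]  # same unwrapping as update_jsonl
print(json.dumps(record))  # {"inputs": "Translate to English: chat", "targets": "cat"}
```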
data/xp3/xp3_jsonl_to_meg.slurm ADDED
@@ -0,0 +1,150 @@
+ #!/bin/bash
+ #SBATCH --job-name=xp3jsonl # job name
+ #SBATCH --ntasks=1 # number of MP tasks
+ #SBATCH --nodes=1
+ #SBATCH --cpus-per-task=40 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@cpu
+ #SBATCH --partition=cpu_p1
+ #SBATCH --qos=qos_cpu-t3
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed
+
+ TOKENIZER_PATH="bigscience/tokenizer"
+
+ LANGS=(
+     ak
+     ar
+     as
+     bm
+     bn
+     ca
+     code
+     en
+     es
+     eu
+     fon
+     fr
+     gu
+     hi
+     id
+     ig
+     ki
+     kn
+     lg
+     ln
+     ml
+     mr
+     ne
+     nso
+     ny
+     or
+     pa
+     pt
+     rn
+     rw
+     sn
+     st
+     sw
+     ta
+     te
+     tn
+     ts
+     tum
+     tw
+     ur
+     vi
+     wo
+     xh
+     yo
+     zh
+     zu
+ )
+
+
+ DATA_PATH=/gpfswork/rech/six/commun/bigscience-training/jsonls/xp3capped/train
+
+ for val in {0..45}; do
+     LANG=${LANGS[$val]}
+     cd $DATA_PATH/$LANG
+     cat *.jsonl > merged_dups_$LANG.jsonl
+     # Drop duplicates (~1G / 37G for en)
+     sort -u merged_dups_$LANG.jsonl | shuf > merged_$LANG.jsonl
+     OUTPUT=/gpfswork/rech/six/commun/bigscience-training/xp3cappednew/train/xp3_train_$LANG
+     cd $MEGATRON_DEEPSPEED_REPO
+     python tools/preprocess_data.py \
+         --input $DATA_PATH/$LANG/merged_$LANG.jsonl \
+         --output-prefix $OUTPUT \
+         --dataset-impl mmap \
+         --json-key inputs \
+         --tokenizer-type PretrainedFromHF \
+         --tokenizer-name-or-path $TOKENIZER_PATH \
+         --workers 35
+     python tools/preprocess_data.py \
+         --input $DATA_PATH/$LANG/merged_$LANG.jsonl \
+         --output-prefix $OUTPUT \
+         --dataset-impl mmap \
+         --json-key targets \
+         --tokenizer-type PretrainedFromHF \
+         --tokenizer-name-or-path $TOKENIZER_PATH \
+         --append-eod \
+         --prepend-space \
+         --workers 35
+ done
+
+ # No val data for other langs
+ LANGS=(
+     ar
+     bn
+     code
+     en
+     es
+     fr
+     hi
+     id
+     pt
+     sw
+     te
+     vi
+     zh
+ )
+
+ DATA_PATH=/gpfswork/rech/six/commun/bigscience-training/jsonls/xp3capped/validation
+ cd $DATA_PATH
+
+
+ for val in {0..12}; do
+     LANG=${LANGS[$val]}
+     cd $DATA_PATH/$LANG
+     cat *.jsonl > merged_dups_$LANG.jsonl
+     # Drop duplicates (~1G / 37G for en)
+     sort -u merged_dups_$LANG.jsonl > merged_$LANG.jsonl
+     OUTPUT=/gpfswork/rech/six/commun/bigscience-training/xp3cappednew/validation/xp3_validation_$LANG
+     cd $MEGATRON_DEEPSPEED_REPO
+     python tools/preprocess_data.py \
+         --input $DATA_PATH/$LANG/merged_$LANG.jsonl \
+         --output-prefix $OUTPUT \
+         --dataset-impl mmap \
+         --json-key inputs \
+         --tokenizer-type PretrainedFromHF \
+         --tokenizer-name-or-path $TOKENIZER_PATH \
+         --workers 35
+     python tools/preprocess_data.py \
+         --input $DATA_PATH/$LANG/merged_$LANG.jsonl \
+         --output-prefix $OUTPUT \
+         --dataset-impl mmap \
+         --json-key targets \
+         --tokenizer-type PretrainedFromHF \
+         --tokenizer-name-or-path $TOKENIZER_PATH \
+         --append-eod \
+         --prepend-space \
+         --workers 35
+ done
@@ -0,0 +1,104 @@
 
+ #!/bin/bash
+ #SBATCH --job-name=xp3mixedjsonl # job name
+ #SBATCH --ntasks=1 # number of MP tasks
+ #SBATCH --nodes=1
+ #SBATCH --cpus-per-task=40 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@cpu
+ #SBATCH --partition=cpu_p1
+ #SBATCH --qos=qos_cpu-t3
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed
+
+ TOKENIZER_PATH="bigscience/tokenizer"
+
+ LANGS=(
+     ak
+     ar
+     as
+     bm
+     bn
+     ca
+     code
+     en
+     es
+     eu
+     fon
+     fr
+     gu
+     hi
+     id
+     ig
+     ki
+     kn
+     lg
+     ln
+     ml
+     mr
+     ne
+     nso
+     ny
+     or
+     pa
+     pt
+     rn
+     rw
+     sn
+     st
+     sw
+     ta
+     te
+     tn
+     ts
+     tum
+     tw
+     ur
+     vi
+     wo
+     xh
+     yo
+     zh
+     zu
+ )
+
+
+ DATA_PATH=/gpfswork/rech/six/commun/bigscience-training/jsonls/xp3cappedmixedfixlong
+ OUTPUT=/gpfswork/rech/six/commun/bigscience-training/xp3cappedmixedfixlong
+
+ mkdir -p $OUTPUT
+
+ for val in {0..45}; do
+     LANG=${LANGS[$val]}
+     cd $DATA_PATH/$LANG
+     # Merge
+     cat *.jsonl > merged_dups_$LANG.jsonl
+     # Drop duplicates (~1G / 37G for en) + Shuffle
+     sort -u merged_dups_$LANG.jsonl | shuf > merged_$LANG.jsonl
+     cd $MEGATRON_DEEPSPEED_REPO
+     python tools/preprocess_data.py \
+         --input $DATA_PATH/$LANG/merged_$LANG.jsonl \
+         --output-prefix $OUTPUT/xp3_$LANG \
+         --dataset-impl mmap \
+         --json-key inputs \
+         --tokenizer-type PretrainedFromHF \
+         --tokenizer-name-or-path $TOKENIZER_PATH \
+         --workers 35
+     python tools/preprocess_data.py \
+         --input $DATA_PATH/$LANG/merged_$LANG.jsonl \
+         --output-prefix $OUTPUT/xp3_$LANG \
+         --dataset-impl mmap \
+         --json-key targets \
+         --tokenizer-type PretrainedFromHF \
+         --tokenizer-name-or-path $TOKENIZER_PATH \
+         --append-eod \
+         --prepend-space \
+         --workers 35
+ done
data/xp3/xp3mixed_jsonl_to_meg.slurm ADDED
@@ -0,0 +1,102 @@
+ #!/bin/bash
+ #SBATCH --job-name=xp3mixedjsonl # job name
+ #SBATCH --ntasks=1 # number of MP tasks
+ #SBATCH --nodes=1
+ #SBATCH --cpus-per-task=40 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@cpu
+ #SBATCH --partition=cpu_p1
+ #SBATCH --qos=qos_cpu-t3
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRWORK/code/tr13f-6B3-ml-t0/Megatron-DeepSpeed
+
+ TOKENIZER_PATH="bigscience/tokenizer"
+
+ LANGS=(
+     ak
+     ar
+     as
+     bm
+     bn
+     ca
+     code
+     en
+     es
+     eu
+     fon
+     fr
+     gu
+     hi
+     id
+     ig
+     ki
+     kn
+     lg
+     ln
+     ml
+     mr
+     ne
+     nso
+     ny
+     or
+     pa
+     pt
+     rn
+     rw
+     sn
+     st
+     sw
+     ta
+     te
+     tn
+     ts
+     tum
+     tw
+     ur
+     vi
+     wo
+     xh
+     yo
+     zh
+     zu
+ )
+
+
+ DATA_PATH=/gpfswork/rech/six/commun/bigscience-training/jsonls/xp3mixed
+
+ for val in {0..45}; do
+     LANG=${LANGS[$val]}
+     cd $DATA_PATH/$LANG
+     # Merge
+     cat *.jsonl > merged_dups_$LANG.jsonl
+     # Drop duplicates (~1G / 37G for en) + Shuffle
+     sort -u merged_dups_$LANG.jsonl | shuf > merged_$LANG.jsonl
+     OUTPUT=/gpfswork/rech/six/commun/bigscience-training/xp3mixed/xp3_$LANG
+     cd $MEGATRON_DEEPSPEED_REPO
+     python tools/preprocess_data.py \
+         --input $DATA_PATH/$LANG/merged_$LANG.jsonl \
+         --output-prefix $OUTPUT \
+         --dataset-impl mmap \
+         --json-key inputs \
+         --tokenizer-type PretrainedFromHF \
+         --tokenizer-name-or-path $TOKENIZER_PATH \
+         --workers 35
+     python tools/preprocess_data.py \
+         --input $DATA_PATH/$LANG/merged_$LANG.jsonl \
+         --output-prefix $OUTPUT \
+         --dataset-impl mmap \
+         --json-key targets \
+         --tokenizer-type PretrainedFromHF \
+         --tokenizer-name-or-path $TOKENIZER_PATH \
+         --append-eod \
+         --prepend-space \
+         --workers 35
+ done
inference/README.md ADDED
@@ -0,0 +1,15 @@
+ # Inference
+
+ Notes on the plans to do inference with the pre-trained model.
+
+ # Large Model on limited hardware
+
+ - Inferencing and tinkering on a single host (150-200B model)
+
+ Solution: we can do this with ZeRO-Infinity. It seems @Shaden Smith already has the code to load the model parameter checkpoints from Megatron+DeepSpeed 3D into Megatron+DeepSpeed ZeRO-Infinity. The remaining work is to add an inference-only mode to ZeRO-Infinity that drops all the non-parameter states.
+
+ Hardware requirements: would require about 500-1000 GB of memory (can be CPU, GPU or NVMe). A single node with enough CPU or NVMe memory should work here.
+
+ The single node can be as little as 4x 32GB-V100. It will just be slower than, say, 8x 80GB-A100.
+
+ Estimated work: if all works as expected, 1-3 weeks based on bandwidth availability. Tuning for the best performance might take another week or so, but that won't block the availability of the functionality.
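As a rough check on the 500-1000 GB figure, a back-of-the-envelope sketch (real overheads from activations, buffers, and fragmentation vary): inference needs only the parameters, not gradients or optimizer states.

```python
def inference_param_memory_gb(n_params_billion, bytes_per_param):
    # 1e9 params times bytes-per-param, divided by 1e9 bytes-per-GB, cancels out
    return n_params_billion * bytes_per_param

for n in (150, 200):
    print(f"{n}B params: ~{inference_param_memory_gb(n, 2)} GB fp16, "
          f"~{inference_param_memory_gb(n, 4)} GB fp32")
# 150-200B params -> ~300-400 GB in fp16 (~600-800 GB in fp32), which lands in the
# 500-1000 GB range once activations and working buffers are added.
```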
inference/modeling_gpt2_alibi_prefix_lm.py ADDED
@@ -0,0 +1,1750 @@
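The file below keeps the GPT2 class names but wires ALiBi biases through the attention stack. For orientation, a sketch of the standard ALiBi slope/bias construction that produces a tensor of shape `(batch * num_heads, 1, seq_len)`, the layout `GPT2Attention._attn` slices; this mirrors the cited Megatron-DeepSpeed recipe rather than code from this file, and `build_alibi` is a hypothetical helper name:

```python
import math
import torch

def alibi_slopes(num_heads):
    # Head-specific geometric slopes: closest power of two first, then
    # interleaved extras for non-power-of-two head counts (standard ALiBi recipe)
    n = 2 ** math.floor(math.log2(num_heads))
    slopes = [2.0 ** (-8.0 * (i + 1) / n) for i in range(n)]
    if n < num_heads:
        extra = [2.0 ** (-4.0 * (i + 1) / n) for i in range(2 * (num_heads - n))]
        slopes += extra[0::2]
    return torch.tensor(slopes)

def build_alibi(seq_len, num_heads, batch_size):
    positions = torch.arange(seq_len).view(1, 1, -1)                  # (1, 1, sk)
    bias = alibi_slopes(num_heads).view(1, num_heads, 1) * positions  # (1, np, sk)
    return bias.repeat(batch_size, 1, 1).view(batch_size * num_heads, 1, seq_len)
```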
1
+ # coding=utf-8
2
+ # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """PyTorch OpenAI GPT-2 model with AliBi."""
17
+
18
+ ## integrating some AliBi code from https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/c839a8aa30731f71b3738d56009be9668508e366/megatron/model/transformer.py
19
+ # I am keeping the name of the classes as GPT2 because some of transformer's code like pipeline classes check class names in order to do things, and
20
+ # creating a new class that have different names sometimes break things.
21
+
22
+ import os
23
+ import enum
24
+ from dataclasses import dataclass
25
+ from typing import Optional, Tuple
26
+
27
+ import torch
28
+ import torch.utils.checkpoint
29
+ from torch import nn
30
+ from torch.nn import CrossEntropyLoss, MSELoss
31
+
32
+ from transformers.activations import ACT2FN
33
+ from transformers.file_utils import (
34
+ ModelOutput,
35
+ add_code_sample_docstrings,
36
+ add_start_docstrings,
37
+ add_start_docstrings_to_model_forward,
38
+ replace_return_docstrings,
39
+ )
40
+ from transformers.modeling_outputs import (
41
+ BaseModelOutputWithPastAndCrossAttentions,
42
+ CausalLMOutputWithCrossAttentions,
43
+ SequenceClassifierOutputWithPast,
44
+ TokenClassifierOutput,
45
+ )
46
+ from transformers.modeling_utils import (
47
+ Conv1D,
48
+ PreTrainedModel,
49
+ SequenceSummary,
50
+ find_pruneable_heads_and_indices,
51
+ prune_conv1d_layer,
52
+ )
53
+ from transformers.utils import logging
54
+ from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
55
+ from transformers.models.gpt2.configuration_gpt2 import GPT2Config
56
+
57
+ from collections import OrderedDict
58
+ from typing import Any, Mapping, Optional
59
+
60
+ from transformers import PreTrainedTokenizer, TensorType, is_torch_available
61
+
62
+ from transformers.configuration_utils import PretrainedConfig
63
+ from transformers.onnx import OnnxConfigWithPast
64
+
65
+
66
+
67
+ logger = logging.get_logger(__name__)
68
+
69
+ GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
70
+ "gpt2": "https://huggingface.co/gpt2/resolve/main/config.json",
71
+ "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/config.json",
72
+ "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/config.json",
73
+ "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/config.json",
74
+ "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/config.json",
75
+ }
76
+
77
+ PositionEmbeddingType_rotary = 1 # not implemented
78
+ PositionEmbeddingType_absolute = 2
79
+ PositionEmbeddingType_alibi = 3
80
+
81
+
82
+ class GPT2Config(PretrainedConfig):
83
+ """
84
+ This is the configuration class to store the configuration of a :class:`~transformers.GPT2Model` or a
85
+ :class:`~transformers.TFGPT2Model`. It is used to instantiate a GPT-2 model according to the specified arguments,
86
+ defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration
87
+ to that of the GPT-2 `small <https://huggingface.co/gpt2>`__ architecture.
88
+ Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
89
+ outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
90
+ Args:
91
+ vocab_size (:obj:`int`, `optional`, defaults to 50257):
92
+ Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
93
+ :obj:`inputs_ids` passed when calling :class:`~transformers.GPT2Model` or
94
+ :class:`~transformers.TFGPT2Model`.
95
+ n_positions (:obj:`int`, `optional`, defaults to 1024):
96
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
97
+ just in case (e.g., 512 or 1024 or 2048).
98
+ n_ctx (:obj:`int`, `optional`, defaults to 1024):
99
+ Dimensionality of the causal mask (usually same as n_positions).
100
+ n_embd (:obj:`int`, `optional`, defaults to 768):
101
+ Dimensionality of the embeddings and hidden states.
102
+ n_layer (:obj:`int`, `optional`, defaults to 12):
103
+ Number of hidden layers in the Transformer encoder.
104
+ n_head (:obj:`int`, `optional`, defaults to 12):
105
+ Number of attention heads for each attention layer in the Transformer encoder.
106
+ n_inner (:obj:`int`, `optional`, defaults to None):
107
+ Dimensionality of the inner feed-forward layers. :obj:`None` will set it to 4 times n_embd
108
+ activation_function (:obj:`str`, `optional`, defaults to :obj:`"gelu"`):
109
+ Activation function, to be selected in the list :obj:`["relu", "silu", "gelu", "tanh", "gelu_new"]`.
110
+ resid_pdrop (:obj:`float`, `optional`, defaults to 0.1):
111
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
112
+ embd_pdrop (:obj:`int`, `optional`, defaults to 0.1):
113
+ The dropout ratio for the embeddings.
114
+ attn_pdrop (:obj:`float`, `optional`, defaults to 0.1):
115
+ The dropout ratio for the attention.
116
+ layer_norm_epsilon (:obj:`float`, `optional`, defaults to 1e-5):
117
+ The epsilon to use in the layer normalization layers
118
+ initializer_range (:obj:`float`, `optional`, defaults to 0.02):
119
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
120
+ summary_type (:obj:`string`, `optional`, defaults to :obj:`"cls_index"`):
121
+ Argument used when doing sequence summary, used in the models :class:`~transformers.GPT2DoubleHeadsModel`
122
+ and :class:`~transformers.TFGPT2DoubleHeadsModel`.
123
+ Has to be one of the following options:
124
+ - :obj:`"last"`: Take the last token hidden state (like XLNet).
125
+ - :obj:`"first"`: Take the first token hidden state (like BERT).
126
+ - :obj:`"mean"`: Take the mean of all tokens hidden states.
127
+ - :obj:`"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
128
+ - :obj:`"attn"`: Not implemented now, use multi-head attention.
129
+ summary_use_proj (:obj:`bool`, `optional`, defaults to :obj:`True`):
130
+ Argument used when doing sequence summary, used in the models :class:`~transformers.GPT2DoubleHeadsModel`
131
+ and :class:`~transformers.TFGPT2DoubleHeadsModel`.
132
+ Whether or not to add a projection after the vector extraction.
133
+ summary_activation (:obj:`str`, `optional`):
134
+ Argument used when doing sequence summary. Used in for the multiple choice head in
135
+ :class:`~transformers.GPT2DoubleHeadsModel`.
136
+ Pass :obj:`"tanh"` for a tanh activation to the output, any other value will result in no activation.
137
+ summary_proj_to_labels (:obj:`bool`, `optional`, defaults to :obj:`True`):
138
+ Argument used when doing sequence summary, used in the models :class:`~transformers.GPT2DoubleHeadsModel`
139
+ and :class:`~transformers.TFGPT2DoubleHeadsModel`.
140
+ Whether the projection outputs should have :obj:`config.num_labels` or :obj:`config.hidden_size` classes.
141
+ summary_first_dropout (:obj:`float`, `optional`, defaults to 0.1):
142
+ Argument used when doing sequence summary, used in the models :class:`~transformers.GPT2DoubleHeadsModel`
143
+ and :class:`~transformers.TFGPT2DoubleHeadsModel`.
144
+ The dropout ratio to be used after the projection and activation.
145
+ scale_attn_weights (:obj:`bool`, `optional`, defaults to :obj:`True`):
146
+ Scale attention weights by dividing by sqrt(hidden_size)..
147
+ use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
148
+ Whether or not the model should return the last key/values attentions (not used by all models).
149
+ Example::
150
+ >>> from transformers import GPT2Model, GPT2Config
151
+ >>> # Initializing a GPT2 configuration
152
+ >>> configuration = GPT2Config()
153
+ >>> # Initializing a model from the configuration
154
+ >>> model = GPT2Model(configuration)
155
+ >>> # Accessing the model configuration
156
+ >>> configuration = model.config
157
+ """
158
+
159
+ model_type = "gpt2"
160
+ keys_to_ignore_at_inference = ["past_key_values"]
161
+ attribute_map = {
162
+ "hidden_size": "n_embd",
163
+ "max_position_embeddings": "n_positions",
164
+ "num_attention_heads": "n_head",
165
+ "num_hidden_layers": "n_layer",
166
+ }
167
+
168
+ def __init__(
169
+ self,
170
+ vocab_size=50257,
171
+ n_positions=1024,
172
+ n_ctx=1024,
173
+ n_embd=768,
174
+ n_layer=12,
175
+ n_head=12,
176
+ n_inner=None,
177
+ activation_function="gelu_new",
178
+ resid_pdrop=0.1,
179
+ embd_pdrop=0.1,
180
+ attn_pdrop=0.1,
181
+ layer_norm_epsilon=1e-5,
182
+ initializer_range=0.02,
183
+ summary_type="cls_index",
184
+ summary_use_proj=True,
185
+ summary_activation=None,
186
+ summary_proj_to_labels=True,
187
+ summary_first_dropout=0.1,
188
+ scale_attn_weights=True,
189
+ use_cache=True,
190
+ bos_token_id=50256,
191
+ eos_token_id=50256,
192
+ position_embedding_type=PositionEmbeddingType_absolute,
193
+ **kwargs
194
+ ):
195
+ self.vocab_size = vocab_size
196
+ self.n_ctx = n_ctx
197
+ self.n_positions = n_positions
198
+ self.n_embd = n_embd
199
+ self.n_layer = n_layer
200
+ self.n_head = n_head
201
+ self.n_inner = n_inner
202
+ self.activation_function = activation_function
203
+ self.resid_pdrop = resid_pdrop
204
+ self.embd_pdrop = embd_pdrop
205
+ self.attn_pdrop = attn_pdrop
206
+ self.layer_norm_epsilon = layer_norm_epsilon
207
+ self.initializer_range = initializer_range
208
+ self.summary_type = summary_type
209
+ self.summary_use_proj = summary_use_proj
210
+ self.summary_activation = summary_activation
211
+ self.summary_first_dropout = summary_first_dropout
212
+ self.summary_proj_to_labels = summary_proj_to_labels
213
+ self.scale_attn_weights = scale_attn_weights
214
+ self.use_cache = use_cache
215
+
216
+ self.bos_token_id = bos_token_id
217
+ self.eos_token_id = eos_token_id
218
+ self.position_embedding_type = position_embedding_type
219
+ super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
220
+
221
+
222
+ class GPT2OnnxConfig(OnnxConfigWithPast):
223
+ @property
224
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
225
+ common_inputs = OrderedDict({"input_ids": {0: "batch"}})
226
+ if self.use_past:
227
+ for i in range(self._config.n_layer * 2):
228
+ common_inputs[f"past_key_values.{i}"] = {0: "batch", 2: "sequence"}
229
+
230
+ common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
231
+ else:
232
+ common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
233
+
234
+ return common_inputs
235
+
236
+ @property
237
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
238
+ common_outputs = OrderedDict({"last_hidden_state": {0: "batch", 1: "sequence"}})
239
+ if self.use_past:
240
+ for i in range(self._config.n_layer * 2):
241
+ common_outputs[f"present.{i}"] = {0: "batch", 2: "sequence"}
242
+
243
+ return common_outputs
244
+
245
+ return common_outputs
246
+
247
+ def generate_dummy_inputs(
248
+ self,
249
+ tokenizer: PreTrainedTokenizer,
250
+ batch_size: int = -1,
251
+ seq_length: int = -1,
252
+ is_pair: bool = False,
253
+ framework: Optional[TensorType] = None,
254
+ ) -> Mapping[str, Any]:
255
+ common_inputs = super().generate_dummy_inputs(tokenizer, batch_size, seq_length, is_pair, framework)
256
+
257
+ # We need to order the input in the way they appears in the forward()
258
+ ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
259
+
260
+ # Need to add the past_keys
261
+ if self.use_past:
262
+ if not is_torch_available():
263
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
264
+ else:
265
+ import torch
266
+
267
+ batch = common_inputs["input_ids"].shape[0]
268
+ ordered_inputs["past_key_values"] = [
269
+ (
270
+ torch.zeros((batch, self._config.n_head, 1, self._config.hidden_size // self._config.n_head)),
271
+ torch.zeros((batch, self._config.n_head, 1, self._config.hidden_size // self._config.n_head)),
272
+ )
273
+ for _ in range(self._config.n_layer)
274
+ ]
275
+
276
+ ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
277
+ return ordered_inputs
278
+
279
+
280
+ # need to change the checkpoints to be the bigscience checkpoints
281
+ _CHECKPOINT_FOR_DOC = "gpt2"
282
+ _CONFIG_FOR_DOC = "GPT2Config"
283
+ _TOKENIZER_FOR_DOC = "GPT2Tokenizer"
284
+
285
+ GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = [
286
+ "gpt2",
287
+ "gpt2-medium",
288
+ "gpt2-large",
289
+ "gpt2-xl",
290
+ "distilgpt2",
291
+ # See all GPT-2 models at https://huggingface.co/models?filter=gpt2
292
+ ]
293
+
294
+
295
+
296
+
297
+ def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
298
+ """Load tf checkpoints in a pytorch model"""
299
+ try:
300
+ import re
301
+
302
+ import tensorflow as tf
303
+ except ImportError:
304
+ logger.error(
305
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
306
+ "https://www.tensorflow.org/install/ for installation instructions."
307
+ )
308
+ raise
309
+ tf_path = os.path.abspath(gpt2_checkpoint_path)
310
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
311
+ # Load weights from TF model
312
+ init_vars = tf.train.list_variables(tf_path)
313
+ names = []
314
+ arrays = []
315
+ for name, shape in init_vars:
316
+ logger.info(f"Loading TF weight {name} with shape {shape}")
317
+ array = tf.train.load_variable(tf_path, name)
318
+ names.append(name)
319
+ arrays.append(array.squeeze())
320
+
321
+ for name, array in zip(names, arrays):
322
+ name = name[6:] # skip "model/"
323
+ name = name.split("/")
324
+ pointer = model
325
+ for m_name in name:
326
+ if re.fullmatch(r"[A-Za-z]+\d+", m_name):
327
+ scope_names = re.split(r"(\d+)", m_name)
328
+ else:
329
+ scope_names = [m_name]
330
+ if scope_names[0] == "w" or scope_names[0] == "g":
331
+ pointer = getattr(pointer, "weight")
332
+ elif scope_names[0] == "b":
333
+ pointer = getattr(pointer, "bias")
334
+ elif scope_names[0] == "wpe" or scope_names[0] == "wte":
335
+ pointer = getattr(pointer, scope_names[0])
336
+ pointer = getattr(pointer, "weight")
337
+ else:
338
+ pointer = getattr(pointer, scope_names[0])
339
+ if len(scope_names) >= 2:
340
+ num = int(scope_names[1])
341
+ pointer = pointer[num]
342
+ try:
343
+ assert (
344
+ pointer.shape == array.shape
345
+ ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
346
+ except AssertionError as e:
347
+ e.args += (pointer.shape, array.shape)
348
+ raise
349
+ logger.info(f"Initialize PyTorch weight {name}")
350
+ pointer.data = torch.from_numpy(array)
351
+ return model
352
+
353
+
354
+ class GPT2Attention(nn.Module):
355
+ def __init__(self, config, is_cross_attention=False):
356
+ super().__init__()
357
+
358
+ max_positions = config.max_position_embeddings
359
+ self.register_buffer(
360
+ "bias",
361
+ torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
362
+ 1, 1, max_positions, max_positions
363
+ ),
364
+ )
365
+ self.register_buffer("masked_bias", torch.tensor(-1e4))
366
+
367
+ self.embed_dim = config.hidden_size
368
+ self.num_heads = config.num_attention_heads
369
+ self.head_dim = self.embed_dim // self.num_heads
370
+ self.split_size = self.embed_dim
371
+ if self.head_dim * self.num_heads != self.embed_dim:
372
+ raise ValueError(
373
+ f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
374
+ )
375
+
376
+ self.scale_attn_weights = config.scale_attn_weights
377
+ self.is_cross_attention = is_cross_attention
378
+
379
+ if self.is_cross_attention:
380
+ self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
381
+ self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
382
+ else:
383
+ self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
384
+ self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
385
+
386
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
387
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
388
+
389
+ self.pruned_heads = set()
390
+ self.position_embedding_type = config.position_embedding_type
391
+
392
+ def prune_heads(self, heads):
393
+ if len(heads) == 0:
394
+ return
395
+ heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
396
+ index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
397
+
398
+ # Prune conv1d layers
399
+ self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
400
+ self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
401
+
402
+ # Update hyper params
403
+ self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads))
404
+ self.num_heads = self.num_heads - len(heads)
405
+ self.pruned_heads = self.pruned_heads.union(heads)
406
+
407
+ def _attn(self, query, key, value, attention_mask=None, head_mask=None):
408
+
409
+ # [b, np, sq, sk]
410
+ output_size = (query.size(1),
411
+ query.size(2),
412
+ query.size(0),
413
+ key.size(0))
414
+ # preallocting result tensor: [b * np, sq, sk]
415
+ if alibi is None:
416
+ matmul_result = torch.empty(
417
+ output_size[0]*output_size[1],
418
+ output_size[2],
419
+ output_size[3],
420
+ dtype=query_layer.dtype,
421
+ device=torch.cuda.current_device())
422
+ else:
423
+ matmul_result = alibi[:output_size[0]*output_size[1], :, :output_size[3]]
424
+
425
+ # [sq, b, np, hn] -> [sq, b * np, hn]
426
+ query = query.view(output_size[2],
427
+ output_size[0] * output_size[1], -1)
428
+ # [sk, b, np, hn] -> [sk, b * np, hn]
429
+ key = key.view(output_size[3],
430
+ output_size[0] * output_size[1], -1)
431
+ # Raw attention scores. [b * np, sq, sk]
432
+ attn_weights = torch.baddbmm(
433
+ matmul_result,
434
+ query_layer.transpose(0, 1), # [b * np, sq, hn]
435
+ key_layer.transpose(0, 1).transpose(-1, -2), # [b * np, hn, sk]
436
+ beta=0.0 if alibi is None else 1.0, alpha=(1.0/self.norm_factor))
437
+
438
+ #attn_weights = torch.matmul(query, key.transpose(-1, -2))
439
+
440
+ # change view to [b, np, sq, sk]
441
+ attn_weights = attn_weights.view(*output_size)
442
+
443
+ # do we need this scaling. does the alpha do the scaling as above?
444
+ if self.scale_attn_weights:
445
+ attn_weights = attn_weights / (float(value.size(-1)) ** 0.5)
446
+
447
+ if not self.is_cross_attention:
448
+ # if only "normal" attention layer implements causal mask
449
+ query_length, key_length = query.size(-2), key.size(-2)
450
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool()
451
+ attn_weights = torch.where(causal_mask, attn_weights, self.masked_bias.to(attn_weights.dtype))
452
+
453
+ if attention_mask is not None:
454
+ # Apply the attention mask
455
+ attn_weights = attn_weights + attention_mask
456
+
457
+ attn_weights = nn.Softmax(dim=-1)(attn_weights)
458
+ attn_weights = self.attn_dropout(attn_weights)
459
+
460
+ # Mask heads if we want to
461
+ if head_mask is not None:
462
+ attn_weights = attn_weights * head_mask
463
+
464
+ attn_output = torch.matmul(attn_weights, value)
465
+
466
+ return attn_output, attn_weights
467
+
468
+ def _split_heads(self, tensor, num_heads, attn_head_size):
469
+ """
470
+ Splits hidden_size dim into attn_head_size and num_heads
471
+ """
472
+ new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
473
+ tensor = tensor.view(*new_shape)
474
+ return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
475
+
476
+ def _merge_heads(self, tensor, num_heads, attn_head_size):
477
+ """
478
+ Merges attn_head_size dim and num_attn_heads dim into hidden_size
479
+ """
480
+ tensor = tensor.permute(0, 2, 1, 3).contiguous()
481
+ new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
482
+ return tensor.view(new_shape)
483
+
484
+ def forward(
485
+ self,
486
+ hidden_states,
487
+ layer_past=None,
488
+ attention_mask=None,
489
+ head_mask=None,
490
+ encoder_hidden_states=None,
491
+ encoder_attention_mask=None,
492
+ alibi=None,
493
+ use_cache=False,
494
+ output_attentions=False,
495
+
496
+ ):
497
+ if encoder_hidden_states is not None:
498
+ if not hasattr(self, "q_attn"):
499
+ raise ValueError(
500
+ "If class is used as cross attention, the weights `q_attn` have to be defined. "
501
+ "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`."
502
+ )
503
+
504
+ query = self.q_attn(hidden_states)
505
+ key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
506
+ attention_mask = encoder_attention_mask
507
+ else:
508
+ query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
509
+
510
+ query = self._split_heads(query, self.num_heads, self.head_dim)
511
+ key = self._split_heads(key, self.num_heads, self.head_dim)
512
+ value = self._split_heads(value, self.num_heads, self.head_dim)
513
+
514
+ if layer_past is not None:
515
+ past_key, past_value = layer_past
516
+ key = torch.cat((past_key, key), dim=-2)
517
+ value = torch.cat((past_value, value), dim=-2)
518
+
519
+ if use_cache is True:
520
+ present = (key, value)
521
+ else:
522
+ present = None
523
+
524
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
525
+
526
+ attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
527
+ attn_output = self.c_proj(attn_output)
528
+ attn_output = self.resid_dropout(attn_output)
529
+
530
+ outputs = (attn_output, present)
531
+ if output_attentions:
532
+ outputs += (attn_weights,)
533
+
534
+ return outputs # a, present, (attentions)
535
+
536
+
537
+ class GPT2MLP(nn.Module):
538
+ def __init__(self, intermediate_size, config):
539
+ super().__init__()
540
+ embed_dim = config.hidden_size
541
+ self.c_fc = Conv1D(intermediate_size, embed_dim)
542
+ self.c_proj = Conv1D(embed_dim, intermediate_size)
543
+ self.act = ACT2FN[config.activation_function]
544
+ self.dropout = nn.Dropout(config.resid_pdrop)
545
+
546
+ def forward(self, hidden_states):
547
+ hidden_states = self.c_fc(hidden_states)
548
+ hidden_states = self.act(hidden_states)
549
+ hidden_states = self.c_proj(hidden_states)
550
+ hidden_states = self.dropout(hidden_states)
551
+ return hidden_states
552
+
553
+
554
+ class GPT2Block(nn.Module):
555
+ def __init__(self, config):
556
+ super().__init__()
557
+ hidden_size = config.hidden_size
558
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
559
+
560
+ self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
561
+ self.attn = GPT2Attention(config)
562
+ self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
563
+
564
+ if config.add_cross_attention:
565
+ self.crossattention = GPT2Attention(config, is_cross_attention=True)
566
+ self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
567
+
568
+ self.mlp = GPT2MLP(inner_dim, config)
569
+
570
+ def forward(
571
+ self,
572
+ hidden_states,
573
+ layer_past=None,
574
+ attention_mask=None,
575
+ head_mask=None,
576
+ encoder_hidden_states=None,
577
+ encoder_attention_mask=None,
578
+ alibi=None,
579
+ use_cache=False,
580
+ output_attentions=False,
581
+ ):
582
+ residual = hidden_states
583
+ hidden_states = self.ln_1(hidden_states)
584
+ attn_outputs = self.attn(
585
+ hidden_states,
586
+ layer_past=layer_past,
587
+ attention_mask=attention_mask,
588
+ head_mask=head_mask,
589
+ alibi=alibi,
590
+ use_cache=use_cache,
591
+ output_attentions=output_attentions,
592
+ )
593
+ attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
594
+ outputs = attn_outputs[1:]
595
+ # residual connection
596
+ hidden_states = attn_output + residual
597
+
598
+ if encoder_hidden_states is not None:
599
+ # add one self-attention block for cross-attention
600
+ if not hasattr(self, "crossattention"):
601
+ raise ValueError(
602
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
603
+ "cross-attention layers by setting `config.add_cross_attention=True`"
604
+ )
605
+ residual = hidden_states
606
+ hidden_states = self.ln_cross_attn(hidden_states)
607
+ cross_attn_outputs = self.crossattention(
608
+ hidden_states,
609
+ attention_mask=attention_mask,
610
+ head_mask=head_mask,
611
+ encoder_hidden_states=encoder_hidden_states,
612
+ encoder_attention_mask=encoder_attention_mask,
613
+ alibi=alibi,
614
+ output_attentions=output_attentions,
615
+ )
616
+ attn_output = cross_attn_outputs[0]
617
+ # residual connection
618
+ hidden_states = residual + attn_output
619
+ outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights
620
+
621
+ residual = hidden_states
622
+ hidden_states = self.ln_2(hidden_states)
623
+ feed_forward_hidden_states = self.mlp(hidden_states)
624
+ # residual connection
625
+ hidden_states = residual + feed_forward_hidden_states
626
+
627
+ if use_cache:
628
+ outputs = (hidden_states,) + outputs
629
+ else:
630
+ outputs = (hidden_states,) + outputs[1:]
631
+
632
+ return outputs # hidden_states, present, (attentions, cross_attentions)
633
+
634
+
635
+ class GPT2PreTrainedModel(PreTrainedModel):
636
+ """
637
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
638
+ models.
639
+ """
640
+
641
+ config_class = GPT2Config
642
+ load_tf_weights = load_tf_weights_in_gpt2
643
+ base_model_prefix = "transformer"
644
+ is_parallelizable = True
645
+ supports_gradient_checkpointing = True
646
+
647
+ def __init__(self, *inputs, **kwargs):
648
+ super().__init__(*inputs, **kwargs)
649
+
650
+
651
+ def _init_weights(self, module):
652
+ """Initialize the weights."""
653
+ if isinstance(module, (nn.Linear, Conv1D)):
654
+ # Slightly different from the TF version which uses truncated_normal for initialization
655
+ # cf https://github.com/pytorch/pytorch/pull/5617
656
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
657
+ if module.bias is not None:
658
+ module.bias.data.zero_()
659
+ elif isinstance(module, nn.Embedding):
660
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
661
+ if module.padding_idx is not None:
662
+ module.weight.data[module.padding_idx].zero_()
663
+ elif isinstance(module, nn.LayerNorm):
664
+ module.bias.data.zero_()
665
+ module.weight.data.fill_(1.0)
666
+
667
+ def _set_gradient_checkpointing(self, module, value=False):
668
+ if isinstance(module, GPT2Model):
669
+ module.gradient_checkpointing = value
670
+
671
+
672
+
673
+ @dataclass
674
+ class GPT2DoubleHeadsModelOutput(ModelOutput):
675
+ """
676
+ Base class for outputs of models predicting if two sentences are consecutive or not.
677
+
678
+ Args:
679
+ loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
680
+ Language modeling loss.
681
+ mc_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`mc_labels` is provided):
682
+ Multiple choice classification loss.
683
+ logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):
684
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
685
+ mc_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
686
+ Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
687
+ past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
688
+ Tuple of length :obj:`config.n_layers`, containing tuples of tensors of shape :obj:`(batch_size, num_heads,
689
+ sequence_length, embed_size_per_head)`).
690
+
691
+ Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
692
+ :obj:`past_key_values` input) to speed up sequential decoding.
693
+ hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
694
+ Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
695
+ of shape :obj:`(batch_size, sequence_length, hidden_size)`.
696
+
697
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
698
+ attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
699
+ Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
700
+ sequence_length, sequence_length)`.
701
+
702
+ GPT2Attentions weights after the attention softmax, used to compute the weighted average in the
703
+ self-attention heads.
704
+ """
705
+
706
+ loss: Optional[torch.FloatTensor] = None
707
+ mc_loss: Optional[torch.FloatTensor] = None
708
+ logits: torch.FloatTensor = None
709
+ mc_logits: torch.FloatTensor = None
710
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
711
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
712
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
713
+
714
+
715
+ GPT2_START_DOCSTRING = r"""
716
+
717
+ This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
718
+ methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
719
+ pruning heads etc.)
720
+
721
+ This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
722
+ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
723
+ general usage and behavior.
724
+
725
+ Parameters:
726
+ config (:class:`~transformers.GPT2Config`): Model configuration class with all the parameters of the model.
727
+ Initializing with a config file does not load the weights associated with the model, only the
728
+ configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
729
+ weights.
730
+ """
731
+
732
+ GPT2_INPUTS_DOCSTRING = r"""
733
+ Args:
734
+ input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`):
735
+ :obj:`input_ids_length` = ``sequence_length`` if :obj:`past_key_values` is ``None`` else
736
+ ``past_key_values[0][0].shape[-2]`` (``sequence_length`` of input past key value states). Indices of input
737
+ sequence tokens in the vocabulary.
738
+
739
+ If :obj:`past_key_values` is used, only ``input_ids`` that do not have their past calculated should be
740
+ passed as ``input_ids``.
741
+
742
+ Indices can be obtained using :class:`~transformers.GPT2Tokenizer`. See
743
+ :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
744
+ details.
745
+
746
+ `What are input IDs? <../glossary.html#input-ids>`__
747
+ past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers`):
748
+ Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
749
+ :obj:`past_key_values` output below). Can be used to speed up sequential decoding. The ``input_ids`` which
750
+ have their past given to this model should not be passed as ``input_ids`` as they have already been
751
+ computed.
752
+ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
753
+ Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
754
+
755
+ - 1 for tokens that are **not masked**,
756
+ - 0 for tokens that are **masked**.
757
+
758
+ `What are attention masks? <../glossary.html#attention-mask>`__
759
+ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`, `optional`):
760
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
761
+ 1]``:
762
+
763
+ - 0 corresponds to a `sentence A` token,
764
+ - 1 corresponds to a `sentence B` token.
765
+
766
+ `What are token type IDs? <../glossary.html#token-type-ids>`_
767
+ position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
768
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
769
+ config.max_position_embeddings - 1]``.
770
+
771
+ `What are position IDs? <../glossary.html#position-ids>`_
772
+ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
773
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
774
+
775
+ - 1 indicates the head is **not masked**,
776
+ - 0 indicates the head is **masked**.
777
+
778
+ inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
779
+ Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
780
+ This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
781
+ vectors than the model's internal embedding lookup matrix.
782
+
783
+ If :obj:`past_key_values` is used, optionally only the last :obj:`inputs_embeds` have to be input (see
784
+ :obj:`past_key_values`).
785
+ use_cache (:obj:`bool`, `optional`):
786
+ If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
787
+ decoding (see :obj:`past_key_values`).
788
+ output_attentions (:obj:`bool`, `optional`):
789
+ Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
790
+ tensors for more detail.
791
+ output_hidden_states (:obj:`bool`, `optional`):
792
+ Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
793
+ more detail.
794
+ return_dict (:obj:`bool`, `optional`):
795
+ Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
796
+ """
797
+ PARALLELIZE_DOCSTRING = r"""
798
+ This is an experimental feature and is subject to change at a moment's notice.
799
+
800
+ Uses a device map to distribute attention modules of the model across several devices. If no device map is given,
801
+ it will evenly distribute blocks across all devices.
802
+
803
+ Args:
804
+ device_map (:obj:`Dict[int, list]`, optional, defaults to None):
805
+ A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
806
+ automatically mapped to the first device (for esoteric reasons). That means that the first device should
807
+ have fewer attention modules mapped to it than other devices. For reference, the gpt2 models have the
808
+ following number of attention modules:
809
+
810
+ - gpt2: 12
811
+ - gpt2-medium: 24
812
+ - gpt2-large: 36
813
+ - gpt2-xl: 48
814
+
815
+ Example::
816
+
817
+ # Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules:
818
+ model = GPT2LMHeadModel.from_pretrained('gpt2-xl')
819
+ device_map = {0: [0, 1, 2, 3, 4, 5, 6, 7, 8],
820
+ 1: [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],
822
+ 2: [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34],
823
+ 3: [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]}
824
+ model.parallelize(device_map)
825
+ """
826
+ DEPARALLELIZE_DOCSTRING = r"""
827
+ Moves the model to cpu from a model parallel state.
828
+
829
+ Example::
830
+
831
+ # On a 4 GPU machine with gpt2-large:
832
+ model = GPT2LMHeadModel.from_pretrained('gpt2-large')
833
+ device_map = {0: [0, 1, 2, 3, 4, 5, 6, 7],
834
+ 1: [8, 9, 10, 11, 12, 13, 14, 15],
836
+ 2: [16, 17, 18, 19, 20, 21, 22, 23],
837
+ 3: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]}
838
+ model.parallelize(device_map) # Splits the model across several devices
839
+ model.deparallelize() # Puts the model back on cpu and cleans memory by calling torch.cuda.empty_cache()
840
+ """
841
+
842
+
843
+ @add_start_docstrings(
844
+ "The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.",
845
+ GPT2_START_DOCSTRING,
846
+ )
847
+ class GPT2Model(GPT2PreTrainedModel):
848
+ _keys_to_ignore_on_load_missing = ["attn.masked_bias"]
849
+
850
+ def __init__(self, config):
851
+ super().__init__(config)
852
+
853
+ self.embed_dim = config.hidden_size
854
+
855
+ self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
856
+ self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
857
+
858
+ self.drop = nn.Dropout(config.embd_pdrop)
859
+ self.h = nn.ModuleList([GPT2Block(config) for _ in range(config.num_hidden_layers)])
860
+ self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
861
+
862
+ self.init_weights()
863
+
864
+ # Model parallel
865
+ self.model_parallel = False
866
+ self.device_map = None
867
+ self.gradient_checkpointing = False
868
+ # ALiBi attention biases (https://arxiv.org/abs/2108.12409), built once at init.
+ # `seq_length`, `micro_batch_size` and `params_dtype` are training-side settings;
+ # read them off the config when present, with conservative fallbacks otherwise.
869
+ if getattr(config, "position_embedding_type", None) == "alibi":
+ seq_length = getattr(config, "seq_length", config.max_position_embeddings)
+ micro_batch_size = getattr(config, "micro_batch_size", 1)
870
+ self.alibi = self._build_alibi_tensor(seq_length, config.num_attention_heads, micro_batch_size)
871
+ if torch.cuda.is_available():
+ self.alibi = self.alibi.to(torch.cuda.current_device())
872
+ params_dtype = getattr(config, "params_dtype", None)
873
+ if params_dtype in (torch.float16, torch.bfloat16):
874
+ self.alibi = self.alibi.to(params_dtype)
875
+ else:
876
+ self.alibi = None
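+ # When set, self.alibi has shape (micro_batch_size * num_attention_heads, 1, seq_length)
+ # and is handed to every GPT2Block in forward() as an additive attention bias.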
877
+
878
+ @staticmethod
879
+ def _build_alibi_tensor(max_seq_len, num_attention_heads, batch_size):
880
+ # Based on https://github.com/ofirpress/attention_with_linear_biases/blob/a35aaca144e0eb6b789dfcb46784c4b8e31b7983/fairseq/models/transformer.py#L742
881
+ """Returns tensor shaped (batch_size * num_attention_heads, 1, max_seq_len)"""
882
+ def get_slopes(n):
883
+ def get_slopes_power_of_2(n):
884
+ start = (2 ** (-2 ** -(math.log2(n) - 3)))
885
+ ratio = start
886
+ return [start * ratio ** i for i in range(n)]
887
+
888
+ if math.log2(n).is_integer():
889
+ return get_slopes_power_of_2(n)
890
+ else:
891
+ closest_power_of_2 = 2 ** math.floor(math.log2(n))
892
+ return get_slopes_power_of_2(closest_power_of_2) + get_slopes(2 * closest_power_of_2)[0::2][
893
+ :n - closest_power_of_2]
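+ # e.g. num_attention_heads=12: use the 8 power-of-2 slopes, then every other
+ # slope of the 16-head schedule (4 extra), keeping the series roughly geometric.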
894
+ slopes = torch.Tensor(get_slopes(num_attention_heads))
895
+ alibi = slopes.unsqueeze(1).unsqueeze(1) * torch.arange(max_seq_len).unsqueeze(0).unsqueeze(0).expand(num_attention_heads, -1, -1)
896
+ alibi = alibi.repeat(batch_size, 1, 1)
897
+ return alibi
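+ # Illustrative check (not part of the model): with num_attention_heads=8 the slopes
+ # are 1/2, 1/4, ..., 1/256, and _build_alibi_tensor(4, 8, 2) returns shape (16, 1, 4)
+ # whose first row is 0.5 * [0, 1, 2, 3] = [[0.0, 0.5, 1.0, 1.5]].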
898
+
899
+
900
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
901
+ def parallelize(self, device_map=None):
902
+ # Check validity of device_map
903
+ self.device_map = (
904
+ get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
905
+ )
906
+ assert_device_map(self.device_map, len(self.h))
907
+ self.model_parallel = True
908
+ self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
909
+ self.last_device = "cuda:" + str(max(self.device_map.keys()))
910
+ self.wte = self.wte.to(self.first_device)
911
+ self.wpe = self.wpe.to(self.first_device)
912
+ # Load onto devices
913
+ for k, v in self.device_map.items():
914
+ for block in v:
915
+ cuda_device = "cuda:" + str(k)
916
+ self.h[block] = self.h[block].to(cuda_device)
917
+ # ln_f to last
918
+ self.ln_f = self.ln_f.to(self.last_device)
919
+
920
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
921
+ def deparallelize(self):
922
+ self.model_parallel = False
923
+ self.device_map = None
924
+ self.first_device = "cpu"
925
+ self.last_device = "cpu"
926
+ self.wte = self.wte.to("cpu")
927
+ self.wpe = self.wpe.to("cpu")
928
+ for index in range(len(self.h)):
929
+ self.h[index] = self.h[index].to("cpu")
930
+ self.ln_f = self.ln_f.to("cpu")
931
+ torch.cuda.empty_cache()
932
+
933
+ def get_input_embeddings(self):
934
+ return self.wte
935
+
936
+ def set_input_embeddings(self, new_embeddings):
937
+ self.wte = new_embeddings
938
+
939
+ def _prune_heads(self, heads_to_prune):
940
+ """
941
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
942
+ """
943
+ for layer, heads in heads_to_prune.items():
944
+ self.h[layer].attn.prune_heads(heads)
945
+
946
+ @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
947
+ @add_code_sample_docstrings(
948
+ tokenizer_class=_TOKENIZER_FOR_DOC,
949
+ checkpoint=_CHECKPOINT_FOR_DOC,
950
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
951
+ config_class=_CONFIG_FOR_DOC,
952
+ )
953
+ def forward(
954
+ self,
955
+ input_ids=None,
956
+ past_key_values=None,
957
+ attention_mask=None,
958
+ token_type_ids=None,
959
+ position_ids=None,
960
+ head_mask=None,
961
+ inputs_embeds=None,
962
+ encoder_hidden_states=None,
963
+ encoder_attention_mask=None,
964
+ use_cache=None,
965
+ output_attentions=None,
966
+ output_hidden_states=None,
967
+ return_dict=None,
968
+ prefix_lm_token_id=None,
969
+ ):
970
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
971
+ output_hidden_states = (
972
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
973
+ )
974
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
975
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
976
+
977
+ if input_ids is not None and inputs_embeds is not None:
978
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
979
+ elif input_ids is not None:
980
+ input_shape = input_ids.size()
981
+ input_ids = input_ids.view(-1, input_shape[-1])
982
+ batch_size = input_ids.shape[0]
983
+ elif inputs_embeds is not None:
984
+ input_shape = inputs_embeds.size()[:-1]
985
+ batch_size = inputs_embeds.shape[0]
986
+ else:
987
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
988
+
989
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
990
+
991
+ if token_type_ids is not None:
992
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
993
+ if position_ids is not None:
994
+ position_ids = position_ids.view(-1, input_shape[-1])
995
+
996
+ if past_key_values is None:
997
+ past_length = 0
998
+ past_key_values = tuple([None] * len(self.h))
999
+ else:
1000
+ past_length = past_key_values[0][0].size(-2)
1001
+ if position_ids is None:
1002
+ position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
1003
+ position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
1004
+
1005
+ # GPT2Attention mask.
1006
+ if attention_mask is not None:
1007
+ if batch_size <= 0:
1008
+ raise ValueError("batch_size has to be defined and > 0")
1009
+ attention_mask = attention_mask.view(batch_size, -1)
1010
+ # Prefix-LM masking: the first occurrence of prefix_lm_token_id marks the prefix
+ # boundary, and every position before it is unmasked so the whole prefix stays
+ # visible as keys to the attention blocks.
1011
+ if prefix_lm_token_id is not None and input_ids is not None:
1012
+ for attention_mask_row, input_ids_row in zip(attention_mask, input_ids): # iterate over the batch dimension
1013
+ boundary = (input_ids_row == prefix_lm_token_id).nonzero(as_tuple=True)[0]
+ if boundary.numel() > 0: # attention_mask_row is 1D of shape (seq_len,): index with a scalar
+ attention_mask_row[: boundary[0]] = 1.0
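+ # Worked example (illustrative): with prefix_lm_token_id=50257 and
+ # input_ids_row = [12, 34, 50257, 56, 78], boundary[0] == 2, so
+ # attention_mask_row[:2] is set to 1.0 and both prefix tokens stay visible as keys.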
1014
+
1015
+ # We create a 3D attention mask from a 2D tensor mask.
1016
+ # Sizes are [batch_size, 1, 1, to_seq_length]
1017
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
1018
+ # this attention mask is more simple than the triangular masking of causal attention
1019
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
1020
+ attention_mask = attention_mask[:, None, None, :]
1021
+
1022
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
1023
+ # masked positions, this operation will create a tensor which is 0.0 for
1024
+ # positions we want to attend and -10000.0 for masked positions.
1025
+ # Since we are adding it to the raw scores before the softmax, this is
1026
+ # effectively the same as removing these entirely.
1027
+ attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
1028
+ attention_mask = (1.0 - attention_mask) * -10000.0
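+ # e.g. a padding mask [1, 1, 0] becomes additive biases [0.0, 0.0, -10000.0],
+ # driving the masked position's post-softmax weight to ~0.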
1029
+
1030
+ # If a 2D or 3D attention mask is provided for the cross-attention
1031
+ # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
1032
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
1033
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
1034
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
1035
+ if encoder_attention_mask is None:
1036
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
1037
+ encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
1038
+ else:
1039
+ encoder_attention_mask = None
1040
+
1041
+ # Prepare head mask if needed
1042
+ # 1.0 in head_mask indicate we keep the head
1043
+ # attention_probs has shape bsz x n_heads x N x N
1044
+ # head_mask has shape n_layer x batch x n_heads x N x N
1045
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
1046
+
1047
+ if inputs_embeds is None:
1048
+ inputs_embeds = self.wte(input_ids)
1049
+ position_embeds = self.wpe(position_ids)
1050
+ hidden_states = inputs_embeds + position_embeds
1051
+
1052
+ if token_type_ids is not None:
1053
+ token_type_embeds = self.wte(token_type_ids)
1054
+ hidden_states = hidden_states + token_type_embeds
1055
+
1056
+ hidden_states = self.drop(hidden_states)
1057
+
1058
+ output_shape = input_shape + (hidden_states.size(-1),)
1059
+
1060
+ presents = () if use_cache else None
1061
+ all_self_attentions = () if output_attentions else None
1062
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
1063
+ all_hidden_states = () if output_hidden_states else None
1064
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
1065
+
1066
+ # Model parallel
1067
+ if self.model_parallel:
1068
+ torch.cuda.set_device(hidden_states.device)
1069
+ # Ensure layer_past is on same device as hidden_states (might not be correct)
1070
+ if layer_past is not None:
1071
+ layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
1072
+ # Ensure that attention_mask is always on the same device as hidden_states
1073
+ if attention_mask is not None:
1074
+ attention_mask = attention_mask.to(hidden_states.device)
1075
+ if isinstance(head_mask, torch.Tensor):
1076
+ head_mask = head_mask.to(hidden_states.device)
1077
+ if output_hidden_states:
1078
+ all_hidden_states = all_hidden_states + (hidden_states,)
1079
+
1080
+ if self.gradient_checkpointing and self.training:
1081
+
1082
+ if use_cache:
1083
+ logger.warning(
1084
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
1085
+ )
1086
+ use_cache = False
1087
+
1088
+ def create_custom_forward(module):
1089
+ def custom_forward(*inputs):
1090
+ # None for past_key_value
1091
+ return module(*inputs, use_cache, output_attentions)
1092
+
1093
+ return custom_forward
1094
+
1095
+ outputs = torch.utils.checkpoint.checkpoint(
1096
+ create_custom_forward(block),
1097
+ hidden_states,
1098
+ None,
1099
+ attention_mask,
1100
+ head_mask[i],
1101
+ encoder_hidden_states,
1102
+ encoder_attention_mask,
1103
+ self.alibi
1104
+ )
1105
+ else:
1106
+ outputs = block(
1107
+ hidden_states,
1108
+ layer_past=layer_past,
1109
+ attention_mask=attention_mask,
1110
+ head_mask=head_mask[i],
1111
+ encoder_hidden_states=encoder_hidden_states,
1112
+ encoder_attention_mask=encoder_attention_mask,
1113
+ use_cache=use_cache,
1114
+ output_attentions=output_attentions,
1115
+ alibi=self.alibi
1116
+ )
1117
+
1118
+ hidden_states = outputs[0]
1119
+ if use_cache is True:
1120
+ presents = presents + (outputs[1],)
1121
+
1122
+ if output_attentions:
1123
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
1124
+ if self.config.add_cross_attention:
1125
+ all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
1126
+
1127
+ # Model Parallel: If it's the last layer for that device, put things on the next device
1128
+ if self.model_parallel:
1129
+ for k, v in self.device_map.items():
1130
+ if i == v[-1] and "cuda:" + str(k) != self.last_device:
1131
+ hidden_states = hidden_states.to("cuda:" + str(k + 1))
1132
+
1133
+ hidden_states = self.ln_f(hidden_states)
1134
+
1135
+ hidden_states = hidden_states.view(*output_shape)
1136
+ # Add last hidden state
1137
+ if output_hidden_states:
1138
+ all_hidden_states = all_hidden_states + (hidden_states,)
1139
+
1140
+ if not return_dict:
1141
+ return tuple(
1142
+ v
1143
+ for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
1144
+ if v is not None
1145
+ )
1146
+
1147
+ return BaseModelOutputWithPastAndCrossAttentions(
1148
+ last_hidden_state=hidden_states,
1149
+ past_key_values=presents,
1150
+ hidden_states=all_hidden_states,
1151
+ attentions=all_self_attentions,
1152
+ cross_attentions=all_cross_attentions,
1153
+ )
1154
+
1155
+
1156
+ @add_start_docstrings(
1157
+ """
1158
+ The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input
1159
+ embeddings).
1160
+ """,
1161
+ GPT2_START_DOCSTRING,
1162
+ )
1163
+ class GPT2LMHeadModel(GPT2PreTrainedModel):
1164
+ _keys_to_ignore_on_load_missing = [r"attn.masked_bias", r"attn.bias", r"lm_head.weight"]
1165
+
1166
+ def __init__(self, config):
1167
+ super().__init__(config)
1168
+ self.transformer = GPT2Model(config)
1169
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
1170
+
1171
+ self.init_weights()
1172
+
1173
+ # Model parallel
1174
+ self.model_parallel = False
1175
+ self.device_map = None
1176
+
1177
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
1178
+ def parallelize(self, device_map=None):
1179
+ self.device_map = (
1180
+ get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
1181
+ if device_map is None
1182
+ else device_map
1183
+ )
1184
+ assert_device_map(self.device_map, len(self.transformer.h))
1185
+ self.transformer.parallelize(self.device_map)
1186
+ self.lm_head = self.lm_head.to(self.transformer.first_device)
1187
+ self.model_parallel = True
1188
+
1189
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
1190
+ def deparallelize(self):
1191
+ self.transformer.deparallelize()
1192
+ self.transformer = self.transformer.to("cpu")
1193
+ self.lm_head = self.lm_head.to("cpu")
1194
+ self.model_parallel = False
1195
+ torch.cuda.empty_cache()
1196
+
1197
+ def get_output_embeddings(self):
1198
+ return self.lm_head
1199
+
1200
+ def set_output_embeddings(self, new_embeddings):
1201
+ self.lm_head = new_embeddings
1202
+
1203
+ def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
1204
+ token_type_ids = kwargs.get("token_type_ids", None)
1205
+ # only last token for inputs_ids if past is defined in kwargs
1206
+ if past:
1207
+ input_ids = input_ids[:, -1].unsqueeze(-1)
1208
+ if token_type_ids is not None:
1209
+ token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
1210
+
1211
+ attention_mask = kwargs.get("attention_mask", None)
1212
+ position_ids = kwargs.get("position_ids", None)
1213
+
1214
+ if attention_mask is not None and position_ids is None:
1215
+ # create position_ids on the fly for batch generation
1216
+ position_ids = attention_mask.long().cumsum(-1) - 1
1217
+ position_ids.masked_fill_(attention_mask == 0, 1)
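+ # e.g. attention_mask [[0, 1, 1]] -> cumsum(-1) - 1 = [[-1, 0, 1]] -> masked_fill -> [[1, 0, 1]]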
1218
+ if past:
1219
+ position_ids = position_ids[:, -1].unsqueeze(-1)
1220
+ else:
1221
+ position_ids = None
1222
+ return {
1223
+ "input_ids": input_ids,
1224
+ "past_key_values": past,
1225
+ "use_cache": kwargs.get("use_cache"),
1226
+ "position_ids": position_ids,
1227
+ "attention_mask": attention_mask,
1228
+ "token_type_ids": token_type_ids,
1229
+ }
1230
+
1231
+ @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
1232
+ @add_code_sample_docstrings(
1233
+ tokenizer_class=_TOKENIZER_FOR_DOC,
1234
+ checkpoint=_CHECKPOINT_FOR_DOC,
1235
+ output_type=CausalLMOutputWithCrossAttentions,
1236
+ config_class=_CONFIG_FOR_DOC,
1237
+ )
1238
+ def forward(
1239
+ self,
1240
+ input_ids=None,
1241
+ past_key_values=None,
1242
+ attention_mask=None,
1243
+ token_type_ids=None,
1244
+ position_ids=None,
1245
+ head_mask=None,
1246
+ inputs_embeds=None,
1247
+ encoder_hidden_states=None,
1248
+ encoder_attention_mask=None,
1249
+ labels=None,
1250
+ use_cache=None,
1251
+ output_attentions=None,
1252
+ output_hidden_states=None,
1253
+ return_dict=None,
+ prefix_lm_token_id=None,
1254
+ ):
1255
+ r"""
1256
+ labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
1257
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
1258
+ ``labels = input_ids``. Indices are selected in ``[-100, 0, ..., config.vocab_size - 1]``. All labels set to
1259
+ ``-100`` are ignored (masked); the loss is only computed for labels in ``[0, ..., config.vocab_size - 1]``
1260
+ """
1261
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1262
+
1263
+ transformer_outputs = self.transformer(
1264
+ input_ids,
1265
+ past_key_values=past_key_values,
1266
+ attention_mask=attention_mask,
1267
+ token_type_ids=token_type_ids,
1268
+ position_ids=position_ids,
1269
+ head_mask=head_mask,
1270
+ inputs_embeds=inputs_embeds,
1271
+ encoder_hidden_states=encoder_hidden_states,
1272
+ encoder_attention_mask=encoder_attention_mask,
1273
+ use_cache=use_cache,
1274
+ output_attentions=output_attentions,
1275
+ output_hidden_states=output_hidden_states,
1276
+ return_dict=return_dict,
+ prefix_lm_token_id=prefix_lm_token_id,
1277
+ )
1278
+ hidden_states = transformer_outputs[0]
1279
+
1280
+ # Set device for model parallelism
1281
+ if self.model_parallel:
1282
+ torch.cuda.set_device(self.transformer.first_device)
1283
+ hidden_states = hidden_states.to(self.lm_head.weight.device)
1284
+
1285
+ lm_logits = self.lm_head(hidden_states)
1286
+
1287
+ loss = None
1288
+ if labels is not None:
1289
+ # Shift so that tokens < n predict n
1290
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1291
+ shift_labels = labels[..., 1:].contiguous()
1292
+ # Flatten the tokens
1293
+ loss_fct = CrossEntropyLoss()
1294
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
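+ # e.g. for labels [t0, t1, t2, t3], the logits at positions 0..2 are scored
+ # against t1..t3; label values of -100 are ignored by CrossEntropyLoss.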
1295
+
1296
+ if not return_dict:
1297
+ output = (lm_logits,) + transformer_outputs[1:]
1298
+ return ((loss,) + output) if loss is not None else output
1299
+
1300
+ return CausalLMOutputWithCrossAttentions(
1301
+ loss=loss,
1302
+ logits=lm_logits,
1303
+ past_key_values=transformer_outputs.past_key_values,
1304
+ hidden_states=transformer_outputs.hidden_states,
1305
+ attentions=transformer_outputs.attentions,
1306
+ cross_attentions=transformer_outputs.cross_attentions,
1307
+ )
1308
+
1309
+ @staticmethod
1310
+ def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
1311
+ """
1312
+ This function is used to re-order the :obj:`past_key_values` cache if
1313
+ :meth:`~transformers.PreTrainedModel.beam_search` or :meth:`~transformers.PreTrainedModel.beam_sample` is
1314
+ called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
1315
+ """
1316
+ return tuple(
1317
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
1318
+ for layer_past in past
1319
+ )
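+ # e.g. beam_idx = tensor([2, 0, 1]) permutes the batch dimension of every cached
+ # key/value so beam 0 continues from what was previously beam 2, and so on.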
1320
+
1321
+
1322
+ @add_start_docstrings(
1323
+ """
1324
+ The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for
1325
+ RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the
1326
+ input embeddings, the classification head takes as input the input of a specified classification token index in the
1327
+ input sequence).
1328
+ """,
1329
+ GPT2_START_DOCSTRING,
1330
+ )
1331
+ class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
1332
+ _keys_to_ignore_on_load_missing = [r"attn.masked_bias", r"attn.bias", r"lm_head.weight"]
1333
+
1334
+ def __init__(self, config):
1335
+ super().__init__(config)
1336
+ config.num_labels = 1
1337
+ self.transformer = GPT2Model(config)
1338
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
1339
+ self.multiple_choice_head = SequenceSummary(config)
1340
+
1341
+ self.init_weights()
1342
+
1343
+ # Model parallel
1344
+ self.model_parallel = False
1345
+ self.device_map = None
1346
+
1347
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
1348
+ def parallelize(self, device_map=None):
1349
+ self.device_map = (
1350
+ get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
1351
+ if device_map is None
1352
+ else device_map
1353
+ )
1354
+ assert_device_map(self.device_map, len(self.transformer.h))
1355
+ self.transformer.parallelize(self.device_map)
1356
+ self.lm_head = self.lm_head.to(self.transformer.first_device)
1357
+ self.multiple_choice_head = self.multiple_choice_head.to(self.transformer.first_device)
1358
+ self.model_parallel = True
1359
+
1360
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
1361
+ def deparallelize(self):
1362
+ self.transformer.deparallelize()
1363
+ self.transformer = self.transformer.to("cpu")
1364
+ self.lm_head = self.lm_head.to("cpu")
1365
+ self.multiple_choice_head = self.multiple_choice_head.to("cpu")
1366
+ self.model_parallel = False
1367
+ torch.cuda.empty_cache()
1368
+
1369
+ def get_output_embeddings(self):
1370
+ return self.lm_head
1371
+
1372
+ def set_output_embeddings(self, new_embeddings):
1373
+ self.lm_head = new_embeddings
1374
+
1375
+ def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
1376
+ token_type_ids = kwargs.get("token_type_ids", None)
1377
+ # only last token for inputs_ids if past is defined in kwargs
1378
+ if past:
1379
+ input_ids = input_ids[:, -1].unsqueeze(-1)
1380
+ if token_type_ids is not None:
1381
+ token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
1382
+
1383
+ attention_mask = kwargs.get("attention_mask", None)
1384
+ position_ids = kwargs.get("position_ids", None)
1385
+
1386
+ if attention_mask is not None and position_ids is None:
1387
+ # create position_ids on the fly for batch generation
1388
+ position_ids = attention_mask.long().cumsum(-1) - 1
1389
+ position_ids.masked_fill_(attention_mask == 0, 1)
1390
+ if past:
1391
+ position_ids = position_ids[:, -1].unsqueeze(-1)
1392
+ else:
1393
+ position_ids = None
1394
+
1395
+ return {
1396
+ "input_ids": input_ids,
1397
+ "past_key_values": past,
1398
+ "use_cache": kwargs.get("use_cache"),
1399
+ "position_ids": position_ids,
1400
+ "attention_mask": attention_mask,
1401
+ "token_type_ids": token_type_ids,
1402
+ }
1403
+
1404
+ @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
1405
+ @replace_return_docstrings(output_type=GPT2DoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)
1406
+ def forward(
1407
+ self,
1408
+ input_ids=None,
1409
+ past_key_values=None,
1410
+ attention_mask=None,
1411
+ token_type_ids=None,
1412
+ position_ids=None,
1413
+ head_mask=None,
1414
+ inputs_embeds=None,
1415
+ mc_token_ids=None,
1416
+ labels=None,
1417
+ mc_labels=None,
1418
+ use_cache=None,
1419
+ output_attentions=None,
1420
+ output_hidden_states=None,
1421
+ return_dict=None,
1422
+ **kwargs,
1423
+ ):
1424
+ r"""
1425
+ mc_token_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_choices)`, `optional`, default to index of the last token of the input):
1426
+ Index of the classification token in each input sequence. Selected in the range ``[0, input_ids.size(-1) -
1427
+ 1[``.
1428
+ labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
1429
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
1430
+ ``labels = input_ids`` Indices are selected in ``[-100, 0, ..., config.vocab_size - 1]`` All labels set to
1431
+ ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size - 1]``
1432
+ mc_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size)`, `optional`):
1433
+ Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
1434
+ num_choices]`` where `num_choices` is the size of the second dimension of the input tensors. (see
1435
+ `input_ids` above)
1436
+
1437
+ Return:
1438
+
1439
+ Example::
1440
+
1441
+ >>> import torch
1442
+ >>> from transformers import GPT2Tokenizer, GPT2DoubleHeadsModel
1443
+
1444
+ >>> tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
1445
+ >>> model = GPT2DoubleHeadsModel.from_pretrained('gpt2')
1446
+
1447
+ >>> # Add a [CLS] to the vocabulary (we should train it also!)
1448
+ >>> num_added_tokens = tokenizer.add_special_tokens({'cls_token': '[CLS]'})
1449
+
1450
+ >>> embedding_layer = model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size
1451
+
1452
+ >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
1453
+ >>> encoded_choices = [tokenizer.encode(s) for s in choices]
1454
+ >>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]
1455
+
1456
+ >>> input_ids = torch.tensor(encoded_choices).unsqueeze(0) # Batch size: 1, number of choices: 2
1457
+ >>> mc_token_ids = torch.tensor([cls_token_location]) # Batch size: 1
1458
+
1459
+ >>> outputs = model(input_ids, mc_token_ids=mc_token_ids)
1460
+ >>> lm_logits = outputs.logits
1461
+ >>> mc_logits = outputs.mc_logits
1462
+
1463
+ """
1464
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1465
+
1466
+ transformer_outputs = self.transformer(
1467
+ input_ids,
1468
+ past_key_values=past_key_values,
1469
+ attention_mask=attention_mask,
1470
+ token_type_ids=token_type_ids,
1471
+ position_ids=position_ids,
1472
+ head_mask=head_mask,
1473
+ inputs_embeds=inputs_embeds,
1474
+ use_cache=use_cache,
1475
+ output_attentions=output_attentions,
1476
+ output_hidden_states=output_hidden_states,
1477
+ return_dict=return_dict,
1478
+ )
1479
+
1480
+ hidden_states = transformer_outputs[0]
1481
+
1482
+ # Set device for model parallelism
1483
+ if self.model_parallel:
1484
+ torch.cuda.set_device(self.transformer.first_device)
1485
+ hidden_states = hidden_states.to(self.lm_head.weight.device)
1486
+
1487
+ lm_logits = self.lm_head(hidden_states)
1488
+ mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
1489
+
1490
+ mc_loss = None
1491
+ if mc_labels is not None:
1492
+ loss_fct = CrossEntropyLoss()
1493
+ mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
1494
+ lm_loss = None
1495
+ if labels is not None:
1496
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1497
+ shift_labels = labels[..., 1:].contiguous()
1498
+ loss_fct = CrossEntropyLoss()
1499
+ lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
1500
+
1501
+ if not return_dict:
1502
+ output = (lm_logits, mc_logits) + transformer_outputs[1:]
1503
+ if mc_loss is not None:
1504
+ output = (mc_loss,) + output
1505
+ return ((lm_loss,) + output) if lm_loss is not None else output
1506
+
1507
+ return GPT2DoubleHeadsModelOutput(
1508
+ loss=lm_loss,
1509
+ mc_loss=mc_loss,
1510
+ logits=lm_logits,
1511
+ mc_logits=mc_logits,
1512
+ past_key_values=transformer_outputs.past_key_values,
1513
+ hidden_states=transformer_outputs.hidden_states,
1514
+ attentions=transformer_outputs.attentions,
1515
+ )
1516
+
1517
+ @staticmethod
1518
+ def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
1519
+ """
1520
+ This function is used to re-order the :obj:`past_key_values` cache if
1521
+ :meth:`~transformers.PreTrainedModel.beam_search` or :meth:`~transformers.PreTrainedModel.beam_sample` is
1522
+ called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
1523
+ """
1524
+ return tuple(
1525
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
1526
+ for layer_past in past
1527
+ )
1528
+
1529
+
1530
+ @add_start_docstrings(
1531
+ """
1532
+ The GPT2 Model transformer with a sequence classification head on top (linear layer).
1533
+
1534
+ :class:`~transformers.GPT2ForSequenceClassification` uses the last token in order to do the classification, as
1535
+ other causal models (e.g. GPT-1) do.
1536
+
1537
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1538
+ :obj:`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each
1539
+ row. If no :obj:`pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot
1540
+ guess the padding tokens when :obj:`inputs_embeds` are passed instead of :obj:`input_ids`, it does the same (take
1541
+ the last value in each row of the batch).
1542
+ """,
1543
+ GPT2_START_DOCSTRING,
1544
+ )
1545
+ class GPT2ForSequenceClassification(GPT2PreTrainedModel):
1546
+ _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"lm_head\.weight"]
1547
+
1548
+ def __init__(self, config):
1549
+ super().__init__(config)
1550
+ self.num_labels = config.num_labels
1551
+ self.transformer = GPT2Model(config)
1552
+ self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
1553
+
1554
+ self.init_weights()
1555
+
1556
+ # Model parallel
1557
+ self.model_parallel = False
1558
+ self.device_map = None
1559
+
1560
+ @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
1561
+ @add_code_sample_docstrings(
1562
+ tokenizer_class=_TOKENIZER_FOR_DOC,
1563
+ checkpoint="microsoft/DialogRPT-updown",
1564
+ output_type=SequenceClassifierOutputWithPast,
1565
+ config_class=_CONFIG_FOR_DOC,
1566
+ )
1567
+ def forward(
1568
+ self,
1569
+ input_ids=None,
1570
+ past_key_values=None,
1571
+ attention_mask=None,
1572
+ token_type_ids=None,
1573
+ position_ids=None,
1574
+ head_mask=None,
1575
+ inputs_embeds=None,
1576
+ labels=None,
1577
+ use_cache=None,
1578
+ output_attentions=None,
1579
+ output_hidden_states=None,
1580
+ return_dict=None,
1581
+ ):
1582
+ r"""
1583
+ labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
1584
+ Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
1585
+ config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
1586
+ If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1587
+ """
1588
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1589
+
1590
+ transformer_outputs = self.transformer(
1591
+ input_ids,
1592
+ past_key_values=past_key_values,
1593
+ attention_mask=attention_mask,
1594
+ token_type_ids=token_type_ids,
1595
+ position_ids=position_ids,
1596
+ head_mask=head_mask,
1597
+ inputs_embeds=inputs_embeds,
1598
+ use_cache=use_cache,
1599
+ output_attentions=output_attentions,
1600
+ output_hidden_states=output_hidden_states,
1601
+ return_dict=return_dict,
1602
+ )
1603
+ hidden_states = transformer_outputs[0]
1604
+ logits = self.score(hidden_states)
1605
+
1606
+ if input_ids is not None:
1607
+ batch_size, sequence_length = input_ids.shape[:2]
1608
+ else:
1609
+ batch_size, sequence_length = inputs_embeds.shape[:2]
1610
+
1611
+ assert (
1612
+ self.config.pad_token_id is not None or batch_size == 1
1613
+ ), "Cannot handle batch sizes > 1 if no padding token is defined."
1614
+ if self.config.pad_token_id is None:
1615
+ sequence_lengths = -1
1616
+ else:
1617
+ if input_ids is not None:
1618
+ sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1
1619
+ else:
1620
+ sequence_lengths = -1
1621
+ logger.warning(
1622
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
1623
+ f"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
1624
+ )
1625
+
1626
+ pooled_logits = logits[range(batch_size), sequence_lengths]
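+ # e.g. with padding, input_ids [[5, 6, pad], [7, 8, 9]] gives sequence_lengths
+ # [1, 2], so each row is pooled at the logits of its last non-padding token.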
1627
+
1628
+ loss = None
1629
+ if labels is not None:
1630
+ if self.num_labels == 1:
1631
+ # We are doing regression
1632
+ loss_fct = MSELoss()
1633
+ loss = loss_fct(pooled_logits.view(-1), labels.to(self.dtype).view(-1))
1634
+ else:
1635
+ loss_fct = CrossEntropyLoss()
1636
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1637
+
1638
+ if not return_dict:
1639
+ output = (pooled_logits,) + transformer_outputs[1:]
1640
+ return ((loss,) + output) if loss is not None else output
1641
+
1642
+ return SequenceClassifierOutputWithPast(
1643
+ loss=loss,
1644
+ logits=pooled_logits,
1645
+ past_key_values=transformer_outputs.past_key_values,
1646
+ hidden_states=transformer_outputs.hidden_states,
1647
+ attentions=transformer_outputs.attentions,
1648
+ )
1649
+
1650
+
1651
+ @add_start_docstrings(
1652
+ """
1653
+ GPT2 Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1654
+ Named-Entity-Recognition (NER) tasks.
1655
+ """,
1656
+ GPT2_START_DOCSTRING,
1657
+ )
1658
+ class GPT2ForTokenClassification(GPT2PreTrainedModel):
1659
+ def __init__(self, config):
1660
+ super().__init__(config)
1661
+ self.num_labels = config.num_labels
1662
+
1663
+ self.transformer = GPT2Model(config)
1664
+ if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
1665
+ classifier_dropout = config.classifier_dropout
1666
+ elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
1667
+ classifier_dropout = config.hidden_dropout
1668
+ else:
1669
+ classifier_dropout = 0.1
1670
+ self.dropout = nn.Dropout(classifier_dropout)
1671
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1672
+
1673
+ self.init_weights()
1674
+
1675
+ # Model parallel
1676
+ self.model_parallel = False
1677
+ self.device_map = None
1678
+
1679
+ @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
1680
+ @add_code_sample_docstrings(
1681
+ tokenizer_class=_TOKENIZER_FOR_DOC,
1682
+ checkpoint="microsoft/DialogRPT-updown",
1683
+ output_type=TokenClassifierOutput,
1684
+ config_class=_CONFIG_FOR_DOC,
1685
+ )
1686
+ def forward(
1687
+ self,
1688
+ input_ids=None,
1689
+ past_key_values=None,
1690
+ attention_mask=None,
1691
+ token_type_ids=None,
1692
+ position_ids=None,
1693
+ head_mask=None,
1694
+ inputs_embeds=None,
1695
+ labels=None,
1696
+ use_cache=None,
1697
+ output_attentions=None,
1698
+ output_hidden_states=None,
1699
+ return_dict=None,
1700
+ ):
1701
+ r"""
1702
+ labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
1703
+ Labels for computing the token classification loss. Indices should be in :obj:`[0, ...,
1704
+ config.num_labels - 1]`.
1706
+ """
1707
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1708
+
1709
+ transformer_outputs = self.transformer(
1710
+ input_ids,
1711
+ past_key_values=past_key_values,
1712
+ attention_mask=attention_mask,
1713
+ token_type_ids=token_type_ids,
1714
+ position_ids=position_ids,
1715
+ head_mask=head_mask,
1716
+ inputs_embeds=inputs_embeds,
1717
+ use_cache=use_cache,
1718
+ output_attentions=output_attentions,
1719
+ output_hidden_states=output_hidden_states,
1720
+ return_dict=return_dict,
1721
+ )
1722
+
1723
+ hidden_states = transformer_outputs[0]
1724
+ hidden_states = self.dropout(hidden_states)
1725
+ logits = self.classifier(hidden_states)
1726
+
1727
+ loss = None
1728
+ if labels is not None:
1729
+ loss_fct = CrossEntropyLoss()
1730
+ # Only keep active parts of the loss
1731
+ if attention_mask is not None:
1732
+ active_loss = attention_mask.view(-1) == 1
1733
+ active_logits = logits.view(-1, self.num_labels)
1734
+ active_labels = torch.where(
1735
+ active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
1736
+ )
1737
+ loss = loss_fct(active_logits, active_labels)
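+ # e.g. attention_mask [1, 1, 0] keeps the first two token labels and rewrites the
+ # padded position's label to ignore_index (-100), so it contributes no loss.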
1738
+ else:
1739
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1740
+
1741
+ if not return_dict:
1742
+ output = (logits,) + transformer_outputs[2:]
1743
+ return ((loss,) + output) if loss is not None else output
1744
+
1745
+ return TokenClassifierOutput(
1746
+ loss=loss,
1747
+ logits=logits,
1748
+ hidden_states=transformer_outputs.hidden_states,
1749
+ attentions=transformer_outputs.attentions,
1750
+ )
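+ 
+ 
+ # Minimal usage sketch (illustrative only; "gpt2" stands in for whatever checkpoint
+ # this fork is paired with, and bos_token_id for a token id reserved as the prefix
+ # boundary; both are assumptions, not part of this file):
+ #
+ #   from transformers import GPT2Tokenizer
+ #   tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
+ #   model = GPT2LMHeadModel.from_pretrained("gpt2")
+ #   inputs = tokenizer("Translate to French: Hello", return_tensors="pt")
+ #   outputs = model(**inputs, prefix_lm_token_id=tokenizer.bos_token_id)
+ #   print(outputs.logits.shape)  # (1, sequence_length, vocab_size)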