applied-ai-018 committed
Commit f9bc5ed · verified · 1 parent: c186d25

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the rest.
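
The commit message below refers to the `upload-large-folder` tool that ships with `huggingface_hub`. A minimal sketch of how such a commit is typically produced (the repo id and local path here are placeholders, not taken from this page):

```bash
# Resumable, chunked upload of a large local folder to a Hub repo.
# <user>/<repo> and ./bigscience are placeholders; --repo-type must match the target repo.
huggingface-cli upload-large-folder <user>/<repo> ./bigscience --repo-type=model
```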
Files changed (50)
  1. .gitignore +111 -0
  2. CODEOWNERS +1 -0
  3. LICENSE +16 -0
  4. MANIFEST.in +9 -0
  5. Makefile +59 -0
  6. TODO.md +54 -0
  7. evaluation/results/tr11/bloom/mdmeta.txt +1595 -0
  8. evaluation/results/tr11/bloom1b3/bslmeval.json +2938 -0
  9. evaluation/results/tr11/bloom1b3/bslmevalfiles/tr11-1b3-ml-evalharness-results_lm-eval_global_step340500_2022-07-13-11-29-13.json +172 -0
  10. evaluation/results/tr11/bloom1b3/bslmevalfiles/tr11b-1b3-ml-bsevalharness-results_lm-eval_global_step340500_2022-07-12-22-45-57.json +0 -0
  11. evaluation/results/tr11/bloom1b3/bslmevalfiles/tr11b-1b3-ml-bsevalharness-results_lm-eval_global_step340500_2022-07-15-11-47-34.json +0 -0
  12. evaluation/results/tr11/bloom1b3/humaneval_temp02.json +1 -0
  13. evaluation/results/tr11/bloom1b3/humaneval_temp06.json +1 -0
  14. evaluation/results/tr11/bloom1b3/humaneval_temp08.json +1 -0
  15. evaluation/results/tr11/bloom2b5/bslmevalfiles/tr11c-2b5-ml-bsevalharness-results_lm-eval_global_step337250_2022-07-12-23-19-06.json +0 -0
  16. evaluation/results/tr11/bloom2b5/bslmevalfiles/tr11c-2b5-ml-bsevalharness-results_lm-eval_global_step337250_2022-07-13-19-42-29.json +1917 -0
  17. evaluation/results/tr11/bloom2b5/bslmevalfiles/tr11c-2b5-ml-bsevalharness-results_lm-eval_global_step337250_2022-07-14-13-10-19.json +0 -0
  18. evaluation/results/tr11/bloom2b5/bslmevalfiles/tr11c-2b5-ml-bsevalharness-results_lm-eval_global_step337250_2022-07-14-20-09-16.json +1255 -0
  19. evaluation/results/tr11/bloom350m/humaneval_temp02.json +1 -0
  20. evaluation/results/tr11/bloom350m/humaneval_temp06.json +1 -0
  21. evaluation/results/tr11/get_templates.sh +27 -0
  22. evaluation/results/tr11/opt/humaneval_temp02.json +1 -0
  23. evaluation/results/tr11/opt/humaneval_temp08.json +1 -0
  24. evaluation/results/tr11/scripts/download.py +21 -0
  25. evaluation/results/tr11/scripts/multi_eurlex_tmp.slurm +63 -0
  26. evaluation/results/tr11/scripts/report-to-csv.py +58 -0
  27. evaluation/results/tr11/scripts/run_bsevalharness_generation_176b.slurm +128 -0
  28. evaluation/results/tr11/scripts/run_bsevalharness_generation_350m.slurm +110 -0
  29. evaluation/results/tr11/scripts/run_bsevalharness_generation_760m.slurm +110 -0
  30. evaluation/results/tr11/scripts/run_bsevalharness_tr11c-2b5-ml.slurm +121 -0
  31. evaluation/results/tr11/scripts/run_bsevalharness_tr11e-350m-ml.slurm +120 -0
  32. evaluation/results/tr11/scripts/run_bsevalharness_tr11f-6b3-ml.slurm +121 -0
  33. evaluation/results/tr11/scripts/run_evalharness_deepspeed.md +158 -0
  34. evaluation/results/tr11/scripts/run_evalharness_deepspeed.slurm +98 -0
  35. evaluation/results/tr11/scripts/run_evalharness_tr11-176b-ml.slurm +121 -0
  36. evaluation/results/tr11/scripts/run_evalharness_tr11b-1b3-ml.slurm +120 -0
  37. evaluation/results/tr11/scripts/run_evalharness_tr11c-2b5-ml.slurm +120 -0
  38. evaluation/results/tr11/scripts/run_evalharness_tr11d-760m-ml.slurm +118 -0
  39. evaluation/results/tr11/scripts/run_evalharness_tr11e-350m-ml.slurm +118 -0
  40. evaluation/results/tr11/scripts/run_evalharness_tr11f-6b3-ml.slurm +120 -0
  41. evaluation/results/tr11/scripts/run_trevalharness_7b1.slurm +60 -0
  42. evaluation/results/tr13/download_bslmeval.slurm +37 -0
  43. evaluation/results/tr13/lmeval/megdsbslmeval.slurm +139 -0
  44. evaluation/results/tr13/lmeval/run_generation.slurm +90 -0
  45. evaluation/results/tr13/lmeval/run_generation_7b1.slurm +86 -0
  46. evaluation/results/tr13/lmeval/transformersbslmeval.slurm +53 -0
  47. evaluation/results/tr13/tzeroeval/convert_validation_176b.slurm +373 -0
  48. evaluation/results/tr13/tzeroeval/convert_validation_1b3.slurm +352 -0
  49. evaluation/results/tr13/tzeroeval/convert_validation_350m.slurm +350 -0
  50. evaluation/results/tr13/tzeroeval/convert_validation_760m.slurm +352 -0
.gitignore ADDED
@@ -0,0 +1,111 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ env/
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ .hypothesis/
+ .pytest_cache/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # pyenv
+ .python-version
+
+ # celery beat schedule file
+ celerybeat-schedule
+
+ # SageMath parsed files
+ *.sage.py
+
+ # dotenv
+ .env
+
+ # virtualenv
+ .venv
+ venv/
+ ENV/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+
+ # IDE settings
+ .vscode/
+ .idea/
+
+ # WanDB
+ wandb
+
+ *DS_Store
CODEOWNERS ADDED
@@ -0,0 +1 @@
+ * @bigscience/bigscience-codeowners
LICENSE ADDED
@@ -0,0 +1,16 @@
+ Apache Software License 2.0
+
+ Copyright (c) 2021, Stas Bekman
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
MANIFEST.in ADDED
@@ -0,0 +1,9 @@
+ include CONTRIBUTING.md
+ include LICENSE
+ include README.md
+
+ recursive-include tests *
+ recursive-exclude * __pycache__
+ recursive-exclude * *.py[co]
+
+ recursive-include docs *.rst conf.py Makefile make.bat *.jpg *.png *.gif
Makefile ADDED
@@ -0,0 +1,59 @@
+ .PHONY: clean clean-test clean-pyc clean-build docs help
+ .DEFAULT_GOAL := help
+
+ define BROWSER_PYSCRIPT
+ import os, webbrowser, sys
+
+ from urllib.request import pathname2url
+
+ webbrowser.open("file://" + pathname2url(os.path.abspath(sys.argv[1])))
+ endef
+ export BROWSER_PYSCRIPT
+
+ define PRINT_HELP_PYSCRIPT
+ import re, sys
+
+ for line in sys.stdin:
+     match = re.match(r'^([a-zA-Z_-]+):.*?## (.*)$$', line)
+     if match:
+         target, help = match.groups()
+         print("%-20s %s" % (target, help))
+ endef
+ export PRINT_HELP_PYSCRIPT
+
+ BROWSER := python -c "$$BROWSER_PYSCRIPT"
+
+ help:
+     @python -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST)
+
+ clean: clean-build clean-pyc clean-test ## remove all build, test, coverage and Python artifacts
+
+ clean-build: ## remove build artifacts
+     rm -fr build/
+     rm -fr dist/
+     rm -fr .eggs/
+     find . -name '*.egg-info' -exec rm -fr {} +
+     find . -name '*.egg' -exec rm -f {} +
+
+ clean-pyc: ## remove Python file artifacts
+     find . -name '*.pyc' -exec rm -f {} +
+     find . -name '*.pyo' -exec rm -f {} +
+     find . -name '*~' -exec rm -f {} +
+     find . -name '__pycache__' -exec rm -fr {} +
+
+ clean-test: ## remove test and coverage artifacts
+     rm -fr .pytest_cache
+
+ lint: ## check style with flake8
+     flake8 bigscience tests
+
+ test: ## run tests quickly with the default Python
+     pytest
+
+ dist: clean ## builds source and wheel package
+     python setup.py sdist
+     python setup.py bdist_wheel
+     ls -l dist
+
+ install: clean ## install the package to the active Python's site-packages
+     python setup.py install
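
The Makefile above is self-documenting: the `help` target pipes the Makefile through `PRINT_HELP_PYSCRIPT`, which prints each target next to the text after its `##` marker. A minimal usage sketch:

```bash
# list the self-documenting targets (parsed from the `##` comments above)
make help
# remove build/test artifacts, then run the test suite
make clean test
```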
TODO.md ADDED
@@ -0,0 +1,54 @@
+ # Things to do
+
+ ## CI
+
+ - replace CI with a constantly running GCP instance
+
+ ## TODO
+
+ general:
+
+ - check whether `--jobid=$SLURM_JOB` is actually needed in the slurm scripts, especially when running interactively
+
+ - add alerts for loss spikes
+
+ - check that my syncing script doesn't sync deleted files, should SCRATCH wipe out something that is already on the hub!
+
+ - update deepspeed_to_transformers.py to require a specific version once a new version of transformers is released, and then update the doc https://github.com/bigscience-workshop/bigscience/tree/master/train/tr1-13B-base#checkpoint-conversion-and-upload
+
+ - see if we can speed up building the meg cuda kernels
+   https://huggingface.slack.com/archives/C01NHER1JLS/p1630520151064500?thread_ts=1630473623.060700&cid=C01NHER1JLS
+
+ - since we are starting to tweak the seed, we should start logging the iteration ranges for each seed, so that down the road we can reproduce the data
+
+ - test 1.3b with the final config using tr7d as a baseline: https://github.com/bigscience-workshop/bigscience/blob/cfdd69b89118a77567ee87b5a181c233fffef377/train/tr7-alibi/tr7d-1B3-modeling-alibi.slurm
+
+ ## sysadmin
+
+ ### conda packages
+
+ currently each one of us has a copy of the same conda packages. `conda config --show pkgs_dirs envs_dirs` outputs:
+
+ ```
+ pkgs_dirs:
+   - /gpfslocalsup/pub/anaconda-py3/2020.02/pkgs
+   - /linkhome/rech/genhug01/uue59kq/.conda/pkgs
+ envs_dirs:
+   - /gpfswork/rech/six/commun/conda
+   - /linkhome/rech/genhug01/uue59kq/.conda/envs
+   - /gpfslocalsup/pub/anaconda-py3/2020.02/envs
+ ```
+
+ we should aggregate them under the same dir.
+
+ probably need to find out the right env var (best) or ~/.condarc (less good) and point it at the shared conda location (see the sketch after this diff).
+
+ - also document in the getting-started docs how to avoid ending up with a ~/.conda dir that fills a user's HOME quota to 100%.
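
A minimal sketch of the env-var approach the TODO mentions, assuming the shared location is the existing /gpfswork/rech/six/commun/conda directory (the exact subpaths and policy are assumptions, not a decided setup):

```bash
# Assumption: shared conda location under the group's commun dir; adjust as decided.
# Put these in each user's ~/.bashrc (or a sourced group profile) so conda caches
# packages and creates envs under the shared dir instead of under $HOME.
export CONDA_PKGS_DIRS=/gpfswork/rech/six/commun/conda/pkgs
export CONDA_ENVS_PATH=/gpfswork/rech/six/commun/conda/envs

# verify that conda now resolves the shared dirs first
conda config --show pkgs_dirs envs_dirs
```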
evaluation/results/tr11/bloom/mdmeta.txt ADDED
@@ -0,0 +1,1595 @@
+ model-index:
+ - name: bloom
+   results:
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: arc_challenge
+       type: arc_challenge
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.4112627986348123
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: arc_easy
+       type: arc_easy
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.726010101010101
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: axb
+       type: axb
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.5751811594202898
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: axg
+       type: axg
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.5252808988764045
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: boolq
+       type: boolq
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.6345565749235474
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: cb
+       type: cb
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.3392857142857143
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: cola
+       type: cola
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.39022051773729627
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: copa
+       type: copa
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.56
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: crows_pairs_english
+       type: crows_pairs_english
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.5
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: crows_pairs_french
+       type: crows_pairs_french
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.505664877757901
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: diabla
+       type: diabla
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.2947981906750174
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_afr
+       type: gsarti/flores_101_afr
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 4.25431550058444
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_amh
+       type: gsarti/flores_101_amh
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 3.716877477347089
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_ara
+       type: gsarti/flores_101_ara
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 1.7049030137120964
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_asm
+       type: gsarti/flores_101_asm
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 6.576581380404954
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_ast
+       type: gsarti/flores_101_ast
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.8562364775797944
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_azj
+       type: gsarti/flores_101_azj
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 4.80721528624391
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_bel
+       type: gsarti/flores_101_bel
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.7312177406635065
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_ben
+       type: gsarti/flores_101_ben
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 5.993409478990023
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_bos
+       type: gsarti/flores_101_bos
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 3.5936169095529493
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_bul
+       type: gsarti/flores_101_bul
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.159035321398085
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_cat
+       type: gsarti/flores_101_cat
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.167873680006659
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_ceb
+       type: gsarti/flores_101_ceb
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 5.286975089885673
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_ces
+       type: gsarti/flores_101_ces
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 3.4516208322236017
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_ckb
+       type: gsarti/flores_101_ckb
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 3.7051034724765612
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_cym
+       type: gsarti/flores_101_cym
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 7.0889312398688125
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_dan
+       type: gsarti/flores_101_dan
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 3.4300748208111838
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_deu
+       type: gsarti/flores_101_deu
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.3380585896268107
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_ell
+       type: gsarti/flores_101_ell
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 1.9595604725375586
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_eng
+       type: gsarti/flores_101_eng
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 1.8819637649637901
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_est
+       type: gsarti/flores_101_est
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 5.773850600380297
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_fas
+       type: gsarti/flores_101_fas
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.4306140728294086
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_fin
+       type: gsarti/flores_101_fin
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 4.304305536244342
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_fra
+       type: gsarti/flores_101_fra
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 1.9374688438541796
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_ful
+       type: gsarti/flores_101_ful
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 9.740353097219378
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_gle
+       type: gsarti/flores_101_gle
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 6.035269765075012
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_glg
+       type: gsarti/flores_101_glg
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.365451129546636
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_guj
+       type: gsarti/flores_101_guj
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 5.70676742569154
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_hau
+       type: gsarti/flores_101_hau
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 8.855204288260023
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_heb
+       type: gsarti/flores_101_heb
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.920943798471208
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_hin
+       type: gsarti/flores_101_hin
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 5.452028001573195
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_hrv
+       type: gsarti/flores_101_hrv
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 3.7056829077179225
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_hun
+       type: gsarti/flores_101_hun
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 4.058579478967854
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_hye
+       type: gsarti/flores_101_hye
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 3.127237816041562
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_ibo
+       type: gsarti/flores_101_ibo
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 3.9500357969906683
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_ind
+       type: gsarti/flores_101_ind
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 1.976163584180101
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_isl
+       type: gsarti/flores_101_isl
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 5.500542085165231
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_ita
+       type: gsarti/flores_101_ita
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.314465100752677
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_jav
+       type: gsarti/flores_101_jav
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 4.942322446550142
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_jpn
+       type: gsarti/flores_101_jpn
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.259421750521777
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_kam
+       type: gsarti/flores_101_kam
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 9.743025325635475
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_kan
+       type: gsarti/flores_101_kan
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 6.233724699944989
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_kat
+       type: gsarti/flores_101_kat
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.0508893415872107
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_kaz
+       type: gsarti/flores_101_kaz
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 3.0390148516287927
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_kea
+       type: gsarti/flores_101_kea
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 7.147132270533836
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_khm
+       type: gsarti/flores_101_khm
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 3.366514710252477
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_kir
+       type: gsarti/flores_101_kir
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 3.2413845359487885
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_kor
+       type: gsarti/flores_101_kor
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.9023196482741027
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_lao
+       type: gsarti/flores_101_lao
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.331446855837494
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_lav
+       type: gsarti/flores_101_lav
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 5.223609016485348
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_lin
+       type: gsarti/flores_101_lin
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 4.847471204107301
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_lit
+       type: gsarti/flores_101_lit
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 4.5432035498036765
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_ltz
+       type: gsarti/flores_101_ltz
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 5.5910516978201015
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_lug
+       type: gsarti/flores_101_lug
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 5.4301049946044175
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_luo
+       type: gsarti/flores_101_luo
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 12.031029857399394
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_mal
+       type: gsarti/flores_101_mal
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 4.794302548141229
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_mar
+       type: gsarti/flores_101_mar
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 6.856682255407709
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_mkd
+       type: gsarti/flores_101_mkd
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.3354144607382983
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_mlt
+       type: gsarti/flores_101_mlt
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 9.04135227904975
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_mon
+       type: gsarti/flores_101_mon
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 3.094907723618666
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_mri
+       type: gsarti/flores_101_mri
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 5.2659698341456505
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_msa
+       type: gsarti/flores_101_msa
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.2220779892820985
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_mya
+       type: gsarti/flores_101_mya
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.5229159853414433
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_nld
+       type: gsarti/flores_101_nld
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.799153089002766
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_nob
+       type: gsarti/flores_101_nob
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 3.628942049758715
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_npi
+       type: gsarti/flores_101_npi
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 6.666236527803879
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_nso
+       type: gsarti/flores_101_nso
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 5.015319074943932
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_nya
+       type: gsarti/flores_101_nya
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 4.938044040751036
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_oci
+       type: gsarti/flores_101_oci
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 3.607440766288032
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_orm
+       type: gsarti/flores_101_orm
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 11.31585044916705
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_ory
+       type: gsarti/flores_101_ory
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 5.981891184515959
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_pan
+       type: gsarti/flores_101_pan
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 4.7716086841502685
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_pol
+       type: gsarti/flores_101_pol
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 3.01200174157614
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_por
+       type: gsarti/flores_101_por
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 1.8411472115156693
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_pus
+       type: gsarti/flores_101_pus
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 4.623872921169341
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_ron
+       type: gsarti/flores_101_ron
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 3.049829411973529
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_rus
+       type: gsarti/flores_101_rus
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 1.7083443875791493
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_slk
+       type: gsarti/flores_101_slk
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 4.037719650548048
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_slv
+       type: gsarti/flores_101_slv
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 4.141036287764831
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_sna
+       type: gsarti/flores_101_sna
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 4.7109183690601295
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_snd
+       type: gsarti/flores_101_snd
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 4.206170931541356
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_som
+       type: gsarti/flores_101_som
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 9.154342083821405
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_spa
+       type: gsarti/flores_101_spa
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 1.7955816311143258
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_srp
+       type: gsarti/flores_101_srp
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.241096141430147
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_swe
+       type: gsarti/flores_101_swe
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 3.344977179674293
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_swh
+       type: gsarti/flores_101_swh
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.6844272218041634
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_tam
+       type: gsarti/flores_101_tam
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 5.1645951632801745
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_tel
+       type: gsarti/flores_101_tel
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 6.8098996634099445
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_tgk
+       type: gsarti/flores_101_tgk
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 3.785457016715163
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_tgl
+       type: gsarti/flores_101_tgl
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 3.7498953645610875
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_tha
+       type: gsarti/flores_101_tha
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.104151663233468
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_tur
+       type: gsarti/flores_101_tur
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 3.3178240103796037
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_ukr
+       type: gsarti/flores_101_ukr
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.088543437159643
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_umb
+       type: gsarti/flores_101_umb
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 11.766013385445124
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_urd
+       type: gsarti/flores_101_urd
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 1.7788699847612357
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_uzb
+       type: gsarti/flores_101_uzb
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 8.499879863290486
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_vie
+       type: gsarti/flores_101_vie
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 1.65901207387262
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_wol
+       type: gsarti/flores_101_wol
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 6.141703791276928
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_xho
+       type: gsarti/flores_101_xho
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 4.690199677955254
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_yor
+       type: gsarti/flores_101_yor
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 4.360585696242932
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_zho_simpl
+       type: gsarti/flores_101_zho_simpl
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.1183545781883515
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_zho_trad
+       type: gsarti/flores_101_zho_trad
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 2.273787884962656
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: gsarti/flores_101_zul
+       type: gsarti/flores_101_zul
+     metrics:
+     - name: byte_perplexity
+       type: byte_perplexity
+       value: 6.016954767729589
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: headqa
+       type: headqa
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.3464624361779723
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: hellaswag
+       type: hellaswag
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.5353515236008763
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: lambada_mt_de
+       type: lambada_mt_de
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.3291286629148069
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: lambada_mt_en
+       type: lambada_mt_en
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.6720357073549389
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: lambada_mt_es
+       type: lambada_mt_es
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.476421502037648
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: lambada_mt_it
+       type: lambada_mt_it
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.4061711624296526
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: logiqa
+       type: logiqa
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.2350230414746544
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: mathqa
+       type: mathqa
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.27671691792294806
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: mc_taco
+       type: mc_taco
+     metrics:
+     - name: em
+       type: em
+       value: 0.13063063063063063
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: mnli
+       type: mnli
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.3545565500406835
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: mnli_mismatched
+       type: mnli_mismatched
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.3545565500406835
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: mrpc
+       type: mrpc
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.3872549019607843
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: multirc
+       type: multirc
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.570957095709571
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: openbookqa
+       type: openbookqa
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.312
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: piqa
+       type: piqa
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.7812840043525572
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: prost
+       type: prost
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.2977156276686593
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: pubmedqa
+       type: pubmedqa
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.741
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: qnli
+       type: qnli
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.5172981878088962
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: qqp
+       type: qqp
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.5883007667573584
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: race
+       type: race
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.39043062200956935
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: rte
+       type: rte
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.5198555956678701
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: sciq
+       type: sciq
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.936
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: sst
+       type: sst
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.6043577981651376
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: triviaqa
+       type: triviaqa
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.18332891363917617
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: tydiqa_primary
+       type: tydiqa_primary
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.2809817301342725
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: webqs
+       type: webqs
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.061515748031496065
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: wic
+       type: wic
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.5062695924764891
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: winogrande
+       type: winogrande
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.7095501183898973
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: wnli
+       type: wnli
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.5704225352112676
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: wsc
+       type: wsc
+     metrics:
+     - name: acc
+       type: acc
+       value: 0.5192307692307693
+       verified: false
+   - task:
+       type: text-generation
+       name: text generation
+     dataset:
+       name: humaneval
+       type: humaneval
+     metrics:
+     - name: pass@1
+       type: pass@1
+       value: 0.15524390243902436
+       verified: false
+     - name: pass@10
+       type: pass@10
+       value: 0.3220367632383857
+       verified: false
+     - name: pass@100
+       type: pass@100
+       value: 0.5545431515723145
+       verified: false
evaluation/results/tr11/bloom1b3/bslmeval.json ADDED
@@ -0,0 +1,2938 @@
+ {
+ "results": {
+ "arc_challenge": {
+ "2022-07-13-11-29-13": {
+ "acc": 0.23464163822525597,
+ "acc_norm": 0.26791808873720135,
+ "acc_norm_stderr": 0.012942030195136423,
+ "acc_stderr": 0.012383873560768673
+ }
+ },
+ "arc_easy": {
+ "2022-07-13-11-29-13": {
+ "acc": 0.5631313131313131,
+ "acc_norm": 0.4810606060606061,
+ "acc_norm_stderr": 0.010252420496894487,
+ "acc_stderr": 0.010177672928157678
+ }
+ },
+ "axb+GPT-3 style": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.4855072463768116,
+ "acc_norm": 0.5878623188405797,
+ "acc_norm_stderr": 0.014820785339690506,
+ "acc_stderr": 0.015048725939283577,
+ "prompt_name": "GPT-3 style",
+ "task_name": "axb"
+ }
+ },
+ "axb+MNLI crowdsource": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.447463768115942,
+ "acc_norm": 0.4166666666666667,
+ "acc_norm_stderr": 0.014844481058991162,
+ "acc_stderr": 0.0149717153798021,
+ "prompt_name": "MNLI crowdsource",
+ "task_name": "axb"
+ }
+ },
+ "axb+based on the previous passage": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.4846014492753623,
+ "acc_norm": 0.4166666666666667,
+ "acc_norm_stderr": 0.014844481058991162,
+ "acc_stderr": 0.015047910329698355,
+ "prompt_name": "based on the previous passage",
+ "task_name": "axb"
+ }
+ },
+ "axb+can we infer": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.421195652173913,
+ "acc_norm": 0.4166666666666667,
+ "acc_norm_stderr": 0.014844481058991162,
+ "acc_stderr": 0.014866888213508284,
+ "prompt_name": "can we infer",
+ "task_name": "axb"
+ }
+ },
+ "axb+does it follow that": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.4375,
+ "acc_norm": 0.4166666666666667,
+ "acc_norm_stderr": 0.014844481058991162,
+ "acc_stderr": 0.014936970932375573,
+ "prompt_name": "does it follow that",
+ "task_name": "axb"
+ }
+ },
+ "axb+does this imply": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.5353260869565217,
+ "acc_norm": 0.4166666666666667,
+ "acc_norm_stderr": 0.014844481058991162,
+ "acc_stderr": 0.015017429208641943,
+ "prompt_name": "does this imply",
+ "task_name": "axb"
+ }
+ },
+ "axb+guaranteed true": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.44655797101449274,
+ "acc_norm": 0.4166666666666667,
+ "acc_norm_stderr": 0.014844481058991162,
+ "acc_stderr": 0.014968808595500557,
+ "prompt_name": "guaranteed true",
+ "task_name": "axb"
+ }
+ },
+ "axb+justified in saying": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.4365942028985507,
+ "acc_norm": 0.4166666666666667,
+ "acc_norm_stderr": 0.014844481058991162,
+ "acc_stderr": 0.014933509475434285,
+ "prompt_name": "justified in saying",
+ "task_name": "axb"
+ }
+ },
+ "axb+must be true": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.4266304347826087,
+ "acc_norm": 0.4166666666666667,
+ "acc_norm_stderr": 0.014844481058991162,
+ "acc_stderr": 0.014892084059444173,
+ "prompt_name": "must be true",
+ "task_name": "axb"
+ }
+ },
+ "axb+should assume": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.5163043478260869,
+ "acc_norm": 0.4157608695652174,
+ "acc_norm_stderr": 0.014839845193003246,
+ "acc_stderr": 0.015047045240919796,
+ "prompt_name": "should assume",
+ "task_name": "axb"
+ }
+ },
+ "axg+GPT-3 style": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.4803370786516854,
+ "acc_norm": 0.5,
+ "acc_norm_stderr": 0.026537244621713762,
+ "acc_stderr": 0.02651671646679541,
+ "parity": 0.9606741573033708,
+ "parity_stderr": 0.01460967112412074,
+ "prompt_name": "GPT-3 style",
+ "task_name": "axg"
+ }
+ },
+ "axg+MNLI crowdsource": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.5140449438202247,
+ "acc_norm": 0.5,
+ "acc_norm_stderr": 0.026537244621713762,
+ "acc_stderr": 0.026526773058212952,
+ "parity": 0.9719101123595506,
+ "parity_stderr": 0.012419422972302346,
+ "prompt_name": "MNLI crowdsource",
+ "task_name": "axg"
+ }
+ },
+ "axg+based on the previous passage": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.5,
+ "acc_norm": 0.5,
+ "acc_norm_stderr": 0.026537244621713762,
+ "acc_stderr": 0.026537244621713762,
+ "parity": 0.9662921348314607,
+ "parity_stderr": 0.013565419020002358,
+ "prompt_name": "based on the previous passage",
+ "task_name": "axg"
+ }
+ },
+ "axg+can we infer": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.5,
+ "acc_norm": 0.5,
+ "acc_norm_stderr": 0.026537244621713762,
+ "acc_stderr": 0.026537244621713762,
+ "parity": 0.9887640449438202,
+ "parity_stderr": 0.007922544664164389,
+ "prompt_name": "can we infer",
+ "task_name": "axg"
+ }
+ },
+ "axg+does it follow that": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.5,
+ "acc_norm": 0.5,
+ "acc_norm_stderr": 0.026537244621713762,
+ "acc_stderr": 0.026537244621713762,
+ "parity": 1.0,
+ "parity_stderr": 0.0,
+ "prompt_name": "does it follow that",
+ "task_name": "axg"
+ }
+ },
+ "axg+does this imply": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.49719101123595505,
+ "acc_norm": 0.5,
+ "acc_norm_stderr": 0.026537244621713762,
+ "acc_stderr": 0.026536825838510643,
+ "parity": 0.9831460674157303,
+ "parity_stderr": 0.009675491064988365,
+ "prompt_name": "does this imply",
+ "task_name": "axg"
+ }
+ },
+ "axg+guaranteed true": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.48314606741573035,
+ "acc_norm": 0.5,
+ "acc_norm_stderr": 0.026537244621713762,
+ "acc_stderr": 0.026522164260489825,
+ "parity": 0.9887640449438202,
+ "parity_stderr": 0.007922544664164387,
+ "prompt_name": "guaranteed true",
+ "task_name": "axg"
+ }
+ },
+ "axg+justified in saying": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.5,
+ "acc_norm": 0.5,
+ "acc_norm_stderr": 0.026537244621713762,
+ "acc_stderr": 0.026537244621713762,
+ "parity": 0.9887640449438202,
+ "parity_stderr": 0.007922544664164385,
+ "prompt_name": "justified in saying",
+ "task_name": "axg"
+ }
+ },
+ "axg+must be true": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.4803370786516854,
+ "acc_norm": 0.5,
+ "acc_norm_stderr": 0.026537244621713762,
+ "acc_stderr": 0.026516716466795417,
+ "parity": 0.9719101123595506,
+ "parity_stderr": 0.012419422972302347,
+ "prompt_name": "must be true",
+ "task_name": "axg"
+ }
+ },
+ "axg+should assume": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.49719101123595505,
+ "acc_norm": 0.5,
+ "acc_norm_stderr": 0.026537244621713762,
+ "acc_stderr": 0.026536825838510643,
+ "parity": 0.949438202247191,
+ "parity_stderr": 0.016468648928151884,
+ "prompt_name": "should assume",
+ "task_name": "axg"
+ }
+ },
+ "boolq": {
+ "2022-07-13-11-29-13": {
+ "acc": 0.617737003058104,
+ "acc_stderr": 0.008499149690449272
+ }
+ },
+ "boolq+GPT-3 Style": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.5896024464831804,
+ "acc_norm": 0.6211009174311927,
+ "acc_norm_stderr": 0.008484678718565017,
+ "acc_stderr": 0.008603488048617526,
+ "prompt_name": "GPT-3 Style",
+ "task_name": "boolq"
+ }
+ },
+ "boolq+I wonder\u2026": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.563914373088685,
+ "acc_norm": 0.6217125382262997,
+ "acc_norm_stderr": 0.00848200113393099,
+ "acc_stderr": 0.008673312776324934,
+ "prompt_name": "I wonder\u2026",
+ "task_name": "boolq"
+ }
+ },
+ "boolq+after_reading": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.6217125382262997,
+ "acc_norm": 0.3804281345565749,
+ "acc_norm_stderr": 0.008491310027059626,
+ "acc_stderr": 0.00848200113393099,
+ "prompt_name": "after_reading",
+ "task_name": "boolq"
+ }
+ },
+ "boolq+based on the following passage": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.3798165137614679,
+ "acc_norm": 0.6012232415902141,
+ "acc_norm_stderr": 0.008563973987729906,
+ "acc_stderr": 0.008488668235778644,
+ "prompt_name": "based on the following passage",
+ "task_name": "boolq"
+ }
+ },
+ "boolq+based on the previous passage": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.6146788990825688,
+ "acc_norm": 0.6217125382262997,
+ "acc_norm_stderr": 0.00848200113393099,
+ "acc_stderr": 0.008511930879680635,
+ "prompt_name": "based on the previous passage",
+ "task_name": "boolq"
+ }
+ },
+ "boolq+could you tell me\u2026": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.5840978593272171,
+ "acc_norm": 0.6217125382262997,
+ "acc_norm_stderr": 0.00848200113393099,
+ "acc_stderr": 0.008620469604001,
+ "prompt_name": "could you tell me\u2026",
+ "task_name": "boolq"
+ }
+ },
+ "boolq+exam": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.6220183486238532,
+ "acc_norm": 0.6217125382262997,
+ "acc_norm_stderr": 0.00848200113393099,
+ "acc_stderr": 0.008480656964585267,
+ "prompt_name": "exam",
+ "task_name": "boolq"
+ }
+ },
+ "boolq+exercise": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.6217125382262997,
+ "acc_norm": 0.46788990825688076,
+ "acc_norm_stderr": 0.0087270030269178,
+ "acc_stderr": 0.00848200113393099,
+ "prompt_name": "exercise",
+ "task_name": "boolq"
+ }
+ },
+ "boolq+valid_binary": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.491131498470948,
+ "acc_norm": 0.37370030581039754,
+ "acc_norm_stderr": 0.008461461177104003,
+ "acc_stderr": 0.008743679265456042,
+ "prompt_name": "valid_binary",
+ "task_name": "boolq"
+ }
+ },
+ "boolq+yes_no_question": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.5951070336391437,
+ "acc_norm": 0.6217125382262997,
+ "acc_norm_stderr": 0.00848200113393099,
+ "acc_stderr": 0.008585393347962319,
+ "prompt_name": "yes_no_question",
+ "task_name": "boolq"
+ }
+ },
+ "cb+GPT-3 style": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.42857142857142855,
+ "acc_stderr": 0.06672848092813057,
+ "f1": 0.21956970232832299,
+ "prompt_name": "GPT-3 style",
+ "task_name": "cb"
+ }
+ },
+ "cb+MNLI crowdsource": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.42857142857142855,
+ "acc_stderr": 0.06672848092813057,
+ "f1": 0.21956970232832299,
+ "prompt_name": "MNLI crowdsource",
+ "task_name": "cb"
+ }
+ },
+ "cb+always/sometimes/never": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.08928571428571429,
+ "acc_stderr": 0.038450387280282494,
+ "f1": 0.054644808743169404,
+ "prompt_name": "always/sometimes/never",
+ "task_name": "cb"
+ }
+ },
+ "cb+based on the previous passage": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.35714285714285715,
+ "acc_stderr": 0.06460957383809221,
+ "f1": 0.2094181249110827,
+ "prompt_name": "based on the previous passage",
+ "task_name": "cb"
+ }
+ },
+ "cb+can we infer": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.25,
+ "acc_stderr": 0.058387420812114225,
+ "f1": 0.15483870967741933,
+ "prompt_name": "can we infer",
+ "task_name": "cb"
+ }
+ },
+ "cb+claim true/false/inconclusive": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.42857142857142855,
+ "acc_stderr": 0.06672848092813057,
+ "f1": 0.21956970232832299,
+ "prompt_name": "claim true/false/inconclusive",
+ "task_name": "cb"
+ }
+ },
+ "cb+consider always/sometimes/never": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.08928571428571429,
+ "acc_stderr": 0.038450387280282494,
+ "f1": 0.054644808743169404,
+ "prompt_name": "consider always/sometimes/never",
+ "task_name": "cb"
+ }
+ },
+ "cb+does it follow that": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.30357142857142855,
+ "acc_stderr": 0.06199938655510754,
+ "f1": 0.2613574165298303,
+ "prompt_name": "does it follow that",
+ "task_name": "cb"
+ }
+ },
+ "cb+does this imply": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.10714285714285714,
+ "acc_stderr": 0.0417053005800816,
+ "f1": 0.11222753854332802,
+ "prompt_name": "does this imply",
+ "task_name": "cb"
+ }
+ },
+ "cb+guaranteed true": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.21428571428571427,
+ "acc_stderr": 0.055328333517248834,
+ "f1": 0.15883777239709443,
+ "prompt_name": "guaranteed true",
+ "task_name": "cb"
+ }
+ },
+ "cb+guaranteed/possible/impossible": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.10714285714285714,
+ "acc_stderr": 0.0417053005800816,
+ "f1": 0.07871939736346516,
+ "prompt_name": "guaranteed/possible/impossible",
+ "task_name": "cb"
+ }
+ },
+ "cb+justified in saying": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.21428571428571427,
+ "acc_stderr": 0.055328333517248834,
+ "f1": 0.1623009758602979,
+ "prompt_name": "justified in saying",
+ "task_name": "cb"
+ }
+ },
+ "cb+must be true": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.19642857142857142,
+ "acc_stderr": 0.05357142857142859,
+ "f1": 0.1384656508954825,
+ "prompt_name": "must be true",
+ "task_name": "cb"
+ }
+ },
+ "cb+should assume": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.19642857142857142,
+ "acc_stderr": 0.05357142857142858,
+ "f1": 0.14613935969868175,
+ "prompt_name": "should assume",
+ "task_name": "cb"
+ }
+ },
+ "cb+take the following as truth": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.4107142857142857,
+ "acc_stderr": 0.06633634150359538,
+ "f1": 0.1940928270042194,
+ "prompt_name": "take the following as truth",
+ "task_name": "cb"
+ }
+ },
+ "cola+Following sentence acceptable": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.6625119846596357,
+ "acc_norm": 0.31064237775647174,
+ "acc_norm_stderr": 0.014335695984672221,
+ "acc_stderr": 0.014648467353878477,
+ "prompt_name": "Following sentence acceptable",
+ "task_name": "cola"
+ }
+ },
+ "cola+Make sense yes no": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.3710450623202301,
+ "acc_norm": 0.6903163950143816,
+ "acc_norm_stderr": 0.014323506235950028,
+ "acc_stderr": 0.01496543118537874,
+ "prompt_name": "Make sense yes no",
+ "task_name": "cola"
+ }
+ },
+ "cola+Previous sentence acceptable": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.6864813039309684,
+ "acc_norm": 0.6912751677852349,
+ "acc_norm_stderr": 0.014311244461311299,
+ "acc_stderr": 0.014371834902632595,
+ "prompt_name": "Previous sentence acceptable",
+ "task_name": "cola"
+ }
+ },
+ "cola+editing": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.46596356663470756,
+ "acc_norm": 0.6912751677852349,
+ "acc_norm_stderr": 0.014311244461311299,
+ "acc_stderr": 0.015453525186655532,
+ "prompt_name": "editing",
+ "task_name": "cola"
+ }
+ },
+ "cola+is_this_correct": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.6893576222435283,
+ "acc_norm": 0.6912751677852349,
+ "acc_norm_stderr": 0.014311244461311299,
+ "acc_stderr": 0.014335695984672223,
+ "prompt_name": "is_this_correct",
+ "task_name": "cola"
+ }
+ },
+ "copa": {
+ "2022-07-13-11-29-13": {
+ "acc": 0.7,
+ "acc_stderr": 0.046056618647183814
+ }
+ },
+ "copa+C1 or C2? premise, so/because\u2026": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.65,
+ "acc_norm": 0.57,
+ "acc_norm_stderr": 0.049756985195624284,
+ "acc_stderr": 0.047937248544110196,
+ "prompt_name": "C1 or C2? premise, so/because\u2026",
+ "task_name": "copa"
+ }
+ },
+ "copa+best_option": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.52,
+ "acc_norm": 0.49,
+ "acc_norm_stderr": 0.05024183937956911,
+ "acc_stderr": 0.050211673156867795,
+ "prompt_name": "best_option",
+ "task_name": "copa"
+ }
+ },
+ "copa+cause_effect": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.56,
+ "acc_norm": 0.45,
+ "acc_norm_stderr": 0.05,
+ "acc_stderr": 0.04988876515698589,
+ "prompt_name": "cause_effect",
+ "task_name": "copa"
+ }
+ },
+ "copa+choose": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.53,
+ "acc_norm": 0.46,
+ "acc_norm_stderr": 0.05009082659620333,
+ "acc_stderr": 0.05016135580465919,
+ "prompt_name": "choose",
+ "task_name": "copa"
+ }
+ },
+ "copa+exercise": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.54,
+ "acc_norm": 0.48,
+ "acc_norm_stderr": 0.050211673156867795,
+ "acc_stderr": 0.05009082659620332,
+ "prompt_name": "exercise",
+ "task_name": "copa"
+ }
+ },
+ "copa+i_am_hesitating": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.56,
+ "acc_norm": 0.48,
+ "acc_norm_stderr": 0.050211673156867795,
+ "acc_stderr": 0.04988876515698589,
+ "prompt_name": "i_am_hesitating",
+ "task_name": "copa"
+ }
+ },
+ "copa+more likely": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.53,
+ "acc_norm": 0.49,
+ "acc_norm_stderr": 0.05024183937956911,
+ "acc_stderr": 0.05016135580465919,
+ "prompt_name": "more likely",
+ "task_name": "copa"
+ }
+ },
+ "copa+plausible_alternatives": {
+ "2022-07-15-11-47-34": {
+ "acc": 0.56,
+ "acc_norm": 0.53,
+ "acc_norm_stderr": 0.05016135580465919,
+ "acc_stderr": 0.04988876515698589,
+ "prompt_name": "plausible_alternatives",
+ "task_name": "copa"
+ }
+ },
+ "crows_pairs_english+1": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.49552772808586765,
+ "acc_norm": 0.49552772808586765,
+ "acc_norm_stderr": 0.012212810647205384,
+ "acc_stderr": 0.012212810647205384,
+ "prompt_name": "1",
+ "task_name": "crows_pairs_english"
+ }
+ },
+ "crows_pairs_english+2": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.4883720930232558,
+ "acc_norm": 0.4883720930232558,
+ "acc_norm_stderr": 0.012209996095069646,
+ "acc_stderr": 0.012209996095069646,
+ "prompt_name": "2",
+ "task_name": "crows_pairs_english"
+ }
+ },
+ "crows_pairs_english+3": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.5163983303518187,
+ "acc_norm": 0.4836016696481813,
+ "acc_norm_stderr": 0.012206729011137944,
+ "acc_stderr": 0.012206729011137944,
+ "prompt_name": "3",
+ "task_name": "crows_pairs_english"
+ }
+ },
+ "crows_pairs_english+4": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.4919499105545617,
+ "acc_norm": 0.4919499105545617,
+ "acc_norm_stderr": 0.01221171617623539,
+ "acc_stderr": 0.01221171617623539,
+ "prompt_name": "4",
+ "task_name": "crows_pairs_english"
+ }
+ },
+ "crows_pairs_english+A_preference": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.5104353011329755,
+ "acc_norm": 0.5104353011329755,
+ "acc_norm_stderr": 0.012210638982043397,
+ "acc_stderr": 0.012210638982043397,
+ "prompt_name": "A_preference",
+ "task_name": "crows_pairs_english"
+ }
+ },
+ "crows_pairs_english+A_stereotype_true": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.4907573047107931,
+ "acc_norm": 0.5062611806797853,
+ "acc_norm_stderr": 0.012212341600228735,
+ "acc_stderr": 0.012211212339167695,
+ "prompt_name": "A_stereotype_true",
+ "task_name": "crows_pairs_english"
+ }
+ },
+ "crows_pairs_french+1_fr": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.48598688133571855,
+ "acc_norm": 0.48598688133571855,
+ "acc_norm_stderr": 0.012208501686447066,
+ "acc_stderr": 0.012208501686447066,
+ "prompt_name": "1_fr",
+ "task_name": "crows_pairs_french"
+ }
+ },
+ "crows_pairs_french+2_fr": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.49850924269528923,
+ "acc_norm": 0.49850924269528923,
+ "acc_norm_stderr": 0.01221324493389968,
+ "acc_stderr": 0.01221324493389968,
+ "prompt_name": "2_fr",
+ "task_name": "crows_pairs_french"
+ }
+ },
+ "crows_pairs_french+3_fr": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.49612403100775193,
+ "acc_norm": 0.49612403100775193,
+ "acc_norm_stderr": 0.012212932249036454,
+ "acc_stderr": 0.012212932249036454,
+ "prompt_name": "3_fr",
+ "task_name": "crows_pairs_french"
+ }
+ },
+ "crows_pairs_french+4_fr": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.5313059033989267,
+ "acc_norm": 0.5313059033989267,
+ "acc_norm_stderr": 0.012189336188399829,
+ "acc_stderr": 0.012189336188399829,
+ "prompt_name": "4_fr",
+ "task_name": "crows_pairs_french"
+ }
+ },
+ "crows_pairs_french+A_preference_fr": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.4847942754919499,
+ "acc_norm": 0.4847942754919499,
+ "acc_norm_stderr": 0.01220765013925874,
+ "acc_stderr": 0.01220765013925874,
+ "prompt_name": "A_preference_fr",
+ "task_name": "crows_pairs_french"
+ }
+ },
+ "crows_pairs_french+A_reality_check_fr": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.505664877757901,
+ "acc_norm": 0.505664877757901,
+ "acc_norm_stderr": 0.012212515323431717,
+ "acc_stderr": 0.012212515323431717,
+ "prompt_name": "A_reality_check_fr",
+ "task_name": "crows_pairs_french"
+ }
+ },
+ "crows_pairs_french+A_stereotype_true_fr": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.5020870602265951,
+ "acc_norm": 0.5020870602265951,
+ "acc_norm_stderr": 0.012213192820312026,
+ "acc_stderr": 0.012213192820312026,
+ "prompt_name": "A_stereotype_true_fr",
+ "task_name": "crows_pairs_french"
+ }
+ },
+ "diabla+Is the error present? (same lang)": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.07741823242867084,
+ "acc_norm": 0.07741823242867084,
+ "acc_norm_stderr": 0.0035253599064790993,
+ "acc_stderr": 0.0035253599064790993,
+ "prompt_name": "Is the error present? (same lang)",
+ "task_name": "diabla"
+ }
+ },
+ "diabla+Which is automatic?": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.4966945024356298,
+ "acc_norm": 0.4966945024356298,
+ "acc_norm_stderr": 0.0065953813991735995,
+ "acc_stderr": 0.0065953813991735995,
+ "prompt_name": "Which is automatic?",
+ "task_name": "diabla"
+ }
+ },
+ "gsarti/flores_101_afr+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 2.8175051369933213,
+ "byte_perplexity": 7.049422805555328,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_afr",
+ "word_perplexity": 139324.0466654445
+ }
+ },
+ "gsarti/flores_101_amh+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 2.0608666814101815,
+ "byte_perplexity": 4.172368790188039,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_amh",
+ "word_perplexity": 105036774.30501972
+ }
+ },
+ "gsarti/flores_101_ara+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 0.8797352167688847,
+ "byte_perplexity": 1.8400375612633983,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_ara",
+ "word_perplexity": 674.8640314665696
+ }
+ },
+ "gsarti/flores_101_asm+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 2.458711333673663,
+ "byte_perplexity": 5.497254736157445,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_asm",
+ "word_perplexity": 6763188828222.085
+ }
+ },
+ "gsarti/flores_101_ast+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 2.0909386784329675,
+ "byte_perplexity": 4.260251728273795,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_ast",
+ "word_perplexity": 10657.272913539553
+ }
+ },
+ "gsarti/flores_101_azj+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 2.9432455349850195,
+ "byte_perplexity": 7.691396328945705,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_azj",
+ "word_perplexity": 45923924.18878753
+ }
+ },
+ "gsarti/flores_101_bel+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 1.914816732584341,
+ "byte_perplexity": 3.7706591215465943,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_bel",
+ "word_perplexity": 23935692.781315073
+ }
+ },
+ "gsarti/flores_101_ben+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 2.3432036318231058,
+ "byte_perplexity": 5.074281765515423,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_ben",
+ "word_perplexity": 2480418685142.412
+ }
+ },
+ "gsarti/flores_101_bos+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 2.665248069942796,
+ "byte_perplexity": 6.343363734045183,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_bos",
+ "word_perplexity": 229622.13691086147
+ }
+ },
+ "gsarti/flores_101_bul+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 1.5136770683283687,
+ "byte_perplexity": 2.8553687444403257,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_bul",
+ "word_perplexity": 194851.13344620814
+ }
+ },
+ "gsarti/flores_101_cat+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 1.2376904653775254,
+ "byte_perplexity": 2.358207169698056,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_cat",
+ "word_perplexity": 179.13123174533087
+ }
+ },
+ "gsarti/flores_101_ceb+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 2.7858604115174295,
+ "byte_perplexity": 6.896481056329736,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_ceb",
+ "word_perplexity": 113330.67154113152
+ }
+ },
+ "gsarti/flores_101_ces+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 2.6150694333085327,
+ "byte_perplexity": 6.126526835715164,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_ces",
+ "word_perplexity": 625101.1441414964
+ }
+ },
+ "gsarti/flores_101_ckb+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 1.9421776126623524,
+ "byte_perplexity": 3.842852526862475,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_ckb",
+ "word_perplexity": 11104497.438038943
+ }
+ },
+ "gsarti/flores_101_cym+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 3.8470317241534553,
+ "byte_perplexity": 14.390369428021707,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_cym",
+ "word_perplexity": 5900331.966242436
+ }
+ },
+ "gsarti/flores_101_dan+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 2.5307665257708245,
+ "byte_perplexity": 5.778786323448377,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_dan",
+ "word_perplexity": 71695.50336412797
+ }
+ },
+ "gsarti/flores_101_deu+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 1.7492158999678582,
+ "byte_perplexity": 3.361758059911202,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_deu",
+ "word_perplexity": 5647.282599404732
+ }
+ },
+ "gsarti/flores_101_ell+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 1.3862374641150543,
+ "byte_perplexity": 2.6139607239932805,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_ell",
+ "word_perplexity": 102751.5248402687
+ }
+ },
+ "gsarti/flores_101_eng+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 1.0435427545613876,
+ "byte_perplexity": 2.061283234268159,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_eng",
+ "word_perplexity": 75.56480997823662
+ }
+ },
+ "gsarti/flores_101_est+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 3.340809503762674,
+ "byte_perplexity": 10.131736127467489,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_est",
+ "word_perplexity": 92602633.82439691
+ }
+ },
+ "gsarti/flores_101_fas+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 1.6586730625582675,
+ "byte_perplexity": 3.1572599808371367,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_fas",
+ "word_perplexity": 59965.98383842629
+ }
+ },
+ "gsarti/flores_101_fin+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 2.9093822743068216,
+ "byte_perplexity": 7.5129644427067355,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_fin",
+ "word_perplexity": 91621886.60145952
+ }
+ },
+ "gsarti/flores_101_fra+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 1.0127395726746855,
+ "byte_perplexity": 2.0177390037335385,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_fra",
+ "word_perplexity": 89.45884576931464
+ }
+ },
+ "gsarti/flores_101_ful+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 3.561969238361191,
+ "byte_perplexity": 11.810263420287875,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_ful",
+ "word_perplexity": 908715.1423017589
+ }
+ },
+ "gsarti/flores_101_gle+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 3.2798070331865063,
+ "byte_perplexity": 9.712259930753122,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_gle",
+ "word_perplexity": 1548851.5929806433
+ }
+ },
+ "gsarti/flores_101_glg+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 1.6877168009728167,
+ "byte_perplexity": 3.2214647330840154,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_glg",
+ "word_perplexity": 1537.3193913761668
+ }
+ },
+ "gsarti/flores_101_guj+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 2.357806609400009,
+ "byte_perplexity": 5.125904532570054,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_guj",
+ "word_perplexity": 133216198508.6925
+ }
+ },
+ "gsarti/flores_101_hau+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 3.4659038057537184,
+ "byte_perplexity": 11.049458818357667,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_hau",
+ "word_perplexity": 730749.6449046461
+ }
+ },
+ "gsarti/flores_101_heb+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 1.8889611054621571,
+ "byte_perplexity": 3.7036842387723694,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_heb",
+ "word_perplexity": 880255.4148832298
+ }
+ },
+ "gsarti/flores_101_hin+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 2.195760704215568,
+ "byte_perplexity": 4.581311639568996,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_hin",
+ "word_perplexity": 453226793.5348556
+ }
+ },
+ "gsarti/flores_101_hrv+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 2.7016816564307984,
+ "byte_perplexity": 6.50559790827845,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_hrv",
+ "word_perplexity": 307789.1462790266
+ }
+ },
+ "gsarti/flores_101_hun+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 2.8470581600253615,
+ "byte_perplexity": 7.19531655942431,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_hun",
+ "word_perplexity": 8545882.19823639
+ }
+ },
+ "gsarti/flores_101_hye+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 1.906169044483402,
+ "byte_perplexity": 3.7481249397064547,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_hye",
+ "word_perplexity": 99262887.01092263
+ }
+ },
+ "gsarti/flores_101_ibo+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 2.6012385649422316,
+ "byte_perplexity": 6.06807351892086,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_ibo",
+ "word_perplexity": 99576.38125028457
+ }
+ },
+ "gsarti/flores_101_ind+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 1.1501325666473412,
+ "byte_perplexity": 2.2193428661828962,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_ind",
+ "word_perplexity": 299.41864562936706
+ }
+ },
+ "gsarti/flores_101_isl+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 3.1394769822824644,
+ "byte_perplexity": 8.812045732299993,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_isl",
+ "word_perplexity": 3947458.536983725
+ }
+ },
+ "gsarti/flores_101_ita+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 1.695253347487448,
+ "byte_perplexity": 3.238337491305615,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_ita",
+ "word_perplexity": 1951.0663459405935
+ }
+ },
+ "gsarti/flores_101_jav+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 2.899297993680408,
+ "byte_perplexity": 7.460632752007581,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_jav",
+ "word_perplexity": 956961.3940329206
+ }
+ },
+ "gsarti/flores_101_jpn+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 1.539549942005635,
+ "byte_perplexity": 2.907038023970581,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_jpn",
+ "word_perplexity": 6.0024027118732196e+69
+ }
+ },
+ "gsarti/flores_101_kam+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 3.515626316920499,
+ "byte_perplexity": 11.436917146974627,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_kam",
+ "word_perplexity": 4288601.196402131
+ }
+ },
+ "gsarti/flores_101_kan+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 2.3991591199422513,
+ "byte_perplexity": 5.274956219477929,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_kan",
+ "word_perplexity": 5.3861539364992216e+16
+ }
+ },
+ "gsarti/flores_101_kat+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 1.3325401608568794,
+ "byte_perplexity": 2.5184571084900518,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_kat",
+ "word_perplexity": 1133105340.614723
+ }
+ },
+ "gsarti/flores_101_kaz+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 1.845791322405974,
+ "byte_perplexity": 3.5945005448756477,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_kaz",
+ "word_perplexity": 89537342.10068764
+ }
+ },
+ "gsarti/flores_101_kea+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 3.2143692668645976,
+ "byte_perplexity": 9.281572608888562,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_kea",
+ "word_perplexity": 438558.0012817139
+ }
+ },
+ "gsarti/flores_101_kir+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 1.9511242166700078,
+ "byte_perplexity": 3.8667573034119127,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_kir",
+ "word_perplexity": 235337758.18519488
+ }
+ },
+ "gsarti/flores_101_kor+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 2.023500324792833,
+ "byte_perplexity": 4.065690303705374,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_kor",
+ "word_perplexity": 1684949.6449262113
+ }
+ },
+ "gsarti/flores_101_lao+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 1.6376750107826055,
+ "byte_perplexity": 3.1116396826339545,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_lao",
+ "word_perplexity": 3.0817754157127624e+28
+ }
+ },
+ "gsarti/flores_101_lav+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 3.075865182775687,
+ "byte_perplexity": 8.431943399753028,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_lav",
+ "word_perplexity": 20692036.880855087
+ }
+ },
+ "gsarti/flores_101_lin+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 3.018221991102226,
+ "byte_perplexity": 8.10168498947524,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_lin",
+ "word_perplexity": 259077.7174090486
+ }
+ },
+ "gsarti/flores_101_lit+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 3.0526165270213905,
+ "byte_perplexity": 8.297153789252596,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_lit",
+ "word_perplexity": 22011900.13997282
+ }
+ },
+ "gsarti/flores_101_ltz+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 3.2407955989852377,
+ "byte_perplexity": 9.453152958003827,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_ltz",
+ "word_perplexity": 6731220.931729273
+ }
+ },
+ "gsarti/flores_101_lug+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 3.2150119431528754,
+ "byte_perplexity": 9.285708185212261,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_lug",
+ "word_perplexity": 32046806.791237485
+ }
+ },
+ "gsarti/flores_101_luo+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 3.609093857404177,
+ "byte_perplexity": 12.202407052163576,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_luo",
+ "word_perplexity": 1485111.1306447538
+ }
+ },
+ "gsarti/flores_101_mal+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 2.1588237245178132,
+ "byte_perplexity": 4.465506197375413,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_mal",
+ "word_perplexity": 4.8990954217696134e+17
+ }
+ },
+ "gsarti/flores_101_mar+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 2.454064685835334,
+ "byte_perplexity": 5.479577601103449,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_mar",
+ "word_perplexity": 53348101396468.1
+ }
+ },
+ "gsarti/flores_101_mkd+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 1.6388651004482695,
+ "byte_perplexity": 3.11420755589491,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_mkd",
+ "word_perplexity": 513306.31562258815
+ }
+ },
+ "gsarti/flores_101_mlt+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 4.014730236310589,
+ "byte_perplexity": 16.164200382975334,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_mlt",
+ "word_perplexity": 3271065298.9525104
+ }
+ },
+ "gsarti/flores_101_mon+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 1.8368760183021453,
+ "byte_perplexity": 3.5723563966116956,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_mon",
+ "word_perplexity": 11967156.496346941
+ }
+ },
+ "gsarti/flores_101_mri+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 3.037967287223778,
+ "byte_perplexity": 8.213330128288407,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_mri",
+ "word_perplexity": 42667.84366725716
+ }
+ },
+ "gsarti/flores_101_msa+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 1.4109363519680242,
+ "byte_perplexity": 2.659096901190639,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_msa",
+ "word_perplexity": 1188.7251531670374
+ }
+ },
+ "gsarti/flores_101_mya+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 1.4101030557435918,
+ "byte_perplexity": 2.657561458464019,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_mya",
+ "word_perplexity": 5.887577237013639e+18
+ }
+ },
+ "gsarti/flores_101_nld+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 2.1813098607926804,
+ "byte_perplexity": 4.535651709856251,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_nld",
+ "word_perplexity": 13951.877058430618
+ }
+ },
+ "gsarti/flores_101_nob+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 2.561165630453858,
+ "byte_perplexity": 5.901843358131797,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_nob",
+ "word_perplexity": 64134.3587194621
+ }
+ },
+ "gsarti/flores_101_npi+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 2.361386302448311,
+ "byte_perplexity": 5.138638996619111,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_npi",
+ "word_perplexity": 7452421298650.788
+ }
+ },
+ "gsarti/flores_101_nso+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 3.150046187635368,
+ "byte_perplexity": 8.876839962509171,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_nso",
+ "word_perplexity": 133251.3907730927
+ }
+ },
+ "gsarti/flores_101_nya+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 3.166160871838487,
+ "byte_perplexity": 8.97654874419086,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_nya",
+ "word_perplexity": 13237249.320560299
+ }
+ },
+ "gsarti/flores_101_oci+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 2.3544826611123932,
+ "byte_perplexity": 5.114108118049416,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_oci",
+ "word_perplexity": 29786.57326210068
+ }
+ },
+ "gsarti/flores_101_orm+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 3.7457001993717243,
+ "byte_perplexity": 13.414303089263644,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_orm",
+ "word_perplexity": 1286222337.8393624
+ }
+ },
+ "gsarti/flores_101_ory+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 2.3466784891528936,
+ "byte_perplexity": 5.086518347981296,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_ory",
+ "word_perplexity": 8232620282886.167
+ }
+ },
+ "gsarti/flores_101_pan+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 2.3255600077385723,
+ "byte_perplexity": 5.012603107956229,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_pan",
+ "word_perplexity": 2003582065.835696
+ }
+ },
+ "gsarti/flores_101_pol+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 2.3688414865658434,
+ "byte_perplexity": 5.165261846492578,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_pol",
+ "word_perplexity": 239703.75452947227
+ }
+ },
+ "gsarti/flores_101_por+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 1.0087385096181816,
+ "byte_perplexity": 2.012150908931838,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_por",
+ "word_perplexity": 78.66129921108659
+ }
+ },
+ "gsarti/flores_101_pus+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 2.2173729850313615,
+ "byte_perplexity": 4.650458574106675,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_pus",
+ "word_perplexity": 200303.57214724104
+ }
+ },
+ "gsarti/flores_101_ron+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 2.486356022105963,
+ "byte_perplexity": 5.603607947317877,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_ron",
+ "word_perplexity": 80490.92705368399
+ }
+ },
+ "gsarti/flores_101_rus+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 1.095728414417906,
+ "byte_perplexity": 2.1372096174466697,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_rus",
+ "word_perplexity": 22038.65288574451
+ }
+ },
+ "gsarti/flores_101_slk+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 2.8667803584469502,
+ "byte_perplexity": 7.294354718439043,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_slk",
+ "word_perplexity": 1873211.2703176092
+ }
+ },
+ "gsarti/flores_101_slv+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 2.894935550489075,
+ "byte_perplexity": 7.438107250941839,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_slv",
+ "word_perplexity": 609965.8362492598
+ }
+ },
+ "gsarti/flores_101_sna+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 3.226698783453375,
+ "byte_perplexity": 9.361234419948593,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_sna",
+ "word_perplexity": 151658287.08006003
+ }
+ },
+ "gsarti/flores_101_snd+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 2.505484320885354,
+ "byte_perplexity": 5.678399375652783,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_snd",
+ "word_perplexity": 2195879.0537875695
+ }
+ },
+ "gsarti/flores_101_som+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 3.6579492747174616,
+ "byte_perplexity": 12.622705630414286,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_som",
+ "word_perplexity": 12921970.127169678
+ }
+ },
+ "gsarti/flores_101_spa+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 0.9441289779054047,
+ "byte_perplexity": 1.9240269109386998,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_spa",
+ "word_perplexity": 55.14408503293887
+ }
+ },
+ "gsarti/flores_101_srp+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 1.6091583939601046,
+ "byte_perplexity": 3.050738229673983,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_srp",
+ "word_perplexity": 359037.4163692842
+ }
+ },
+ "gsarti/flores_101_swe+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 2.4943222333483153,
+ "byte_perplexity": 5.634635291846611,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_swe",
+ "word_perplexity": 104567.9891705103
+ }
+ },
+ "gsarti/flores_101_swh+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 1.9721156771582438,
+ "byte_perplexity": 3.923430589092355,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_swh",
+ "word_perplexity": 6985.646204087442
+ }
+ },
+ "gsarti/flores_101_tam+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 2.0999329236632325,
+ "byte_perplexity": 4.286894531607389,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_tam",
+ "word_perplexity": 4220234444737767.0
+ }
+ },
+ "gsarti/flores_101_tel+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 2.549014618212334,
+ "byte_perplexity": 5.852344181819556,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_tel",
+ "word_perplexity": 7315913985648022.0
+ }
+ },
+ "gsarti/flores_101_tgk+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 1.9399053923480125,
+ "byte_perplexity": 3.836804862794101,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_tgk",
+ "word_perplexity": 10003619.893239152
+ }
+ },
+ "gsarti/flores_101_tgl+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 2.645461413001105,
+ "byte_perplexity": 6.256957969905079,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_tgl",
+ "word_perplexity": 87554.31770184237
+ }
+ },
+ "gsarti/flores_101_tha+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 1.2979178211163922,
+ "byte_perplexity": 2.458737675753546,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_tha",
+ "word_perplexity": 6.85384626099906e+32
+ }
+ },
+ "gsarti/flores_101_tur+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 2.4123830232149,
+ "byte_perplexity": 5.323529328304652,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_tur",
+ "word_perplexity": 1230000.8194755162
+ }
+ },
+ "gsarti/flores_101_ukr+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 1.5282644195953918,
+ "byte_perplexity": 2.8843863497020608,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_ukr",
+ "word_perplexity": 780615.9486315987
+ }
+ },
+ "gsarti/flores_101_umb+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 3.710219475046473,
+ "byte_perplexity": 13.088423907901921,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_umb",
+ "word_perplexity": 346118506.64866126
+ }
+ },
+ "gsarti/flores_101_urd+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 1.0075988539165108,
+ "byte_perplexity": 2.010562039704537,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_urd",
+ "word_perplexity": 335.1943886252716
+ }
+ },
+ "gsarti/flores_101_uzb+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 3.69831120498359,
+ "byte_perplexity": 12.980834294137205,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_uzb",
+ "word_perplexity": 1248263505.2751954
+ }
+ },
+ "gsarti/flores_101_vie+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 0.8461114961807352,
+ "byte_perplexity": 1.7976491760484148,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_vie",
+ "word_perplexity": 33.51752264232948
+ }
+ },
+ "gsarti/flores_101_wol+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 3.332383415073327,
+ "byte_perplexity": 10.072733993132132,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_wol",
+ "word_perplexity": 199684.7010180392
+ }
+ },
+ "gsarti/flores_101_xho+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 3.0428982143908727,
+ "byte_perplexity": 8.241450154294917,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_xho",
+ "word_perplexity": 141017733.33017766
+ }
+ },
+ "gsarti/flores_101_yor+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 2.62429549091613,
+ "byte_perplexity": 6.165831615133067,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_yor",
+ "word_perplexity": 171980.641422536
+ }
+ },
+ "gsarti/flores_101_zho_simpl+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 1.2156521449449949,
+ "byte_perplexity": 2.322457417595381,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_zho_simpl",
+ "word_perplexity": 1.0554528210220222e+21
+ }
+ },
+ "gsarti/flores_101_zho_trad+null": {
+ "2022-07-14-10-03-25": {
+ "bits_per_byte": 1.3622834584784203,
+ "byte_perplexity": 2.5709177552415134,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_zho_trad",
+ "word_perplexity": 4.787781515987923e+24
+ }
+ },
+ "gsarti/flores_101_zul+null": {
+ "2022-07-14-12-00-55": {
+ "bits_per_byte": 3.2020451216662975,
+ "byte_perplexity": 9.202622963132773,
+ "prompt_name": "null",
+ "task_name": "gsarti/flores_101_zul",
+ "word_perplexity": 998742068.9481835
+ }
+ },
+ "headqa": {
+ "2022-07-13-11-29-13": {
+ "acc": 0.25419401896425964,
+ "acc_norm": 0.29576951130561635,
+ "acc_norm_stderr": 0.008717251898361422,
+ "acc_stderr": 0.008316509290190668
+ }
+ },
+ "hellaswag": {
+ "2022-07-13-11-29-13": {
+ "acc": 0.37621987651862177,
+ "acc_norm": 0.46564429396534557,
+ "acc_norm_stderr": 0.004977988452502641,
+ "acc_stderr": 0.004834461997944872
+ }
+ },
+ "lambada": {
+ "2022-07-13-11-29-13": {
+ "acc": 0.46322530564719583,
+ "acc_stderr": 0.006947110835634445,
+ "ppl": 12.583447597222621,
+ "ppl_stderr": 0.4021518609838198
+ }
+ },
+ "logiqa": {
+ "2022-07-13-11-29-13": {
+ "acc": 0.21658986175115208,
+ "acc_norm": 0.28110599078341014,
+ "acc_norm_stderr": 0.017632374626460005,
+ "acc_stderr": 0.016156860583178303
+ }
+ },
+ "mathqa": {
+ "2022-07-13-11-29-13": {
+ "acc": 0.2489112227805695,
+ "acc_norm": 0.2422110552763819,
+ "acc_norm_stderr": 0.007842810183504986,
+ "acc_stderr": 0.007915319798861361
+ }
+ },
+ "mc_taco": {
+ "2022-07-13-11-29-13": {
+ "em": 0.12537537537537538,
+ "f1": 0.4747075325110886
+ }
+ },
+ "mnli+GPT-3 style": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.3564951604686704,
+ "acc_norm": 0.335303107488538,
+ "acc_norm_stderr": 0.004765490263584639,
+ "acc_stderr": 0.004834813222301984,
+ "prompt_name": "GPT-3 style",
+ "task_name": "mnli"
+ }
+ },
+ "mnli+MNLI crowdsource": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.3548650025471218,
+ "acc_norm": 0.37982679572083544,
+ "acc_norm_stderr": 0.004899212442097964,
+ "acc_stderr": 0.004829852406948984,
+ "prompt_name": "MNLI crowdsource",
+ "task_name": "mnli"
+ }
+ },
+ "mnli+always/sometimes/never": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.31920529801324504,
+ "acc_norm": 0.31818644931227713,
+ "acc_norm_stderr": 0.004701653585969693,
+ "acc_stderr": 0.004705655206722177,
+ "prompt_name": "always/sometimes/never",
+ "task_name": "mnli"
+ }
+ },
+ "mnli+based on the previous passage": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.34070300560366784,
+ "acc_norm": 0.33245033112582784,
+ "acc_norm_stderr": 0.004755346314564714,
+ "acc_stderr": 0.004784157883834768,
+ "prompt_name": "based on the previous passage",
+ "task_name": "mnli"
+ }
+ },
+ "mnli+can we infer": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.36271013754457465,
+ "acc_norm": 0.3392766174223128,
+ "acc_norm_stderr": 0.004779294320017342,
+ "acc_stderr": 0.004853167998709484,
+ "prompt_name": "can we infer",
+ "task_name": "mnli"
+ }
+ },
+ "mnli+claim true/false/inconclusive": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.35384615384615387,
+ "acc_norm": 0.3169638308711156,
+ "acc_norm_stderr": 0.004696817414398099,
+ "acc_stderr": 0.004826720820135633,
+ "prompt_name": "claim true/false/inconclusive",
+ "task_name": "mnli"
+ }
+ },
+ "mnli+consider always/sometimes/never": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.3183902190524707,
+ "acc_norm": 0.31818644931227713,
+ "acc_norm_stderr": 0.004701653585969693,
+ "acc_stderr": 0.004702455981984395,
+ "prompt_name": "consider always/sometimes/never",
+ "task_name": "mnli"
+ }
+ },
+ "mnli+does it follow that": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.3784004075394804,
+ "acc_norm": 0.3499745287824758,
+ "acc_norm_stderr": 0.004814601860231488,
+ "acc_stderr": 0.00489562485968904,
+ "prompt_name": "does it follow that",
+ "task_name": "mnli"
+ }
+ },
+ "mnli+does this imply": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.33224656138563424,
+ "acc_norm": 0.31920529801324504,
+ "acc_norm_stderr": 0.004705655206722178,
+ "acc_stderr": 0.004754614244749308,
+ "prompt_name": "does this imply",
+ "task_name": "mnli"
+ }
+ },
+ "mnli+guaranteed true": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.35731023942944473,
+ "acc_norm": 0.3398879266428935,
+ "acc_norm_stderr": 0.004781384619510542,
+ "acc_stderr": 0.004837270730680468,
+ "prompt_name": "guaranteed true",
+ "task_name": "mnli"
+ }
+ },
+ "mnli+guaranteed/possible/impossible": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.32317880794701986,
+ "acc_norm": 0.3390728476821192,
+ "acc_norm_stderr": 0.004778595579555236,
+ "acc_stderr": 0.004721015048648592,
+ "prompt_name": "guaranteed/possible/impossible",
+ "task_name": "mnli"
+ }
+ },
+ "mnli+justified in saying": {
+ "2022-07-12-22-45-57": {
+ "acc": 0.3611818644931228,
+ "acc_norm": 0.33438614365766683,
+ "acc_norm_stderr": 0.004762251055102503,
+ "acc_stderr": 0.0048487362318538965,
+ "prompt_name": "justified in saying",
+ "task_name": "mnli"
1839
+ }
1840
+ },
1841
+ "mnli+must be true": {
1842
+ "2022-07-12-22-45-57": {
1843
+ "acc": 0.3532348446255731,
1844
+ "acc_norm": 0.3400916963830871,
1845
+ "acc_norm_stderr": 0.004782079413482068,
1846
+ "acc_stderr": 0.004824830369595005,
1847
+ "prompt_name": "must be true",
1848
+ "task_name": "mnli"
1849
+ }
1850
+ },
1851
+ "mnli+should assume": {
1852
+ "2022-07-12-22-45-57": {
1853
+ "acc": 0.3532348446255731,
1854
+ "acc_norm": 0.32236372898624555,
1855
+ "acc_norm_stderr": 0.004717896188851781,
1856
+ "acc_stderr": 0.004824830369595005,
1857
+ "prompt_name": "should assume",
1858
+ "task_name": "mnli"
1859
+ }
1860
+ },
1861
+ "mnli+take the following as truth": {
1862
+ "2022-07-12-22-45-57": {
1863
+ "acc": 0.3540499235863474,
1864
+ "acc_norm": 0.32654100866021396,
1865
+ "acc_norm_stderr": 0.004733707466562015,
1866
+ "acc_stderr": 0.004827349052909375,
1867
+ "prompt_name": "take the following as truth",
1868
+ "task_name": "mnli"
1869
+ }
1870
+ },
1871
+ "mnli_mismatched+GPT-3 style": {
1872
+ "2022-07-12-22-45-57": {
1873
+ "acc": 0.3558787632221318,
1874
+ "acc_norm": 0.3365541090317331,
1875
+ "acc_norm_stderr": 0.0047657510794410825,
1876
+ "acc_stderr": 0.004828764189286043,
1877
+ "prompt_name": "GPT-3 style",
1878
+ "task_name": "mnli_mismatched"
1879
+ }
1880
+ },
1881
+ "mnli_mismatched+MNLI crowdsource": {
1882
+ "2022-07-12-22-45-57": {
1883
+ "acc": 0.3524206672091131,
1884
+ "acc_norm": 0.3876118795768918,
1885
+ "acc_norm_stderr": 0.004913750149712027,
1886
+ "acc_stderr": 0.004818127922877737,
1887
+ "prompt_name": "MNLI crowdsource",
1888
+ "task_name": "mnli_mismatched"
1889
+ }
1890
+ },
1891
+ "mnli_mismatched+always/sometimes/never": {
1892
+ "2022-07-12-22-45-57": {
1893
+ "acc": 0.3187550854353133,
1894
+ "acc_norm": 0.318246541903987,
1895
+ "acc_norm_stderr": 0.004697823254367764,
1896
+ "acc_stderr": 0.004699821349212815,
1897
+ "prompt_name": "always/sometimes/never",
1898
+ "task_name": "mnli_mismatched"
1899
+ }
1900
+ },
1901
+ "mnli_mismatched+based on the previous passage": {
1902
+ "2022-07-12-22-45-57": {
1903
+ "acc": 0.3442839707078926,
1904
+ "acc_norm": 0.3240439381611066,
1905
+ "acc_norm_stderr": 0.00472022103875238,
1906
+ "acc_stderr": 0.004792007109263922,
1907
+ "prompt_name": "based on the previous passage",
1908
+ "task_name": "mnli_mismatched"
1909
+ }
1910
+ },
1911
+ "mnli_mismatched+can we infer": {
1912
+ "2022-07-12-22-45-57": {
1913
+ "acc": 0.3628966639544345,
1914
+ "acc_norm": 0.33909682668836455,
1915
+ "acc_norm_stderr": 0.0047745443668395,
1916
+ "acc_stderr": 0.004849506876045877,
1917
+ "prompt_name": "can we infer",
1918
+ "task_name": "mnli_mismatched"
1919
+ }
1920
+ },
1921
+ "mnli_mismatched+claim true/false/inconclusive": {
1922
+ "2022-07-12-22-45-57": {
1923
+ "acc": 0.3517087062652563,
1924
+ "acc_norm": 0.31550040683482505,
1925
+ "acc_norm_stderr": 0.004686921836958016,
1926
+ "acc_stderr": 0.004815903833418159,
1927
+ "prompt_name": "claim true/false/inconclusive",
1928
+ "task_name": "mnli_mismatched"
1929
+ }
1930
+ },
1931
+ "mnli_mismatched+consider always/sometimes/never": {
1932
+ "2022-07-12-22-45-57": {
1933
+ "acc": 0.318246541903987,
1934
+ "acc_norm": 0.318246541903987,
1935
+ "acc_norm_stderr": 0.004697823254367764,
1936
+ "acc_stderr": 0.004697823254367764,
1937
+ "prompt_name": "consider always/sometimes/never",
1938
+ "task_name": "mnli_mismatched"
1939
+ }
1940
+ },
1941
+ "mnli_mismatched+does it follow that": {
1942
+ "2022-07-12-22-45-57": {
1943
+ "acc": 0.38923921887713586,
1944
+ "acc_norm": 0.34926769731489016,
1945
+ "acc_norm_stderr": 0.004808189163919754,
1946
+ "acc_stderr": 0.004917507365149974,
1947
+ "prompt_name": "does it follow that",
1948
+ "task_name": "mnli_mismatched"
1949
+ }
1950
+ },
1951
+ "mnli_mismatched+does this imply": {
1952
+ "2022-07-12-22-45-57": {
1953
+ "acc": 0.3233319772172498,
1954
+ "acc_norm": 0.3184499593165175,
1955
+ "acc_norm_stderr": 0.0046986232661144,
1956
+ "acc_stderr": 0.0047175151956513625,
1957
+ "prompt_name": "does this imply",
1958
+ "task_name": "mnli_mismatched"
1959
+ }
1960
+ },
1961
+ "mnli_mismatched+guaranteed true": {
1962
+ "2022-07-12-22-45-57": {
1963
+ "acc": 0.36208299430431246,
1964
+ "acc_norm": 0.3303498779495525,
1965
+ "acc_norm_stderr": 0.004743645253038162,
1966
+ "acc_stderr": 0.00484715944530685,
1967
+ "prompt_name": "guaranteed true",
1968
+ "task_name": "mnli_mismatched"
1969
+ }
1970
+ },
1971
+ "mnli_mismatched+guaranteed/possible/impossible": {
1972
+ "2022-07-12-22-45-57": {
1973
+ "acc": 0.32048413344182264,
1974
+ "acc_norm": 0.33848657445077296,
1975
+ "acc_norm_stderr": 0.004772448023078353,
1976
+ "acc_stderr": 0.004706566719294992,
1977
+ "prompt_name": "guaranteed/possible/impossible",
1978
+ "task_name": "mnli_mismatched"
1979
+ }
1980
+ },
1981
+ "mnli_mismatched+justified in saying": {
1982
+ "2022-07-12-22-45-57": {
1983
+ "acc": 0.371033360455655,
1984
+ "acc_norm": 0.32648494711147275,
1985
+ "acc_norm_stderr": 0.004729403696523803,
1986
+ "acc_stderr": 0.004872158826748743,
1987
+ "prompt_name": "justified in saying",
1988
+ "task_name": "mnli_mismatched"
1989
+ }
1990
+ },
1991
+ "mnli_mismatched+must be true": {
1992
+ "2022-07-12-22-45-57": {
1993
+ "acc": 0.3565907241659886,
1994
+ "acc_norm": 0.3373677786818552,
1995
+ "acc_norm_stderr": 0.004768581700693004,
1996
+ "acc_stderr": 0.004830919845456573,
1997
+ "prompt_name": "must be true",
1998
+ "task_name": "mnli_mismatched"
1999
+ }
2000
+ },
2001
+ "mnli_mismatched+should assume": {
2002
+ "2022-07-12-22-45-57": {
2003
+ "acc": 0.35740439381611067,
2004
+ "acc_norm": 0.32231489015459724,
2005
+ "acc_norm_stderr": 0.0047136280360736155,
2006
+ "acc_stderr": 0.0048333692129862065,
2007
+ "prompt_name": "should assume",
2008
+ "task_name": "mnli_mismatched"
2009
+ }
2010
+ },
2011
+ "mnli_mismatched+take the following as truth": {
2012
+ "2022-07-12-22-45-57": {
2013
+ "acc": 0.3522172497965826,
2014
+ "acc_norm": 0.3263832384052075,
2015
+ "acc_norm_stderr": 0.004729024000627127,
2016
+ "acc_stderr": 0.004817493665633715,
2017
+ "prompt_name": "take the following as truth",
2018
+ "task_name": "mnli_mismatched"
2019
+ }
2020
+ },
2021
+ "mrpc": {
2022
+ "2022-07-13-11-29-13": {
2023
+ "acc": 0.6813725490196079,
2024
+ "acc_stderr": 0.023095996571841474,
2025
+ "f1": 0.8104956268221574,
2026
+ "f1_stderr": 0.016329211455484924
2027
+ }
2028
+ },
2029
+ "multirc": {
2030
+ "2022-07-13-11-29-13": {
2031
+ "acc": 0.011542497376705142,
2032
+ "acc_stderr": 0.003461867320927179
2033
+ }
2034
+ },
2035
+ "multirc+I was going to say\u2026": {
2036
+ "2022-07-12-22-45-57": {
2037
+ "acc": 0.5082508250825083,
2038
+ "acc_norm": 0.4280115511551155,
2039
+ "acc_norm_stderr": 0.007106976252751536,
2040
+ "acc_stderr": 0.007180825220905937,
2041
+ "prompt_name": "I was going to say\u2026",
2042
+ "task_name": "multirc"
2043
+ }
2044
+ },
2045
+ "multirc+Would it be good to answer\u2026": {
2046
+ "2022-07-12-22-45-57": {
2047
+ "acc": 0.45173267326732675,
2048
+ "acc_norm": 0.4278052805280528,
2049
+ "acc_norm_stderr": 0.007106544557507229,
2050
+ "acc_stderr": 0.007148261386088041,
2051
+ "prompt_name": "Would it be good to answer\u2026",
2052
+ "task_name": "multirc"
2053
+ }
2054
+ },
2055
+ "multirc+confirm": {
2056
+ "2022-07-12-22-45-57": {
2057
+ "acc": 0.4280115511551155,
2058
+ "acc_norm": 0.4280115511551155,
2059
+ "acc_norm_stderr": 0.007106976252751536,
2060
+ "acc_stderr": 0.007106976252751536,
2061
+ "prompt_name": "confirm",
2062
+ "task_name": "multirc"
2063
+ }
2064
+ },
2065
+ "multirc+correct": {
2066
+ "2022-07-12-22-45-57": {
2067
+ "acc": 0.5532178217821783,
2068
+ "acc_norm": 0.4643151815181518,
2069
+ "acc_norm_stderr": 0.00716348904876326,
2070
+ "acc_stderr": 0.007141007544074806,
2071
+ "prompt_name": "correct",
2072
+ "task_name": "multirc"
2073
+ }
2074
+ },
2075
+ "multirc+decide_valid": {
2076
+ "2022-07-12-22-45-57": {
2077
+ "acc": 0.5107260726072608,
2078
+ "acc_norm": 0.4280115511551155,
2079
+ "acc_norm_stderr": 0.007106976252751536,
2080
+ "acc_stderr": 0.007180150402551771,
2081
+ "prompt_name": "decide_valid",
2082
+ "task_name": "multirc"
2083
+ }
2084
+ },
2085
+ "multirc+found_this_answer": {
2086
+ "2022-07-12-22-45-57": {
2087
+ "acc": 0.4278052805280528,
2088
+ "acc_norm": 0.4280115511551155,
2089
+ "acc_norm_stderr": 0.007106976252751536,
2090
+ "acc_stderr": 0.007106544557507229,
2091
+ "prompt_name": "found_this_answer",
2092
+ "task_name": "multirc"
2093
+ }
2094
+ },
2095
+ "multirc+grading": {
2096
+ "2022-07-12-22-45-57": {
2097
+ "acc": 0.429042904290429,
2098
+ "acc_norm": 0.4280115511551155,
2099
+ "acc_norm_stderr": 0.007106976252751536,
2100
+ "acc_stderr": 0.007109115814226985,
2101
+ "prompt_name": "grading",
2102
+ "task_name": "multirc"
2103
+ }
2104
+ },
2105
+ "multirc+is the correct answer\u2026": {
2106
+ "2022-07-12-22-45-57": {
2107
+ "acc": 0.4498762376237624,
2108
+ "acc_norm": 0.4273927392739274,
2109
+ "acc_norm_stderr": 0.007105677382236137,
2110
+ "acc_stderr": 0.0071456249799065185,
2111
+ "prompt_name": "is the correct answer\u2026",
2112
+ "task_name": "multirc"
2113
+ }
2114
+ },
2115
+ "multirc+is\u2026 a correct answer?": {
2116
+ "2022-07-12-22-45-57": {
2117
+ "acc": 0.4278052805280528,
2118
+ "acc_norm": 0.4280115511551155,
2119
+ "acc_norm_stderr": 0.007106976252751536,
2120
+ "acc_stderr": 0.007106544557507229,
2121
+ "prompt_name": "is\u2026 a correct answer?",
2122
+ "task_name": "multirc"
2123
+ }
2124
+ },
2125
+ "multirc+paragraph\u2026 question\u2026 is it\u2026 ?": {
2126
+ "2022-07-12-22-45-57": {
2127
+ "acc": 0.5030940594059405,
2128
+ "acc_norm": 0.42883663366336633,
2129
+ "acc_norm_stderr": 0.007108690423137722,
2130
+ "acc_stderr": 0.007181665598939583,
2131
+ "prompt_name": "paragraph\u2026 question\u2026 is it\u2026 ?",
2132
+ "task_name": "multirc"
2133
+ }
2134
+ },
2135
+ "openbookqa": {
2136
+ "2022-07-13-11-29-13": {
2137
+ "acc": 0.214,
2138
+ "acc_norm": 0.298,
2139
+ "acc_norm_stderr": 0.020475118092988978,
2140
+ "acc_stderr": 0.01835979750238702
2141
+ }
2142
+ },
2143
+ "piqa": {
2144
+ "2022-07-13-11-29-13": {
2145
+ "acc": 0.6871599564744287,
2146
+ "acc_norm": 0.7002176278563657,
2147
+ "acc_norm_stderr": 0.010689686967138092,
2148
+ "acc_stderr": 0.010817714425701112
2149
+ }
2150
+ },
2151
+ "prost": {
2152
+ "2022-07-13-11-29-13": {
2153
+ "acc": 0.23505550811272416,
2154
+ "acc_norm": 0.2670260461144321,
2155
+ "acc_norm_stderr": 0.0032321702981822874,
2156
+ "acc_stderr": 0.0030979423271461875
2157
+ }
2158
+ },
2159
+ "pubmedqa": {
2160
+ "2022-07-13-11-29-13": {
2161
+ "acc": 0.56,
2162
+ "acc_stderr": 0.015704987954361798
2163
+ }
2164
+ },
2165
+ "qnli": {
2166
+ "2022-07-13-11-29-13": {
2167
+ "acc": 0.4962474830679114,
2168
+ "acc_stderr": 0.006765220016415222
2169
+ }
2170
+ },
2171
+ "qqp": {
2172
+ "2022-07-13-11-29-13": {
2173
+ "acc": 0.3681424684640119,
2174
+ "acc_stderr": 0.0023986729832071816,
2175
+ "f1": 0.5381138352498734,
2176
+ "f1_stderr": 0.002555831569895799
2177
+ }
2178
+ },
2179
+ "qqp+answer": {
2180
+ "2022-07-12-22-45-57": {
2181
+ "acc": 0.40558990848379917,
2182
+ "acc_norm": 0.36816720257234725,
2183
+ "acc_norm_stderr": 0.002398706610614492,
2184
+ "acc_stderr": 0.002441969063495092,
2185
+ "prompt_name": "answer",
2186
+ "task_name": "qqp"
2187
+ }
2188
+ },
2189
+ "qqp+duplicate": {
2190
+ "2022-07-12-22-45-57": {
2191
+ "acc": 0.3788523373732377,
2192
+ "acc_norm": 0.36816720257234725,
2193
+ "acc_norm_stderr": 0.002398706610614492,
2194
+ "acc_stderr": 0.002412603277723025,
2195
+ "prompt_name": "duplicate",
2196
+ "task_name": "qqp"
2197
+ }
2198
+ },
2199
+ "qqp+duplicate or not": {
2200
+ "2022-07-13-19-23-37": {
2201
+ "acc": 0.5761315854563444,
2202
+ "acc_norm": 0.6318327974276527,
2203
+ "acc_norm_stderr": 0.002398706610614492,
2204
+ "acc_stderr": 0.0024577056660753426,
2205
+ "prompt_name": "duplicate or not",
2206
+ "task_name": "qqp"
2207
+ }
2208
+ },
2209
+ "qqp+meaning": {
2210
+ "2022-07-13-19-23-37": {
2211
+ "acc": 0.3681424684640119,
2212
+ "acc_norm": 0.36816720257234725,
2213
+ "acc_norm_stderr": 0.002398706610614492,
2214
+ "acc_stderr": 0.0023986729832071916,
2215
+ "prompt_name": "meaning",
2216
+ "task_name": "qqp"
2217
+ }
2218
+ },
2219
+ "qqp+quora": {
2220
+ "2022-07-13-19-23-37": {
2221
+ "acc": 0.36821667078901804,
2222
+ "acc_norm": 0.36816720257234725,
2223
+ "acc_norm_stderr": 0.002398706610614492,
2224
+ "acc_stderr": 0.0023987738450886556,
2225
+ "prompt_name": "quora",
2226
+ "task_name": "qqp"
2227
+ }
2228
+ },
2229
+ "qqp+same thing": {
2230
+ "2022-07-13-19-23-37": {
2231
+ "acc": 0.5099431115508286,
2232
+ "acc_norm": 0.36816720257234725,
2233
+ "acc_norm_stderr": 0.002398706610614492,
2234
+ "acc_stderr": 0.002486208885430481,
2235
+ "prompt_name": "same thing",
2236
+ "task_name": "qqp"
2237
+ }
2238
+ },
2239
+ "race": {
2240
+ "2022-07-13-11-29-13": {
2241
+ "acc": 0.3320574162679426,
2242
+ "acc_stderr": 0.014575582129545914
2243
+ }
2244
+ },
2245
+ "rte": {
2246
+ "2022-07-13-11-29-13": {
2247
+ "acc": 0.5342960288808665,
2248
+ "acc_stderr": 0.030025579819366426
2249
+ }
2250
+ },
2251
+ "rte+does the claim\u2026 follow the fact\u2026": {
2252
+ "2022-07-13-19-23-37": {
2253
+ "acc": 0.4729241877256318,
2254
+ "acc_norm": 0.5270758122743683,
2255
+ "acc_norm_stderr": 0.0300523034631437,
2256
+ "acc_stderr": 0.030052303463143706,
2257
+ "prompt_name": "does the claim\u2026 follow the fact\u2026",
2258
+ "task_name": "rte"
2259
+ }
2260
+ },
2261
+ "rte+entailment explained": {
2262
+ "2022-07-13-19-23-37": {
2263
+ "acc": 0.49458483754512633,
2264
+ "acc_norm": 0.4729241877256318,
2265
+ "acc_norm_stderr": 0.0300523034631437,
2266
+ "acc_stderr": 0.030094698123239966,
2267
+ "prompt_name": "entailment explained",
2268
+ "task_name": "rte"
2269
+ }
2270
+ },
2271
+ "rte+imply": {
2272
+ "2022-07-13-19-23-37": {
2273
+ "acc": 0.48375451263537905,
2274
+ "acc_norm": 0.5270758122743683,
2275
+ "acc_norm_stderr": 0.0300523034631437,
2276
+ "acc_stderr": 0.030080573208738064,
2277
+ "prompt_name": "imply",
2278
+ "task_name": "rte"
2279
+ }
2280
+ },
2281
+ "rte+imply separated": {
2282
+ "2022-07-13-19-23-37": {
2283
+ "acc": 0.45126353790613716,
2284
+ "acc_norm": 0.5270758122743683,
2285
+ "acc_norm_stderr": 0.0300523034631437,
2286
+ "acc_stderr": 0.029953149241808943,
2287
+ "prompt_name": "imply separated",
2288
+ "task_name": "rte"
2289
+ }
2290
+ },
2291
+ "rte+mean": {
2292
+ "2022-07-13-19-23-37": {
2293
+ "acc": 0.48014440433212996,
2294
+ "acc_norm": 0.5270758122743683,
2295
+ "acc_norm_stderr": 0.0300523034631437,
2296
+ "acc_stderr": 0.030072723167317194,
2297
+ "prompt_name": "mean",
2298
+ "task_name": "rte"
2299
+ }
2300
+ },
2301
+ "sciq": {
2302
+ "2022-07-13-11-29-13": {
2303
+ "acc": 0.853,
2304
+ "acc_norm": 0.771,
2305
+ "acc_norm_stderr": 0.013294199326613609,
2306
+ "acc_stderr": 0.011203415395160335
2307
+ }
2308
+ },
2309
+ "sst": {
2310
+ "2022-07-13-11-29-13": {
2311
+ "acc": 0.6823394495412844,
2312
+ "acc_stderr": 0.015775124845202545
2313
+ }
2314
+ },
2315
+ "sst+following positive negative": {
2316
+ "2022-07-13-19-23-37": {
2317
+ "acc": 0.8061926605504587,
2318
+ "acc_norm": 0.8061926605504587,
2319
+ "acc_norm_stderr": 0.013393542261521812,
2320
+ "acc_stderr": 0.013393542261521812,
2321
+ "prompt_name": "following positive negative",
2322
+ "task_name": "sst"
2323
+ }
2324
+ },
2325
+ "sst+happy or mad": {
2326
+ "2022-07-13-19-23-37": {
2327
+ "acc": 0.5091743119266054,
2328
+ "acc_norm": 0.5091743119266054,
2329
+ "acc_norm_stderr": 0.01693900152535154,
2330
+ "acc_stderr": 0.01693900152535154,
2331
+ "prompt_name": "happy or mad",
2332
+ "task_name": "sst"
2333
+ }
2334
+ },
2335
+ "sst+positive negative after": {
2336
+ "2022-07-13-19-23-37": {
2337
+ "acc": 0.6204128440366973,
2338
+ "acc_norm": 0.6204128440366973,
2339
+ "acc_norm_stderr": 0.016443227556688766,
2340
+ "acc_stderr": 0.016443227556688766,
2341
+ "prompt_name": "positive negative after",
2342
+ "task_name": "sst"
2343
+ }
2344
+ },
2345
+ "sst+review": {
2346
+ "2022-07-13-19-23-37": {
2347
+ "acc": 0.5091743119266054,
2348
+ "acc_norm": 0.5091743119266054,
2349
+ "acc_norm_stderr": 0.01693900152535154,
2350
+ "acc_stderr": 0.01693900152535154,
2351
+ "prompt_name": "review",
2352
+ "task_name": "sst"
2353
+ }
2354
+ },
2355
+ "sst+said": {
2356
+ "2022-07-13-19-23-37": {
2357
+ "acc": 0.4908256880733945,
2358
+ "acc_norm": 0.5091743119266054,
2359
+ "acc_norm_stderr": 0.01693900152535154,
2360
+ "acc_stderr": 0.01693900152535154,
2361
+ "prompt_name": "said",
2362
+ "task_name": "sst"
2363
+ }
2364
+ },
2365
+ "triviaqa": {
2366
+ "2022-07-13-11-29-13": {
2367
+ "acc": 0.0313798285158667,
2368
+ "acc_stderr": 0.0016392014864795154
2369
+ }
2370
+ },
2371
+ "webqs": {
2372
+ "2022-07-13-11-29-13": {
2373
+ "acc": 0.012795275590551181,
2374
+ "acc_stderr": 0.0024938680596856277
2375
+ }
2376
+ },
2377
+ "wic": {
2378
+ "2022-07-13-11-29-13": {
2379
+ "acc": 0.5,
2380
+ "acc_stderr": 0.01981072129375818
2381
+ }
2382
+ },
2383
+ "wic+GPT-3-prompt": {
2384
+ "2022-07-14-10-03-25": {
2385
+ "acc": 0.5,
2386
+ "acc_norm": 0.5,
2387
+ "acc_norm_stderr": 0.01981072129375818,
2388
+ "acc_stderr": 0.01981072129375818,
2389
+ "prompt_name": "GPT-3-prompt",
2390
+ "task_name": "wic"
2391
+ }
2392
+ },
2393
+ "wic+GPT-3-prompt-with-label": {
2394
+ "2022-07-14-10-03-25": {
2395
+ "acc": 0.49216300940438873,
2396
+ "acc_norm": 0.5,
2397
+ "acc_norm_stderr": 0.01981072129375818,
2398
+ "acc_stderr": 0.019808287657813832,
2399
+ "prompt_name": "GPT-3-prompt-with-label",
2400
+ "task_name": "wic"
2401
+ }
2402
+ },
2403
+ "wic+affirmation_true_or_false": {
2404
+ "2022-07-14-10-03-25": {
2405
+ "acc": 0.5,
2406
+ "acc_norm": 0.5078369905956113,
2407
+ "acc_norm_stderr": 0.019808287657813832,
2408
+ "acc_stderr": 0.01981072129375818,
2409
+ "prompt_name": "affirmation_true_or_false",
2410
+ "task_name": "wic"
2411
+ }
2412
+ },
2413
+ "wic+grammar_homework": {
2414
+ "2022-07-14-10-03-25": {
2415
+ "acc": 0.5094043887147336,
2416
+ "acc_norm": 0.49843260188087773,
2417
+ "acc_norm_stderr": 0.019810623954060382,
2418
+ "acc_stderr": 0.019807216763271497,
2419
+ "prompt_name": "grammar_homework",
2420
+ "task_name": "wic"
2421
+ }
2422
+ },
2423
+ "wic+polysemous": {
2424
+ "2022-07-14-10-03-25": {
2425
+ "acc": 0.512539184952978,
2426
+ "acc_norm": 0.49843260188087773,
2427
+ "acc_norm_stderr": 0.019810623954060382,
2428
+ "acc_stderr": 0.019804490588592596,
2429
+ "prompt_name": "polysemous",
2430
+ "task_name": "wic"
2431
+ }
2432
+ },
2433
+ "wic+question-context": {
2434
+ "2022-07-14-10-03-25": {
2435
+ "acc": 0.5266457680250783,
2436
+ "acc_norm": 0.5031347962382445,
2437
+ "acc_norm_stderr": 0.019810331932097542,
2438
+ "acc_stderr": 0.019782570188812167,
2439
+ "prompt_name": "question-context",
2440
+ "task_name": "wic"
2441
+ }
2442
+ },
2443
+ "wic+question-context-meaning": {
2444
+ "2022-07-14-10-03-25": {
2445
+ "acc": 0.5438871473354232,
2446
+ "acc_norm": 0.5015673981191222,
2447
+ "acc_norm_stderr": 0.019810623954060382,
2448
+ "acc_stderr": 0.019734259601993404,
2449
+ "prompt_name": "question-context-meaning",
2450
+ "task_name": "wic"
2451
+ }
2452
+ },
2453
+ "wic+question-context-meaning-with-label": {
2454
+ "2022-07-14-10-03-25": {
2455
+ "acc": 0.5156739811912225,
2456
+ "acc_norm": 0.5015673981191222,
2457
+ "acc_norm_stderr": 0.019810623954060382,
2458
+ "acc_stderr": 0.019800984955347847,
2459
+ "prompt_name": "question-context-meaning-with-label",
2460
+ "task_name": "wic"
2461
+ }
2462
+ },
2463
+ "wic+same_sense": {
2464
+ "2022-07-14-10-03-25": {
2465
+ "acc": 0.5047021943573667,
2466
+ "acc_norm": 0.5,
2467
+ "acc_norm_stderr": 0.01981072129375818,
2468
+ "acc_stderr": 0.019809845219259763,
2469
+ "prompt_name": "same_sense",
2470
+ "task_name": "wic"
2471
+ }
2472
+ },
2473
+ "wic+similar-sense": {
2474
+ "2022-07-14-10-03-25": {
2475
+ "acc": 0.542319749216301,
2476
+ "acc_norm": 0.5,
2477
+ "acc_norm_stderr": 0.01981072129375818,
2478
+ "acc_stderr": 0.01973963328373276,
2479
+ "prompt_name": "similar-sense",
2480
+ "task_name": "wic"
2481
+ }
2482
+ },
2483
+ "winogrande": {
2484
+ "2022-07-13-11-29-13": {
2485
+ "acc": 0.5730071033938438,
2486
+ "acc_stderr": 0.013901878072575058
2487
+ }
2488
+ },
2489
+ "wnli": {
2490
+ "2022-07-13-11-29-13": {
2491
+ "acc": 0.43661971830985913,
2492
+ "acc_stderr": 0.0592793555841297
2493
+ }
2494
+ },
2495
+ "wnli+confident": {
2496
+ "2022-07-14-10-03-25": {
2497
+ "acc": 0.43661971830985913,
2498
+ "acc_norm": 0.43661971830985913,
2499
+ "acc_norm_stderr": 0.0592793555841297,
2500
+ "acc_stderr": 0.0592793555841297,
2501
+ "prompt_name": "confident",
2502
+ "task_name": "wnli"
2503
+ }
2504
+ },
2505
+ "wnli+entailment explained": {
2506
+ "2022-07-14-10-03-25": {
2507
+ "acc": 0.39436619718309857,
2508
+ "acc_norm": 0.43661971830985913,
2509
+ "acc_norm_stderr": 0.0592793555841297,
2510
+ "acc_stderr": 0.058412510854444266,
2511
+ "prompt_name": "entailment explained",
2512
+ "task_name": "wnli"
2513
+ }
2514
+ },
2515
+ "wnli+imply": {
2516
+ "2022-07-14-10-03-25": {
2517
+ "acc": 0.4225352112676056,
2518
+ "acc_norm": 0.43661971830985913,
2519
+ "acc_norm_stderr": 0.0592793555841297,
2520
+ "acc_stderr": 0.05903984205682581,
2521
+ "prompt_name": "imply",
2522
+ "task_name": "wnli"
2523
+ }
2524
+ },
2525
+ "wnli+justified": {
2526
+ "2022-07-14-10-03-25": {
2527
+ "acc": 0.43661971830985913,
2528
+ "acc_norm": 0.43661971830985913,
2529
+ "acc_norm_stderr": 0.0592793555841297,
2530
+ "acc_stderr": 0.0592793555841297,
2531
+ "prompt_name": "justified",
2532
+ "task_name": "wnli"
2533
+ }
2534
+ },
2535
+ "wnli+mean": {
2536
+ "2022-07-14-10-03-25": {
2537
+ "acc": 0.6619718309859155,
2538
+ "acc_norm": 0.43661971830985913,
2539
+ "acc_norm_stderr": 0.0592793555841297,
2540
+ "acc_stderr": 0.05653887739133513,
2541
+ "prompt_name": "mean",
2542
+ "task_name": "wnli"
2543
+ }
2544
+ },
2545
+ "wsc": {
2546
+ "2022-07-13-11-29-13": {
2547
+ "acc": 0.36538461538461536,
2548
+ "acc_stderr": 0.0474473339327792
2549
+ }
2550
+ },
2551
+ "wsc+GPT-3 Style": {
2552
+ "2022-07-14-10-03-25": {
2553
+ "acc": 0.36538461538461536,
2554
+ "acc_norm": 0.36538461538461536,
2555
+ "acc_norm_stderr": 0.0474473339327792,
2556
+ "acc_stderr": 0.0474473339327792,
2557
+ "prompt_name": "GPT-3 Style",
2558
+ "task_name": "wsc"
2559
+ }
2560
+ },
2561
+ "wsc+I think they mean": {
2562
+ "2022-07-14-10-03-25": {
2563
+ "acc": 0.36538461538461536,
2564
+ "acc_norm": 0.36538461538461536,
2565
+ "acc_norm_stderr": 0.0474473339327792,
2566
+ "acc_stderr": 0.0474473339327792,
2567
+ "prompt_name": "I think they mean",
2568
+ "task_name": "wsc"
2569
+ }
2570
+ },
2571
+ "wsc+Who or what is/are": {
2572
+ "2022-07-14-10-03-25": {
2573
+ "acc": 0.40384615384615385,
2574
+ "acc_norm": 0.36538461538461536,
2575
+ "acc_norm_stderr": 0.0474473339327792,
2576
+ "acc_stderr": 0.048346889526540184,
2577
+ "prompt_name": "Who or what is/are",
2578
+ "task_name": "wsc"
2579
+ }
2580
+ },
2581
+ "wsc+by p they mean": {
2582
+ "2022-07-14-10-03-25": {
2583
+ "acc": 0.36538461538461536,
2584
+ "acc_norm": 0.36538461538461536,
2585
+ "acc_norm_stderr": 0.0474473339327792,
2586
+ "acc_stderr": 0.0474473339327792,
2587
+ "prompt_name": "by p they mean",
2588
+ "task_name": "wsc"
2589
+ }
2590
+ },
2591
+ "wsc+does p stand for": {
2592
+ "2022-07-14-10-03-25": {
2593
+ "acc": 0.375,
2594
+ "acc_norm": 0.36538461538461536,
2595
+ "acc_norm_stderr": 0.0474473339327792,
2596
+ "acc_stderr": 0.04770204856076104,
2597
+ "prompt_name": "does p stand for",
2598
+ "task_name": "wsc"
2599
+ }
2600
+ },
2601
+ "wsc+does the pronoun refer to": {
2602
+ "2022-07-14-10-03-25": {
2603
+ "acc": 0.5480769230769231,
2604
+ "acc_norm": 0.36538461538461536,
2605
+ "acc_norm_stderr": 0.0474473339327792,
2606
+ "acc_stderr": 0.049038186969314335,
2607
+ "prompt_name": "does the pronoun refer to",
2608
+ "task_name": "wsc"
2609
+ }
2610
+ },
2611
+ "wsc+in other words": {
2612
+ "2022-07-14-10-03-25": {
2613
+ "acc": 0.36538461538461536,
2614
+ "acc_norm": 0.5288461538461539,
2615
+ "acc_norm_stderr": 0.04918440626354964,
2616
+ "acc_stderr": 0.0474473339327792,
2617
+ "prompt_name": "in other words",
2618
+ "task_name": "wsc"
2619
+ }
2620
+ },
2621
+ "wsc+p is/are r": {
2622
+ "2022-07-14-10-03-25": {
2623
+ "acc": 0.36538461538461536,
2624
+ "acc_norm": 0.34615384615384615,
2625
+ "acc_norm_stderr": 0.04687634642174987,
2626
+ "acc_stderr": 0.0474473339327792,
2627
+ "prompt_name": "p is/are r",
2628
+ "task_name": "wsc"
2629
+ }
2630
+ },
2631
+ "wsc+replaced with": {
2632
+ "2022-07-14-10-03-25": {
2633
+ "acc": 0.6153846153846154,
2634
+ "acc_norm": 0.36538461538461536,
2635
+ "acc_norm_stderr": 0.0474473339327792,
2636
+ "acc_stderr": 0.047936688680750406,
2637
+ "prompt_name": "replaced with",
2638
+ "task_name": "wsc"
2639
+ }
2640
+ },
2641
+ "wsc+the pronoun refers to": {
2642
+ "2022-07-14-10-03-25": {
2643
+ "acc": 0.36538461538461536,
2644
+ "acc_norm": 0.5865384615384616,
2645
+ "acc_norm_stderr": 0.04852294969729053,
2646
+ "acc_stderr": 0.0474473339327792,
2647
+ "prompt_name": "the pronoun refers to",
2648
+ "task_name": "wsc"
2649
+ }
2650
+ }
2651
+ },
2652
+ "versions": {
2653
+ "arc_challenge": 0,
2654
+ "arc_easy": 0,
2655
+ "axb+GPT-3 style": 0,
2656
+ "axb+MNLI crowdsource": 0,
2657
+ "axb+based on the previous passage": 0,
2658
+ "axb+can we infer": 0,
2659
+ "axb+does it follow that": 0,
2660
+ "axb+does this imply": 0,
2661
+ "axb+guaranteed true": 0,
2662
+ "axb+justified in saying": 0,
2663
+ "axb+must be true": 0,
2664
+ "axb+should assume": 0,
2665
+ "axg+GPT-3 style": 0,
2666
+ "axg+MNLI crowdsource": 0,
2667
+ "axg+based on the previous passage": 0,
2668
+ "axg+can we infer": 0,
2669
+ "axg+does it follow that": 0,
2670
+ "axg+does this imply": 0,
2671
+ "axg+guaranteed true": 0,
2672
+ "axg+justified in saying": 0,
2673
+ "axg+must be true": 0,
2674
+ "axg+should assume": 0,
2675
+ "boolq": 1,
2676
+ "boolq+GPT-3 Style": 0,
2677
+ "boolq+I wonder\u2026": 0,
2678
+ "boolq+after_reading": 0,
2679
+ "boolq+based on the following passage": 0,
2680
+ "boolq+based on the previous passage": 0,
2681
+ "boolq+could you tell me\u2026": 0,
2682
+ "boolq+exam": 0,
2683
+ "boolq+exercise": 0,
2684
+ "boolq+valid_binary": 0,
2685
+ "boolq+yes_no_question": 0,
2686
+ "cb+GPT-3 style": 0,
2687
+ "cb+MNLI crowdsource": 0,
2688
+ "cb+always/sometimes/never": 0,
2689
+ "cb+based on the previous passage": 0,
2690
+ "cb+can we infer": 0,
2691
+ "cb+claim true/false/inconclusive": 0,
2692
+ "cb+consider always/sometimes/never": 0,
2693
+ "cb+does it follow that": 0,
2694
+ "cb+does this imply": 0,
2695
+ "cb+guaranteed true": 0,
2696
+ "cb+guaranteed/possible/impossible": 0,
2697
+ "cb+justified in saying": 0,
2698
+ "cb+must be true": 0,
2699
+ "cb+should assume": 0,
2700
+ "cb+take the following as truth": 0,
2701
+ "cola+Following sentence acceptable": 0,
2702
+ "cola+Make sense yes no": 0,
2703
+ "cola+Previous sentence acceptable": 0,
2704
+ "cola+editing": 0,
2705
+ "cola+is_this_correct": 0,
2706
+ "copa": 0,
2707
+ "copa+C1 or C2? premise, so/because\u2026": 0,
2708
+ "copa+best_option": 0,
2709
+ "copa+cause_effect": 0,
2710
+ "copa+choose": 0,
2711
+ "copa+exercise": 0,
2712
+ "copa+i_am_hesitating": 0,
2713
+ "copa+more likely": 0,
2714
+ "copa+plausible_alternatives": 0,
2715
+ "crows_pairs_english+1": 0,
2716
+ "crows_pairs_english+2": 0,
2717
+ "crows_pairs_english+3": 0,
2718
+ "crows_pairs_english+4": 0,
2719
+ "crows_pairs_english+A_preference": 0,
2720
+ "crows_pairs_english+A_reality_check": 0,
2721
+ "crows_pairs_english+A_stereotype_true": 0,
2722
+ "crows_pairs_french+1_fr": 0,
2723
+ "crows_pairs_french+2_fr": 0,
2724
+ "crows_pairs_french+3_fr": 0,
2725
+ "crows_pairs_french+4_fr": 0,
2726
+ "crows_pairs_french+A_preference_fr": 0,
2727
+ "crows_pairs_french+A_reality_check_fr": 0,
2728
+ "crows_pairs_french+A_stereotype_true_fr": 0,
2729
+ "diabla+Is the error present? (same lang)": 0,
2730
+ "diabla+Which is automatic?": 0,
2731
+ "gsarti/flores_101_afr+null": 0,
2732
+ "gsarti/flores_101_amh+null": 0,
2733
+ "gsarti/flores_101_ara+null": 0,
2734
+ "gsarti/flores_101_asm+null": 0,
2735
+ "gsarti/flores_101_ast+null": 0,
2736
+ "gsarti/flores_101_azj+null": 0,
2737
+ "gsarti/flores_101_bel+null": 0,
2738
+ "gsarti/flores_101_ben+null": 0,
2739
+ "gsarti/flores_101_bos+null": 0,
2740
+ "gsarti/flores_101_bul+null": 0,
2741
+ "gsarti/flores_101_cat+null": 0,
2742
+ "gsarti/flores_101_ceb+null": 0,
2743
+ "gsarti/flores_101_ces+null": 0,
2744
+ "gsarti/flores_101_ckb+null": 0,
2745
+ "gsarti/flores_101_cym+null": 0,
2746
+ "gsarti/flores_101_dan+null": 0,
2747
+ "gsarti/flores_101_deu+null": 0,
2748
+ "gsarti/flores_101_ell+null": 0,
2749
+ "gsarti/flores_101_eng+null": 0,
2750
+ "gsarti/flores_101_est+null": 0,
2751
+ "gsarti/flores_101_fas+null": 0,
2752
+ "gsarti/flores_101_fin+null": 0,
2753
+ "gsarti/flores_101_fra+null": 0,
2754
+ "gsarti/flores_101_ful+null": 0,
2755
+ "gsarti/flores_101_gle+null": 0,
2756
+ "gsarti/flores_101_glg+null": 0,
2757
+ "gsarti/flores_101_guj+null": 0,
2758
+ "gsarti/flores_101_hau+null": 0,
2759
+ "gsarti/flores_101_heb+null": 0,
2760
+ "gsarti/flores_101_hin+null": 0,
2761
+ "gsarti/flores_101_hrv+null": 0,
2762
+ "gsarti/flores_101_hun+null": 0,
2763
+ "gsarti/flores_101_hye+null": 0,
2764
+ "gsarti/flores_101_ibo+null": 0,
2765
+ "gsarti/flores_101_ind+null": 0,
2766
+ "gsarti/flores_101_isl+null": 0,
2767
+ "gsarti/flores_101_ita+null": 0,
2768
+ "gsarti/flores_101_jav+null": 0,
2769
+ "gsarti/flores_101_jpn+null": 0,
2770
+ "gsarti/flores_101_kam+null": 0,
2771
+ "gsarti/flores_101_kan+null": 0,
2772
+ "gsarti/flores_101_kat+null": 0,
2773
+ "gsarti/flores_101_kaz+null": 0,
2774
+ "gsarti/flores_101_kea+null": 0,
2775
+ "gsarti/flores_101_kir+null": 0,
2776
+ "gsarti/flores_101_kor+null": 0,
2777
+ "gsarti/flores_101_lao+null": 0,
2778
+ "gsarti/flores_101_lav+null": 0,
2779
+ "gsarti/flores_101_lin+null": 0,
2780
+ "gsarti/flores_101_lit+null": 0,
2781
+ "gsarti/flores_101_ltz+null": 0,
2782
+ "gsarti/flores_101_lug+null": 0,
2783
+ "gsarti/flores_101_luo+null": 0,
2784
+ "gsarti/flores_101_mal+null": 0,
2785
+ "gsarti/flores_101_mar+null": 0,
2786
+ "gsarti/flores_101_mkd+null": 0,
2787
+ "gsarti/flores_101_mlt+null": 0,
2788
+ "gsarti/flores_101_mon+null": 0,
2789
+ "gsarti/flores_101_mri+null": 0,
2790
+ "gsarti/flores_101_msa+null": 0,
2791
+ "gsarti/flores_101_mya+null": 0,
2792
+ "gsarti/flores_101_nld+null": 0,
2793
+ "gsarti/flores_101_nob+null": 0,
2794
+ "gsarti/flores_101_npi+null": 0,
2795
+ "gsarti/flores_101_nso+null": 0,
2796
+ "gsarti/flores_101_nya+null": 0,
2797
+ "gsarti/flores_101_oci+null": 0,
2798
+ "gsarti/flores_101_orm+null": 0,
2799
+ "gsarti/flores_101_ory+null": 0,
2800
+ "gsarti/flores_101_pan+null": 0,
2801
+ "gsarti/flores_101_pol+null": 0,
2802
+ "gsarti/flores_101_por+null": 0,
2803
+ "gsarti/flores_101_pus+null": 0,
2804
+ "gsarti/flores_101_ron+null": 0,
2805
+ "gsarti/flores_101_rus+null": 0,
2806
+ "gsarti/flores_101_slk+null": 0,
2807
+ "gsarti/flores_101_slv+null": 0,
2808
+ "gsarti/flores_101_sna+null": 0,
2809
+ "gsarti/flores_101_snd+null": 0,
2810
+ "gsarti/flores_101_som+null": 0,
2811
+ "gsarti/flores_101_spa+null": 0,
2812
+ "gsarti/flores_101_srp+null": 0,
2813
+ "gsarti/flores_101_swe+null": 0,
2814
+ "gsarti/flores_101_swh+null": 0,
2815
+ "gsarti/flores_101_tam+null": 0,
2816
+ "gsarti/flores_101_tel+null": 0,
2817
+ "gsarti/flores_101_tgk+null": 0,
2818
+ "gsarti/flores_101_tgl+null": 0,
2819
+ "gsarti/flores_101_tha+null": 0,
2820
+ "gsarti/flores_101_tur+null": 0,
2821
+ "gsarti/flores_101_ukr+null": 0,
2822
+ "gsarti/flores_101_umb+null": 0,
2823
+ "gsarti/flores_101_urd+null": 0,
2824
+ "gsarti/flores_101_uzb+null": 0,
2825
+ "gsarti/flores_101_vie+null": 0,
2826
+ "gsarti/flores_101_wol+null": 0,
2827
+ "gsarti/flores_101_xho+null": 0,
2828
+ "gsarti/flores_101_yor+null": 0,
2829
+ "gsarti/flores_101_zho_simpl+null": 0,
2830
+ "gsarti/flores_101_zho_trad+null": 0,
2831
+ "gsarti/flores_101_zul+null": 0,
2832
+ "headqa": 0,
2833
+ "hellaswag": 0,
2834
+ "lambada": 0,
2835
+ "logiqa": 0,
2836
+ "mathqa": 0,
2837
+ "mc_taco": 0,
2838
+ "mnli+GPT-3 style": 0,
2839
+ "mnli+MNLI crowdsource": 0,
2840
+ "mnli+always/sometimes/never": 0,
2841
+ "mnli+based on the previous passage": 0,
2842
+ "mnli+can we infer": 0,
2843
+ "mnli+claim true/false/inconclusive": 0,
2844
+ "mnli+consider always/sometimes/never": 0,
2845
+ "mnli+does it follow that": 0,
2846
+ "mnli+does this imply": 0,
2847
+ "mnli+guaranteed true": 0,
2848
+ "mnli+guaranteed/possible/impossible": 0,
2849
+ "mnli+justified in saying": 0,
2850
+ "mnli+must be true": 0,
2851
+ "mnli+should assume": 0,
2852
+ "mnli+take the following as truth": 0,
2853
+ "mnli_mismatched+GPT-3 style": 0,
2854
+ "mnli_mismatched+MNLI crowdsource": 0,
2855
+ "mnli_mismatched+always/sometimes/never": 0,
2856
+ "mnli_mismatched+based on the previous passage": 0,
2857
+ "mnli_mismatched+can we infer": 0,
2858
+ "mnli_mismatched+claim true/false/inconclusive": 0,
2859
+ "mnli_mismatched+consider always/sometimes/never": 0,
2860
+ "mnli_mismatched+does it follow that": 0,
2861
+ "mnli_mismatched+does this imply": 0,
2862
+ "mnli_mismatched+guaranteed true": 0,
2863
+ "mnli_mismatched+guaranteed/possible/impossible": 0,
2864
+ "mnli_mismatched+justified in saying": 0,
2865
+ "mnli_mismatched+must be true": 0,
2866
+ "mnli_mismatched+should assume": 0,
2867
+ "mnli_mismatched+take the following as truth": 0,
2868
+ "mrpc": 0,
2869
+ "multirc": 1,
2870
+ "multirc+I was going to say\u2026": 0,
2871
+ "multirc+Would it be good to answer\u2026": 0,
2872
+ "multirc+confirm": 0,
2873
+ "multirc+correct": 0,
2874
+ "multirc+decide_valid": 0,
2875
+ "multirc+found_this_answer": 0,
2876
+ "multirc+grading": 0,
2877
+ "multirc+is the correct answer\u2026": 0,
2878
+ "multirc+is\u2026 a correct answer?": 0,
2879
+ "multirc+paragraph\u2026 question\u2026 is it\u2026 ?": 0,
2880
+ "openbookqa": 0,
2881
+ "piqa": 0,
2882
+ "prost": 0,
2883
+ "pubmedqa": 0,
2884
+ "qnli": 0,
2885
+ "qqp": 0,
2886
+ "qqp+answer": 0,
2887
+ "qqp+duplicate": 0,
2888
+ "qqp+duplicate or not": 0,
2889
+ "qqp+meaning": 0,
2890
+ "qqp+quora": 0,
2891
+ "qqp+same thing": 0,
2892
+ "race": 1,
2893
+ "rte": 0,
2894
+ "rte+does the claim\u2026 follow the fact\u2026": 0,
2895
+ "rte+entailment explained": 0,
2896
+ "rte+imply": 0,
2897
+ "rte+imply separated": 0,
2898
+ "rte+mean": 0,
2899
+ "sciq": 0,
2900
+ "sst": 0,
2901
+ "sst+following positive negative": 0,
2902
+ "sst+happy or mad": 0,
2903
+ "sst+positive negative after": 0,
2904
+ "sst+review": 0,
2905
+ "sst+said": 0,
2906
+ "triviaqa": 0,
2907
+ "webqs": 0,
2908
+ "wic": 0,
2909
+ "wic+GPT-3-prompt": 0,
2910
+ "wic+GPT-3-prompt-with-label": 0,
2911
+ "wic+affirmation_true_or_false": 0,
2912
+ "wic+grammar_homework": 0,
2913
+ "wic+polysemous": 0,
2914
+ "wic+question-context": 0,
2915
+ "wic+question-context-meaning": 0,
2916
+ "wic+question-context-meaning-with-label": 0,
2917
+ "wic+same_sense": 0,
2918
+ "wic+similar-sense": 0,
2919
+ "winogrande": 0,
2920
+ "wnli": 1,
2921
+ "wnli+confident": 1,
2922
+ "wnli+entailment explained": 1,
2923
+ "wnli+imply": 1,
2924
+ "wnli+justified": 1,
2925
+ "wnli+mean": 1,
2926
+ "wsc": 0,
2927
+ "wsc+GPT-3 Style": 0,
2928
+ "wsc+I think they mean": 0,
2929
+ "wsc+Who or what is/are": 0,
2930
+ "wsc+by p they mean": 0,
2931
+ "wsc+does p stand for": 0,
2932
+ "wsc+does the pronoun refer to": 0,
2933
+ "wsc+in other words": 0,
2934
+ "wsc+p is/are r": 0,
2935
+ "wsc+replaced with": 0,
2936
+ "wsc+the pronoun refers to": 0
2937
+ }
2938
+ }
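
For reference, the aggregated bslmeval.json file reproduced above nests results as task+prompt -> timestamp -> metric values, with a parallel "versions" map recording the task version used for each entry. A minimal sketch for flattening it into per-run rows — assuming only the layout visible in this diff (the path matches the file added in this commit; the metric filtering and printout are illustrative, not part of the repo's tooling):

import json

# Flatten the nested results shown above: "task+prompt" -> timestamp -> metrics.
with open("evaluation/results/tr11/bloom1b3/bslmeval.json") as f:
    data = json.load(f)

rows = []
for task_prompt, runs in data["results"].items():
    for timestamp, metrics in runs.items():
        # Keep numeric metrics only; drop bookkeeping fields such as
        # task_name and prompt_name.
        numeric = {k: v for k, v in metrics.items() if isinstance(v, (int, float))}
        rows.append({"task": task_prompt, "timestamp": timestamp, **numeric})

# Example: print accuracy-style metrics alongside the task version.
for row in sorted(rows, key=lambda r: r["task"]):
    if "acc" in row:
        print(f"{row['task']:<60} acc={row['acc']:.4f}  version={data['versions'].get(row['task'])}")
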
evaluation/results/tr11/bloom1b3/bslmevalfiles/tr11-1b3-ml-evalharness-results_lm-eval_global_step340500_2022-07-13-11-29-13.json ADDED
@@ -0,0 +1,172 @@
1
+ {
2
+ "results": {
3
+ "arc_challenge": {
4
+ "acc": 0.23464163822525597,
5
+ "acc_stderr": 0.012383873560768673,
6
+ "acc_norm": 0.26791808873720135,
7
+ "acc_norm_stderr": 0.012942030195136423
8
+ },
9
+ "arc_easy": {
10
+ "acc": 0.5631313131313131,
11
+ "acc_stderr": 0.010177672928157678,
12
+ "acc_norm": 0.4810606060606061,
13
+ "acc_norm_stderr": 0.010252420496894487
14
+ },
15
+ "boolq": {
16
+ "acc": 0.617737003058104,
17
+ "acc_stderr": 0.008499149690449272
18
+ },
19
+ "copa": {
20
+ "acc": 0.7,
21
+ "acc_stderr": 0.046056618647183814
22
+ },
23
+ "headqa": {
24
+ "acc": 0.25419401896425964,
25
+ "acc_stderr": 0.008316509290190668,
26
+ "acc_norm": 0.29576951130561635,
27
+ "acc_norm_stderr": 0.008717251898361422
28
+ },
29
+ "hellaswag": {
30
+ "acc": 0.37621987651862177,
31
+ "acc_stderr": 0.004834461997944872,
32
+ "acc_norm": 0.46564429396534557,
33
+ "acc_norm_stderr": 0.004977988452502641
34
+ },
35
+ "lambada": {
36
+ "ppl": 12.583447597222621,
37
+ "ppl_stderr": 0.4021518609838198,
38
+ "acc": 0.46322530564719583,
39
+ "acc_stderr": 0.006947110835634445
40
+ },
41
+ "logiqa": {
42
+ "acc": 0.21658986175115208,
43
+ "acc_stderr": 0.016156860583178303,
44
+ "acc_norm": 0.28110599078341014,
45
+ "acc_norm_stderr": 0.017632374626460005
46
+ },
47
+ "mathqa": {
48
+ "acc": 0.2489112227805695,
49
+ "acc_stderr": 0.007915319798861361,
50
+ "acc_norm": 0.2422110552763819,
51
+ "acc_norm_stderr": 0.007842810183504986
52
+ },
53
+ "mc_taco": {
54
+ "em": 0.12537537537537538,
55
+ "f1": 0.4747075325110886
56
+ },
57
+ "mrpc": {
58
+ "acc": 0.6813725490196079,
59
+ "acc_stderr": 0.023095996571841474,
60
+ "f1": 0.8104956268221574,
61
+ "f1_stderr": 0.016329211455484924
62
+ },
63
+ "multirc": {
64
+ "acc": 0.011542497376705142,
65
+ "acc_stderr": 0.003461867320927179
66
+ },
67
+ "openbookqa": {
68
+ "acc": 0.214,
69
+ "acc_stderr": 0.01835979750238702,
70
+ "acc_norm": 0.298,
71
+ "acc_norm_stderr": 0.020475118092988978
72
+ },
73
+ "piqa": {
74
+ "acc": 0.6871599564744287,
75
+ "acc_stderr": 0.010817714425701112,
76
+ "acc_norm": 0.7002176278563657,
77
+ "acc_norm_stderr": 0.010689686967138092
78
+ },
79
+ "prost": {
80
+ "acc": 0.23505550811272416,
81
+ "acc_stderr": 0.0030979423271461875,
82
+ "acc_norm": 0.2670260461144321,
83
+ "acc_norm_stderr": 0.0032321702981822874
84
+ },
85
+ "pubmedqa": {
86
+ "acc": 0.56,
87
+ "acc_stderr": 0.015704987954361798
88
+ },
89
+ "qnli": {
90
+ "acc": 0.4962474830679114,
91
+ "acc_stderr": 0.006765220016415222
92
+ },
93
+ "qqp": {
94
+ "acc": 0.3681424684640119,
95
+ "acc_stderr": 0.0023986729832071816,
96
+ "f1": 0.5381138352498734,
97
+ "f1_stderr": 0.002555831569895799
98
+ },
99
+ "race": {
100
+ "acc": 0.3320574162679426,
101
+ "acc_stderr": 0.014575582129545914
102
+ },
103
+ "rte": {
104
+ "acc": 0.5342960288808665,
105
+ "acc_stderr": 0.030025579819366426
106
+ },
107
+ "sciq": {
108
+ "acc": 0.853,
109
+ "acc_stderr": 0.011203415395160335,
110
+ "acc_norm": 0.771,
111
+ "acc_norm_stderr": 0.013294199326613609
112
+ },
113
+ "sst": {
114
+ "acc": 0.6823394495412844,
115
+ "acc_stderr": 0.015775124845202545
116
+ },
117
+ "triviaqa": {
118
+ "acc": 0.0313798285158667,
119
+ "acc_stderr": 0.0016392014864795154
120
+ },
121
+ "webqs": {
122
+ "acc": 0.012795275590551181,
123
+ "acc_stderr": 0.0024938680596856277
124
+ },
125
+ "wic": {
126
+ "acc": 0.5,
127
+ "acc_stderr": 0.01981072129375818
128
+ },
129
+ "winogrande": {
130
+ "acc": 0.5730071033938438,
131
+ "acc_stderr": 0.013901878072575058
132
+ },
133
+ "wnli": {
134
+ "acc": 0.43661971830985913,
135
+ "acc_stderr": 0.0592793555841297
136
+ },
137
+ "wsc": {
138
+ "acc": 0.36538461538461536,
139
+ "acc_stderr": 0.0474473339327792
140
+ }
141
+ },
142
+ "versions": {
143
+ "arc_challenge": 0,
144
+ "arc_easy": 0,
145
+ "boolq": 1,
146
+ "copa": 0,
147
+ "headqa": 0,
148
+ "hellaswag": 0,
149
+ "lambada": 0,
150
+ "logiqa": 0,
151
+ "mathqa": 0,
152
+ "mc_taco": 0,
153
+ "mrpc": 0,
154
+ "multirc": 1,
155
+ "openbookqa": 0,
156
+ "piqa": 0,
157
+ "prost": 0,
158
+ "pubmedqa": 0,
159
+ "qnli": 0,
160
+ "qqp": 0,
161
+ "race": 1,
162
+ "rte": 0,
163
+ "sciq": 0,
164
+ "sst": 0,
165
+ "triviaqa": 0,
166
+ "webqs": 0,
167
+ "wic": 0,
168
+ "winogrande": 0,
169
+ "wnli": 1,
170
+ "wsc": 0
171
+ }
172
+ }
evaluation/results/tr11/bloom1b3/bslmevalfiles/tr11b-1b3-ml-bsevalharness-results_lm-eval_global_step340500_2022-07-12-22-45-57.json ADDED
The diff for this file is too large to render. See raw diff
 
evaluation/results/tr11/bloom1b3/bslmevalfiles/tr11b-1b3-ml-bsevalharness-results_lm-eval_global_step340500_2022-07-15-11-47-34.json ADDED
The diff for this file is too large to render. See raw diff
 
evaluation/results/tr11/bloom1b3/humaneval_temp02.json ADDED
@@ -0,0 +1 @@
1
+ {"pass@1": 0.04033536585365854, "pass@10": 0.06579071150715766, "pass@100": 0.08764228719065376}
evaluation/results/tr11/bloom1b3/humaneval_temp06.json ADDED
@@ -0,0 +1 @@
1
+ {"pass@1": 0.031249999999999993, "pass@10": 0.07447701667197712, "pass@100": 0.1253791767704454}
evaluation/results/tr11/bloom1b3/humaneval_temp08.json ADDED
@@ -0,0 +1 @@
1
+ {"pass@1": 0.023475609756097564, "pass@10": 0.06591235746713595, "pass@100": 0.12748827115496364}
evaluation/results/tr11/bloom2b5/bslmevalfiles/tr11c-2b5-ml-bsevalharness-results_lm-eval_global_step337250_2022-07-12-23-19-06.json ADDED
The diff for this file is too large to render. See raw diff
 
evaluation/results/tr11/bloom2b5/bslmevalfiles/tr11c-2b5-ml-bsevalharness-results_lm-eval_global_step337250_2022-07-13-19-42-29.json ADDED
@@ -0,0 +1,1917 @@
1
+ {
2
+ "results": [
3
+ {
4
+ "task_name": "multirc",
5
+ "prompt_name": "I was going to say\u2026",
6
+ "acc": 0.5724009900990099,
7
+ "fixed_answer_choice_list": [
8
+ "No",
9
+ "Yes"
10
+ ],
11
+ "dataset_path": "super_glue",
12
+ "dataset_name": "multirc",
13
+ "subset": null,
14
+ "prompt_id": "d2d78b88-8845-45b5-935a-6451da00b285",
15
+ "prompt_jinja": "{{ paragraph }}\n{{ question }} \nI was going to say \"{{ answer }}\". Does that sound right? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
16
+ "prompt_original_task": true,
17
+ "comment": "",
18
+ "acc_stderr": 0.007106111600745623
19
+ },
20
+ {
21
+ "task_name": "multirc",
22
+ "prompt_name": "I was going to say\u2026",
23
+ "acc_norm": 0.42883663366336633,
24
+ "fixed_answer_choice_list": [
25
+ "No",
26
+ "Yes"
27
+ ],
28
+ "dataset_path": "super_glue",
29
+ "dataset_name": "multirc",
30
+ "subset": null,
31
+ "prompt_id": "d2d78b88-8845-45b5-935a-6451da00b285",
32
+ "prompt_jinja": "{{ paragraph }}\n{{ question }} \nI was going to say \"{{ answer }}\". Does that sound right? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
33
+ "prompt_original_task": true,
34
+ "comment": "",
35
+ "acc_norm_stderr": 0.00710869042313772
36
+ },
37
+ {
38
+ "task_name": "multirc",
39
+ "prompt_name": "Would it be good to answer\u2026",
40
+ "acc": 0.5204207920792079,
41
+ "fixed_answer_choice_list": [
42
+ "No",
43
+ "Yes"
44
+ ],
45
+ "dataset_path": "super_glue",
46
+ "dataset_name": "multirc",
47
+ "subset": null,
48
+ "prompt_id": "4fc9e1ea-7451-4dba-a2cb-ce870e35ef8b",
49
+ "prompt_jinja": "{{ paragraph }}\n{{ question }} \nWould it be good to answer \"{{ answer }}\"? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
50
+ "prompt_original_task": true,
51
+ "comment": "",
52
+ "acc_stderr": 0.0071758108566598
53
+ },
54
+ {
55
+ "task_name": "multirc",
56
+ "prompt_name": "Would it be good to answer\u2026",
57
+ "acc_norm": 0.43337458745874585,
58
+ "fixed_answer_choice_list": [
59
+ "No",
60
+ "Yes"
61
+ ],
62
+ "dataset_path": "super_glue",
63
+ "dataset_name": "multirc",
64
+ "subset": null,
65
+ "prompt_id": "4fc9e1ea-7451-4dba-a2cb-ce870e35ef8b",
66
+ "prompt_jinja": "{{ paragraph }}\n{{ question }} \nWould it be good to answer \"{{ answer }}\"? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
67
+ "prompt_original_task": true,
68
+ "comment": "",
69
+ "acc_norm_stderr": 0.00711775827463544
70
+ },
71
+ {
72
+ "task_name": "multirc",
73
+ "prompt_name": "confirm",
74
+ "acc": 0.4329620462046205,
75
+ "fixed_answer_choice_list": [
76
+ "No",
77
+ "Yes"
78
+ ],
79
+ "dataset_path": "super_glue",
80
+ "dataset_name": "multirc",
81
+ "subset": null,
82
+ "prompt_id": "b63fd1c3-b4a6-43c3-8429-6a389235b2a4",
83
+ "prompt_jinja": "{{paragraph}}\n\nQuestion: {{question}}\nI think \"{{answer}}\" is a valid answer. Could you confirm? Yes or no?\n|||\n{% if label != -1 %}{{answer_choices[label]}}{% endif %}",
84
+ "prompt_original_task": true,
85
+ "comment": "",
86
+ "acc_stderr": 0.007116959070151668
87
+ },
88
+ {
89
+ "task_name": "multirc",
90
+ "prompt_name": "confirm",
91
+ "acc_norm": 0.4280115511551155,
92
+ "fixed_answer_choice_list": [
93
+ "No",
94
+ "Yes"
95
+ ],
96
+ "dataset_path": "super_glue",
97
+ "dataset_name": "multirc",
98
+ "subset": null,
99
+ "prompt_id": "b63fd1c3-b4a6-43c3-8429-6a389235b2a4",
100
+ "prompt_jinja": "{{paragraph}}\n\nQuestion: {{question}}\nI think \"{{answer}}\" is a valid answer. Could you confirm? Yes or no?\n|||\n{% if label != -1 %}{{answer_choices[label]}}{% endif %}",
101
+ "prompt_original_task": true,
102
+ "comment": "",
103
+ "acc_norm_stderr": 0.007106976252751536
104
+ },
105
+ {
106
+ "task_name": "multirc",
107
+ "prompt_name": "correct",
108
+ "acc": 0.5721947194719472,
109
+ "fixed_answer_choice_list": [
110
+ "No",
111
+ "Yes"
112
+ ],
113
+ "dataset_path": "super_glue",
114
+ "dataset_name": "multirc",
115
+ "subset": null,
116
+ "prompt_id": "ae9b2b0b-1731-4370-adcc-36c4a959490d",
117
+ "prompt_jinja": "Is \"{{answer}}\" a correct answer to the following question?\nQuestion: {{question}}\n\nRely on the following text: {{paragraph}}\n|||\n{% if label != -1 %}{{answer_choices[label]}}{% endif %}",
118
+ "prompt_original_task": true,
119
+ "comment": "",
120
+ "acc_stderr": 0.007106544557507229
121
+ },
122
+ {
123
+ "task_name": "multirc",
124
+ "prompt_name": "correct",
125
+ "acc_norm": 0.4709158415841584,
126
+ "fixed_answer_choice_list": [
127
+ "No",
128
+ "Yes"
129
+ ],
130
+ "dataset_path": "super_glue",
131
+ "dataset_name": "multirc",
132
+ "subset": null,
133
+ "prompt_id": "ae9b2b0b-1731-4370-adcc-36c4a959490d",
134
+ "prompt_jinja": "Is \"{{answer}}\" a correct answer to the following question?\nQuestion: {{question}}\n\nRely on the following text: {{paragraph}}\n|||\n{% if label != -1 %}{{answer_choices[label]}}{% endif %}",
135
+ "prompt_original_task": true,
136
+ "comment": "",
137
+ "acc_norm_stderr": 0.00716964280499065
138
+ },
139
+ {
140
+ "task_name": "multirc",
141
+ "prompt_name": "decide_valid",
142
+ "acc": 0.5375412541254125,
143
+ "fixed_answer_choice_list": [
144
+ "No",
145
+ "Yes"
146
+ ],
147
+ "dataset_path": "super_glue",
148
+ "dataset_name": "multirc",
149
+ "subset": null,
150
+ "prompt_id": "7bf537ea-ff8d-44c7-8fc9-305b35e3be66",
151
+ "prompt_jinja": "{{paragraph}}\n\nDecide whether \"{{answer}}\" is a valid answer to the following question: {{question}}\nAnswer yes or no.\n|||\n{% if label != -1 %}{{answer_choices[label]}}{% endif %}",
152
+ "prompt_original_task": true,
153
+ "comment": "",
154
+ "acc_stderr": 0.007161531207958062
155
+ },
156
+ {
157
+ "task_name": "multirc",
158
+ "prompt_name": "decide_valid",
159
+ "acc_norm": 0.4280115511551155,
160
+ "fixed_answer_choice_list": [
161
+ "No",
162
+ "Yes"
163
+ ],
164
+ "dataset_path": "super_glue",
165
+ "dataset_name": "multirc",
166
+ "subset": null,
167
+ "prompt_id": "7bf537ea-ff8d-44c7-8fc9-305b35e3be66",
168
+ "prompt_jinja": "{{paragraph}}\n\nDecide whether \"{{answer}}\" is a valid answer to the following question: {{question}}\nAnswer yes or no.\n|||\n{% if label != -1 %}{{answer_choices[label]}}{% endif %}",
169
+ "prompt_original_task": true,
170
+ "comment": "",
171
+ "acc_norm_stderr": 0.007106976252751536
172
+ },
173
+ {
174
+ "task_name": "multirc",
175
+ "prompt_name": "found_this_answer",
176
+ "acc": 0.4773102310231023,
177
+ "fixed_answer_choice_list": [
178
+ "No",
179
+ "Yes"
180
+ ],
181
+ "dataset_path": "super_glue",
182
+ "dataset_name": "multirc",
183
+ "subset": null,
184
+ "prompt_id": "2d95962b-a545-41ae-8d76-07ee6704ef65",
185
+ "prompt_jinja": "{{paragraph}}\n\nQuestion: {{question}}\nI found this answer \"{{answer}}\". Is that correct? Yes or no?\n|||\n{% if label != -1 %}{{answer_choices[label]}}{% endif %}",
186
+ "prompt_original_task": true,
187
+ "comment": "",
188
+ "acc_stderr": 0.007174404542630741
189
+ },
190
+ {
191
+ "task_name": "multirc",
192
+ "prompt_name": "found_this_answer",
193
+ "acc_norm": 0.4280115511551155,
194
+ "fixed_answer_choice_list": [
195
+ "No",
196
+ "Yes"
197
+ ],
198
+ "dataset_path": "super_glue",
199
+ "dataset_name": "multirc",
200
+ "subset": null,
201
+ "prompt_id": "2d95962b-a545-41ae-8d76-07ee6704ef65",
202
+ "prompt_jinja": "{{paragraph}}\n\nQuestion: {{question}}\nI found this answer \"{{answer}}\". Is that correct? Yes or no?\n|||\n{% if label != -1 %}{{answer_choices[label]}}{% endif %}",
203
+ "prompt_original_task": true,
204
+ "comment": "",
205
+ "acc_norm_stderr": 0.007106976252751536
206
+ },
207
+ {
208
+ "task_name": "multirc",
209
+ "prompt_name": "grading",
210
+ "acc": 0.5874587458745875,
211
+ "fixed_answer_choice_list": [
212
+ "No",
213
+ "Yes"
214
+ ],
215
+ "dataset_path": "super_glue",
216
+ "dataset_name": "multirc",
217
+ "subset": null,
218
+ "prompt_id": "431a5c97-af33-4053-83c8-afb0dfc04448",
219
+ "prompt_jinja": "{{paragraph}}\nQuestion: {{question}}\n\nI am grading my students' exercises. Is the answer \"{{answer}}\" correct?\n|||\n{% if label != -1 %}{{answer_choices[label]}}{% endif %}",
220
+ "prompt_original_task": true,
221
+ "comment": "",
222
+ "acc_stderr": 0.007071081930208332
223
+ },
224
+ {
225
+ "task_name": "multirc",
226
+ "prompt_name": "grading",
227
+ "acc_norm": 0.4280115511551155,
228
+ "fixed_answer_choice_list": [
229
+ "No",
230
+ "Yes"
231
+ ],
232
+ "dataset_path": "super_glue",
233
+ "dataset_name": "multirc",
234
+ "subset": null,
235
+ "prompt_id": "431a5c97-af33-4053-83c8-afb0dfc04448",
236
+ "prompt_jinja": "{{paragraph}}\nQuestion: {{question}}\n\nI am grading my students' exercises. Is the answer \"{{answer}}\" correct?\n|||\n{% if label != -1 %}{{answer_choices[label]}}{% endif %}",
237
+ "prompt_original_task": true,
238
+ "comment": "",
239
+ "acc_norm_stderr": 0.007106976252751536
240
+ },
241
+ {
242
+ "task_name": "multirc",
243
+ "prompt_name": "is the correct answer\u2026",
244
+ "acc": 0.5478547854785478,
245
+ "fixed_answer_choice_list": [
246
+ "No",
247
+ "Yes"
248
+ ],
249
+ "dataset_path": "super_glue",
250
+ "dataset_name": "multirc",
251
+ "subset": null,
252
+ "prompt_id": "7d878b89-2774-429a-82fb-ac801379e3ae",
253
+ "prompt_jinja": "{{ paragraph }}\nQuestion: {{ question }} \nIs the correct answer {{ answer }}? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
254
+ "prompt_original_task": true,
255
+ "comment": "",
256
+ "acc_stderr": 0.007148833615093023
257
+ },
258
+ {
259
+ "task_name": "multirc",
260
+ "prompt_name": "is the correct answer\u2026",
261
+ "acc_norm": 0.4278052805280528,
262
+ "fixed_answer_choice_list": [
263
+ "No",
264
+ "Yes"
265
+ ],
266
+ "dataset_path": "super_glue",
267
+ "dataset_name": "multirc",
268
+ "subset": null,
269
+ "prompt_id": "7d878b89-2774-429a-82fb-ac801379e3ae",
270
+ "prompt_jinja": "{{ paragraph }}\nQuestion: {{ question }} \nIs the correct answer {{ answer }}? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
271
+ "prompt_original_task": true,
272
+ "comment": "",
273
+ "acc_norm_stderr": 0.007106544557507229
274
+ },
275
+ {
276
+ "task_name": "multirc",
277
+ "prompt_name": "is\u2026 a correct answer?",
278
+ "acc": 0.45028877887788776,
279
+ "fixed_answer_choice_list": [
280
+ "No",
281
+ "Yes"
282
+ ],
283
+ "dataset_path": "super_glue",
284
+ "dataset_name": "multirc",
285
+ "subset": null,
286
+ "prompt_id": "42d47df9-09de-4691-8e49-7cfadd636cdd",
287
+ "prompt_jinja": "{{ paragraph }}\nBased on the previous passage, {{ question }} \nIs \"{{ answer }}\" a correct answer? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
288
+ "prompt_original_task": true,
289
+ "comment": "",
290
+ "acc_stderr": 0.007146219530521704
291
+ },
292
+ {
293
+ "task_name": "multirc",
294
+ "prompt_name": "is\u2026 a correct answer?",
295
+ "acc_norm": 0.4280115511551155,
296
+ "fixed_answer_choice_list": [
297
+ "No",
298
+ "Yes"
299
+ ],
300
+ "dataset_path": "super_glue",
301
+ "dataset_name": "multirc",
302
+ "subset": null,
303
+ "prompt_id": "42d47df9-09de-4691-8e49-7cfadd636cdd",
304
+ "prompt_jinja": "{{ paragraph }}\nBased on the previous passage, {{ question }} \nIs \"{{ answer }}\" a correct answer? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
305
+ "prompt_original_task": true,
306
+ "comment": "",
307
+ "acc_norm_stderr": 0.007106976252751536
308
+ },
309
+ {
310
+ "task_name": "multirc",
311
+ "prompt_name": "paragraph\u2026 question\u2026 is it\u2026 ?",
312
+ "acc": 0.5581683168316832,
313
+ "fixed_answer_choice_list": [
314
+ "No",
315
+ "Yes"
316
+ ],
317
+ "dataset_path": "super_glue",
318
+ "dataset_name": "multirc",
319
+ "subset": null,
320
+ "prompt_id": "59a2d847-27f3-4002-a125-cf9a291b3098",
321
+ "prompt_jinja": "{{ paragraph }}\nQuestion: {{ question }} \nIs it {{ answer }}? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
322
+ "prompt_original_task": true,
323
+ "comment": "",
324
+ "acc_stderr": 0.007133037518848498
325
+ },
326
+ {
327
+ "task_name": "multirc",
328
+ "prompt_name": "paragraph\u2026 question\u2026 is it\u2026 ?",
329
+ "acc_norm": 0.429042904290429,
330
+ "fixed_answer_choice_list": [
331
+ "No",
332
+ "Yes"
333
+ ],
334
+ "dataset_path": "super_glue",
335
+ "dataset_name": "multirc",
336
+ "subset": null,
337
+ "prompt_id": "59a2d847-27f3-4002-a125-cf9a291b3098",
338
+ "prompt_jinja": "{{ paragraph }}\nQuestion: {{ question }} \nIs it {{ answer }}? ||| {% if label != -1 %}{{ answer_choices[label] }}{% endif %}",
339
+ "prompt_original_task": true,
340
+ "comment": "",
341
+ "acc_norm_stderr": 0.007109115814226985
342
+ },
343
+ {
344
+ "task_name": "qqp",
345
+ "prompt_name": "answer",
346
+ "acc": 0.4095720999257977,
347
+ "fixed_answer_choice_list": [
348
+ "no",
349
+ "yes"
350
+ ],
351
+ "dataset_path": "glue",
352
+ "dataset_name": "qqp",
353
+ "subset": null,
354
+ "prompt_id": "c0182cd1-c7ac-4abe-829f-4651536af951",
355
+ "prompt_jinja": "Can an answer to \"{{question1}}\" also be used to answer \"{{question2}}\"? ||| {{ answer_choices[label] }}",
356
+ "prompt_original_task": false,
357
+ "comment": "",
358
+ "acc_stderr": 0.0024456940020775335
359
+ },
360
+ {
361
+ "task_name": "qqp",
362
+ "prompt_name": "answer",
363
+ "acc_norm": 0.36816720257234725,
364
+ "fixed_answer_choice_list": [
365
+ "no",
366
+ "yes"
367
+ ],
368
+ "dataset_path": "glue",
369
+ "dataset_name": "qqp",
370
+ "subset": null,
371
+ "prompt_id": "c0182cd1-c7ac-4abe-829f-4651536af951",
372
+ "prompt_jinja": "Can an answer to \"{{question1}}\" also be used to answer \"{{question2}}\"? ||| {{ answer_choices[label] }}",
373
+ "prompt_original_task": false,
374
+ "comment": "",
375
+ "acc_norm_stderr": 0.002398706610614492
376
+ },
377
+ {
378
+ "task_name": "qqp",
379
+ "prompt_name": "duplicate",
380
+ "acc": 0.5389809547365817,
381
+ "fixed_answer_choice_list": [
382
+ "no",
383
+ "yes"
384
+ ],
385
+ "dataset_path": "glue",
386
+ "dataset_name": "qqp",
387
+ "subset": null,
388
+ "prompt_id": "fd244bd3-ca3b-4e4f-9722-fd006c50e157",
389
+ "prompt_jinja": "I received the questions \"{{question1}}\" and \"{{question2}}\". Are they duplicates? ||| {{ answer_choices[label] }}",
390
+ "prompt_original_task": true,
391
+ "comment": "",
392
+ "acc_stderr": 0.0024791319564636633
393
+ },
394
+ {
395
+ "task_name": "qqp",
396
+ "prompt_name": "duplicate",
397
+ "acc_norm": 0.36816720257234725,
398
+ "fixed_answer_choice_list": [
399
+ "no",
400
+ "yes"
401
+ ],
402
+ "dataset_path": "glue",
403
+ "dataset_name": "qqp",
404
+ "subset": null,
405
+ "prompt_id": "fd244bd3-ca3b-4e4f-9722-fd006c50e157",
406
+ "prompt_jinja": "I received the questions \"{{question1}}\" and \"{{question2}}\". Are they duplicates? ||| {{ answer_choices[label] }}",
407
+ "prompt_original_task": true,
408
+ "comment": "",
409
+ "acc_norm_stderr": 0.002398706610614492
410
+ },
411
+ {
412
+ "task_name": "qqp",
413
+ "prompt_name": "duplicate or not",
414
+ "acc": 0.3811526094484294,
415
+ "fixed_answer_choice_list": [
416
+ "not duplicates",
417
+ "duplicates"
418
+ ],
419
+ "dataset_path": "glue",
420
+ "dataset_name": "qqp",
421
+ "subset": null,
422
+ "prompt_id": "94972071-a726-42a3-a726-13f414b65e67",
423
+ "prompt_jinja": "{{question1}}\n{{question2}}\nPick one: These questions are \"{{\"duplicates\"}}\" or \"{{\"not duplicates\"}}\".\n|||\n{{ answer_choices[label] }}",
424
+ "prompt_original_task": true,
425
+ "comment": "",
426
+ "acc_stderr": 0.0024154315297388092
427
+ },
428
+ {
429
+ "task_name": "qqp",
430
+ "prompt_name": "duplicate or not",
431
+ "acc_norm": 0.6317585951026465,
432
+ "fixed_answer_choice_list": [
433
+ "not duplicates",
434
+ "duplicates"
435
+ ],
436
+ "dataset_path": "glue",
437
+ "dataset_name": "qqp",
438
+ "subset": null,
439
+ "prompt_id": "94972071-a726-42a3-a726-13f414b65e67",
440
+ "prompt_jinja": "{{question1}}\n{{question2}}\nPick one: These questions are \"{{\"duplicates\"}}\" or \"{{\"not duplicates\"}}\".\n|||\n{{ answer_choices[label] }}",
441
+ "prompt_original_task": true,
442
+ "comment": "",
443
+ "acc_norm_stderr": 0.00239880745215712
444
+ },
445
+ {
446
+ "task_name": "qqp",
447
+ "prompt_name": "meaning",
448
+ "acc": 0.3842443729903537,
449
+ "fixed_answer_choice_list": [
450
+ "No",
451
+ "Yes"
452
+ ],
453
+ "dataset_path": "glue",
454
+ "dataset_name": "qqp",
455
+ "subset": null,
456
+ "prompt_id": "c0724198-97e7-44a1-89d8-c51e97ce0b04",
457
+ "prompt_jinja": "Question 1: {{question1}}\nQuestion 2: {{question2}}\n\nDo these two questions convey the same meaning? Yes or no? ||| {{answer_choices[label]}}",
458
+ "prompt_original_task": true,
459
+ "comment": "",
460
+ "acc_stderr": 0.0024191425100536248
461
+ },
462
+ {
463
+ "task_name": "qqp",
464
+ "prompt_name": "meaning",
465
+ "acc_norm": 0.36816720257234725,
466
+ "fixed_answer_choice_list": [
467
+ "No",
468
+ "Yes"
469
+ ],
470
+ "dataset_path": "glue",
471
+ "dataset_name": "qqp",
472
+ "subset": null,
473
+ "prompt_id": "c0724198-97e7-44a1-89d8-c51e97ce0b04",
474
+ "prompt_jinja": "Question 1: {{question1}}\nQuestion 2: {{question2}}\n\nDo these two questions convey the same meaning? Yes or no? ||| {{answer_choices[label]}}",
475
+ "prompt_original_task": true,
476
+ "comment": "",
477
+ "acc_norm_stderr": 0.002398706610614492
478
+ },
479
+ {
480
+ "task_name": "qqp",
481
+ "prompt_name": "quora",
482
+ "acc": 0.36826613900568883,
483
+ "fixed_answer_choice_list": [
484
+ "no",
485
+ "yes"
486
+ ],
487
+ "dataset_path": "glue",
488
+ "dataset_name": "qqp",
489
+ "subset": null,
490
+ "prompt_id": "8e711799-a57c-4941-833b-466bedfb80ad",
491
+ "prompt_jinja": "I'm an administrator on the website Quora. There are two posts, one that asks \"{{question1}}\" and another that asks \"{{question2}}\". I can merge questions if they are asking the same thing. Can I merge these two questions? ||| {{ answer_choices[label] }}",
492
+ "prompt_original_task": true,
493
+ "comment": "",
494
+ "acc_stderr": 0.002398841052447127
495
+ },
496
+ {
497
+ "task_name": "qqp",
498
+ "prompt_name": "quora",
499
+ "acc_norm": 0.36816720257234725,
500
+ "fixed_answer_choice_list": [
501
+ "no",
502
+ "yes"
503
+ ],
504
+ "dataset_path": "glue",
505
+ "dataset_name": "qqp",
506
+ "subset": null,
507
+ "prompt_id": "8e711799-a57c-4941-833b-466bedfb80ad",
508
+ "prompt_jinja": "I'm an administrator on the website Quora. There are two posts, one that asks \"{{question1}}\" and another that asks \"{{question2}}\". I can merge questions if they are asking the same thing. Can I merge these two questions? ||| {{ answer_choices[label] }}",
509
+ "prompt_original_task": true,
510
+ "comment": "",
511
+ "acc_norm_stderr": 0.002398706610614492
512
+ },
513
+ {
514
+ "task_name": "qqp",
515
+ "prompt_name": "same thing",
516
+ "acc": 0.5813999505317833,
517
+ "fixed_answer_choice_list": [
518
+ "no",
519
+ "yes"
520
+ ],
521
+ "dataset_path": "glue",
522
+ "dataset_name": "qqp",
523
+ "subset": null,
524
+ "prompt_id": "a45ad5cd-a3ba-4ab2-a728-a9ea0f27102b",
525
+ "prompt_jinja": "Are the questions \"{{question1}}\" and \"{{question2}}\" asking the same thing? ||| {{ answer_choices[label] }}",
526
+ "prompt_original_task": true,
527
+ "comment": "",
528
+ "acc_stderr": 0.0024535258231136925
529
+ },
530
+ {
531
+ "task_name": "qqp",
532
+ "prompt_name": "same thing",
533
+ "acc_norm": 0.36816720257234725,
534
+ "fixed_answer_choice_list": [
535
+ "no",
536
+ "yes"
537
+ ],
538
+ "dataset_path": "glue",
539
+ "dataset_name": "qqp",
540
+ "subset": null,
541
+ "prompt_id": "a45ad5cd-a3ba-4ab2-a728-a9ea0f27102b",
542
+ "prompt_jinja": "Are the questions \"{{question1}}\" and \"{{question2}}\" asking the same thing? ||| {{ answer_choices[label] }}",
543
+ "prompt_original_task": true,
544
+ "comment": "",
545
+ "acc_norm_stderr": 0.002398706610614492
546
+ },
547
+ {
548
+ "task_name": "rte",
549
+ "prompt_name": "does the claim\u2026 follow the fact\u2026",
550
+ "acc": 0.4729241877256318,
551
+ "fixed_answer_choice_list": [
552
+ "yes",
553
+ "no"
554
+ ],
555
+ "dataset_path": "glue",
556
+ "dataset_name": "rte",
557
+ "subset": null,
558
+ "prompt_id": "4ee6ff27-de63-4e7b-a9d4-82a17eba407a",
559
+ "prompt_jinja": "Does the claim \"{{sentence2}}\" follow from the fact that \"{{sentence1}}\"? Please answer either {{\"yes\"}} or {{\"no\"}}.\n|||\n{{answer_choices[label]}}",
560
+ "prompt_original_task": true,
561
+ "comment": "",
562
+ "acc_stderr": 0.0300523034631437
563
+ },
564
+ {
565
+ "task_name": "rte",
566
+ "prompt_name": "does the claim\u2026 follow the fact\u2026",
567
+ "acc_norm": 0.5270758122743683,
568
+ "fixed_answer_choice_list": [
569
+ "yes",
570
+ "no"
571
+ ],
572
+ "dataset_path": "glue",
573
+ "dataset_name": "rte",
574
+ "subset": null,
575
+ "prompt_id": "4ee6ff27-de63-4e7b-a9d4-82a17eba407a",
576
+ "prompt_jinja": "Does the claim \"{{sentence2}}\" follow from the fact that \"{{sentence1}}\"? Please answer either {{\"yes\"}} or {{\"no\"}}.\n|||\n{{answer_choices[label]}}",
577
+ "prompt_original_task": true,
578
+ "comment": "",
579
+ "acc_norm_stderr": 0.0300523034631437
580
+ },
581
+ {
582
+ "task_name": "rte",
583
+ "prompt_name": "entailment explained",
584
+ "acc": 0.516245487364621,
585
+ "fixed_answer_choice_list": [
586
+ "entailment",
587
+ "not entailment"
588
+ ],
589
+ "dataset_path": "glue",
590
+ "dataset_name": "rte",
591
+ "subset": null,
592
+ "prompt_id": "9e2b4267-ec23-44c8-b82a-107e2c890fec",
593
+ "prompt_jinja": "We say that one sentence \"{{\"entails\"}}\" another sentence when the first sentence implies the second sentence. Consider the following two sentences:\n{{sentence1}}\n{{sentence2}}\nIs the relationship from the first to the second sentence \"{{\"entailment\"}}\" or \"{{\"not entailment\"}}\"?\n|||\n{{answer_choices[label]}}",
594
+ "prompt_original_task": true,
595
+ "comment": "",
596
+ "acc_stderr": 0.030080573208738064
597
+ },
598
+ {
599
+ "task_name": "rte",
600
+ "prompt_name": "entailment explained",
601
+ "acc_norm": 0.4729241877256318,
602
+ "fixed_answer_choice_list": [
603
+ "entailment",
604
+ "not entailment"
605
+ ],
606
+ "dataset_path": "glue",
607
+ "dataset_name": "rte",
608
+ "subset": null,
609
+ "prompt_id": "9e2b4267-ec23-44c8-b82a-107e2c890fec",
610
+ "prompt_jinja": "We say that one sentence \"{{\"entails\"}}\" another sentence when the first sentence implies the second sentence. Consider the following two sentences:\n{{sentence1}}\n{{sentence2}}\nIs the relationship from the first to the second sentence \"{{\"entailment\"}}\" or \"{{\"not entailment\"}}\"?\n|||\n{{answer_choices[label]}}",
611
+ "prompt_original_task": true,
612
+ "comment": "",
613
+ "acc_norm_stderr": 0.0300523034631437
614
+ },
615
+ {
616
+ "task_name": "rte",
617
+ "prompt_name": "imply",
618
+ "acc": 0.47653429602888087,
619
+ "fixed_answer_choice_list": [
620
+ "yes",
621
+ "no"
622
+ ],
623
+ "dataset_path": "glue",
624
+ "dataset_name": "rte",
625
+ "subset": null,
626
+ "prompt_id": "c8dfc879-40f2-412d-be1e-4cd70107f6e6",
627
+ "prompt_jinja": "Does \"{{sentence1}}\" imply that \"{{sentence2}}\"? Please answer either {{\"yes\"}} or {{\"no\"}}.\n|||\n{{answer_choices[label]}}",
628
+ "prompt_original_task": true,
629
+ "comment": "",
630
+ "acc_stderr": 0.030063300411902652
631
+ },
632
+ {
633
+ "task_name": "rte",
634
+ "prompt_name": "imply",
635
+ "acc_norm": 0.5270758122743683,
636
+ "fixed_answer_choice_list": [
637
+ "yes",
638
+ "no"
639
+ ],
640
+ "dataset_path": "glue",
641
+ "dataset_name": "rte",
642
+ "subset": null,
643
+ "prompt_id": "c8dfc879-40f2-412d-be1e-4cd70107f6e6",
644
+ "prompt_jinja": "Does \"{{sentence1}}\" imply that \"{{sentence2}}\"? Please answer either {{\"yes\"}} or {{\"no\"}}.\n|||\n{{answer_choices[label]}}",
645
+ "prompt_original_task": true,
646
+ "comment": "",
647
+ "acc_norm_stderr": 0.0300523034631437
648
+ },
649
+ {
650
+ "task_name": "rte",
651
+ "prompt_name": "imply separated",
652
+ "acc": 0.4620938628158845,
653
+ "fixed_answer_choice_list": [
654
+ "yes",
655
+ "no"
656
+ ],
657
+ "dataset_path": "glue",
658
+ "dataset_name": "rte",
659
+ "subset": null,
660
+ "prompt_id": "f56ffced-9b16-431a-8a17-501e63cddf73",
661
+ "prompt_jinja": "{{sentence1}}\nDoes this imply\n{{sentence2}}\nPlease answer {{\"A) yes or B) no.\"}}\n|||\n{{answer_choices[label]}}",
662
+ "prompt_original_task": true,
663
+ "comment": "",
664
+ "acc_stderr": 0.03000984891252911
665
+ },
666
+ {
667
+ "task_name": "rte",
668
+ "prompt_name": "imply separated",
669
+ "acc_norm": 0.5270758122743683,
670
+ "fixed_answer_choice_list": [
671
+ "yes",
672
+ "no"
673
+ ],
674
+ "dataset_path": "glue",
675
+ "dataset_name": "rte",
676
+ "subset": null,
677
+ "prompt_id": "f56ffced-9b16-431a-8a17-501e63cddf73",
678
+ "prompt_jinja": "{{sentence1}}\nDoes this imply\n{{sentence2}}\nPlease answer {{\"A) yes or B) no.\"}}\n|||\n{{answer_choices[label]}}",
679
+ "prompt_original_task": true,
680
+ "comment": "",
681
+ "acc_norm_stderr": 0.0300523034631437
682
+ },
683
+ {
684
+ "task_name": "rte",
685
+ "prompt_name": "mean",
686
+ "acc": 0.47653429602888087,
687
+ "fixed_answer_choice_list": [
688
+ "yes",
689
+ "no"
690
+ ],
691
+ "dataset_path": "glue",
692
+ "dataset_name": "rte",
693
+ "subset": null,
694
+ "prompt_id": "03a7ae07-5ddd-46c4-92f3-2152223d44ec",
695
+ "prompt_jinja": "{{sentence1}}\nDoes this mean that \"{{sentence2}}\" is true? {{\"A) yes or B) no.\"}}\n|||\n{{answer_choices[label]}}",
696
+ "prompt_original_task": true,
697
+ "comment": "",
698
+ "acc_stderr": 0.030063300411902652
699
+ },
700
+ {
701
+ "task_name": "rte",
702
+ "prompt_name": "mean",
703
+ "acc_norm": 0.5270758122743683,
704
+ "fixed_answer_choice_list": [
705
+ "yes",
706
+ "no"
707
+ ],
708
+ "dataset_path": "glue",
709
+ "dataset_name": "rte",
710
+ "subset": null,
711
+ "prompt_id": "03a7ae07-5ddd-46c4-92f3-2152223d44ec",
712
+ "prompt_jinja": "{{sentence1}}\nDoes this mean that \"{{sentence2}}\" is true? {{\"A) yes or B) no.\"}}\n|||\n{{answer_choices[label]}}",
713
+ "prompt_original_task": true,
714
+ "comment": "",
715
+ "acc_norm_stderr": 0.0300523034631437
716
+ },
717
+ {
718
+ "task_name": "sst",
719
+ "prompt_name": "following positive negative",
720
+ "acc": 0.7603211009174312,
721
+ "fixed_answer_choice_list": [
722
+ "negative",
723
+ "positive"
724
+ ],
725
+ "dataset_path": "glue",
726
+ "dataset_name": "sst2",
727
+ "subset": null,
728
+ "prompt_id": "63c6b2be-8ecd-42ad-88c7-0d1dc1a8323a",
729
+ "prompt_jinja": "Does the following sentence have a {{\"positive\"}} or {{\"negative\"}} sentiment?\n{{sentence}}\n|||\n{{ answer_choices[label] }}",
730
+ "prompt_original_task": true,
731
+ "comment": "",
732
+ "acc_stderr": 0.014464530608155847
733
+ },
734
+ {
735
+ "task_name": "sst",
736
+ "prompt_name": "following positive negative",
737
+ "acc_norm": 0.7603211009174312,
738
+ "fixed_answer_choice_list": [
739
+ "negative",
740
+ "positive"
741
+ ],
742
+ "dataset_path": "glue",
743
+ "dataset_name": "sst2",
744
+ "subset": null,
745
+ "prompt_id": "63c6b2be-8ecd-42ad-88c7-0d1dc1a8323a",
746
+ "prompt_jinja": "Does the following sentence have a {{\"positive\"}} or {{\"negative\"}} sentiment?\n{{sentence}}\n|||\n{{ answer_choices[label] }}",
747
+ "prompt_original_task": true,
748
+ "comment": "",
749
+ "acc_norm_stderr": 0.014464530608155847
750
+ },
751
+ {
752
+ "task_name": "sst",
753
+ "prompt_name": "happy or mad",
754
+ "acc": 0.5091743119266054,
755
+ "fixed_answer_choice_list": [
756
+ "bad",
757
+ "good"
758
+ ],
759
+ "dataset_path": "glue",
760
+ "dataset_name": "sst2",
761
+ "subset": null,
762
+ "prompt_id": "6dd74cd5-e074-4612-9e96-c17ca88c3bc4",
763
+ "prompt_jinja": "Someone sent me an email with the sentence \"{{sentence}}\". Do you think they are feeling {{\"good\"}} or {{\"bad\"}}? ||| {{ answer_choices[label] }}",
764
+ "prompt_original_task": true,
765
+ "comment": "",
766
+ "acc_stderr": 0.01693900152535154
767
+ },
768
+ {
769
+ "task_name": "sst",
770
+ "prompt_name": "happy or mad",
771
+ "acc_norm": 0.5091743119266054,
772
+ "fixed_answer_choice_list": [
773
+ "bad",
774
+ "good"
775
+ ],
776
+ "dataset_path": "glue",
777
+ "dataset_name": "sst2",
778
+ "subset": null,
779
+ "prompt_id": "6dd74cd5-e074-4612-9e96-c17ca88c3bc4",
780
+ "prompt_jinja": "Someone sent me an email with the sentence \"{{sentence}}\". Do you think they are feeling {{\"good\"}} or {{\"bad\"}}? ||| {{ answer_choices[label] }}",
781
+ "prompt_original_task": true,
782
+ "comment": "",
783
+ "acc_norm_stderr": 0.01693900152535154
784
+ },
785
+ {
786
+ "task_name": "sst",
787
+ "prompt_name": "positive negative after",
788
+ "acc": 0.5263761467889908,
789
+ "fixed_answer_choice_list": [
790
+ "negative",
791
+ "positive"
792
+ ],
793
+ "dataset_path": "glue",
794
+ "dataset_name": "sst2",
795
+ "subset": null,
796
+ "prompt_id": "11d1c505-9232-4c35-82a4-4c3642843e2e",
797
+ "prompt_jinja": "{{sentence}}\nQuestion: Was that sentence {{\"positive\"}} or {{\"negative\"}}? Answer: ||| {{ answer_choices[label] }}",
798
+ "prompt_original_task": true,
799
+ "comment": "",
800
+ "acc_stderr": 0.016918264333564144
801
+ },
802
+ {
803
+ "task_name": "sst",
804
+ "prompt_name": "positive negative after",
805
+ "acc_norm": 0.5263761467889908,
806
+ "fixed_answer_choice_list": [
807
+ "negative",
808
+ "positive"
809
+ ],
810
+ "dataset_path": "glue",
811
+ "dataset_name": "sst2",
812
+ "subset": null,
813
+ "prompt_id": "11d1c505-9232-4c35-82a4-4c3642843e2e",
814
+ "prompt_jinja": "{{sentence}}\nQuestion: Was that sentence {{\"positive\"}} or {{\"negative\"}}? Answer: ||| {{ answer_choices[label] }}",
815
+ "prompt_original_task": true,
816
+ "comment": "",
817
+ "acc_norm_stderr": 0.016918264333564144
818
+ },
819
+ {
820
+ "task_name": "sst",
821
+ "prompt_name": "review",
822
+ "acc": 0.5722477064220184,
823
+ "fixed_answer_choice_list": [
824
+ "negative",
825
+ "positive"
826
+ ],
827
+ "dataset_path": "glue",
828
+ "dataset_name": "sst2",
829
+ "subset": null,
830
+ "prompt_id": "228fcae7-7f4c-4e3c-9ac4-e49b26bc103d",
831
+ "prompt_jinja": "I'm reading a review that says \"{{sentence}}\".\n\nDo you think the review is {{\"positive\"}} or {{\"negative\"}}? ||| {{ answer_choices[label] }}",
832
+ "prompt_original_task": true,
833
+ "comment": "",
834
+ "acc_stderr": 0.016764056901835654
835
+ },
836
+ {
837
+ "task_name": "sst",
838
+ "prompt_name": "review",
839
+ "acc_norm": 0.5722477064220184,
840
+ "fixed_answer_choice_list": [
841
+ "negative",
842
+ "positive"
843
+ ],
844
+ "dataset_path": "glue",
845
+ "dataset_name": "sst2",
846
+ "subset": null,
847
+ "prompt_id": "228fcae7-7f4c-4e3c-9ac4-e49b26bc103d",
848
+ "prompt_jinja": "I'm reading a review that says \"{{sentence}}\".\n\nDo you think the review is {{\"positive\"}} or {{\"negative\"}}? ||| {{ answer_choices[label] }}",
849
+ "prompt_original_task": true,
850
+ "comment": "",
851
+ "acc_norm_stderr": 0.016764056901835654
852
+ },
853
+ {
854
+ "task_name": "sst",
855
+ "prompt_name": "said",
856
+ "acc": 0.5022935779816514,
857
+ "fixed_answer_choice_list": [
858
+ "sad",
859
+ "happy"
860
+ ],
861
+ "dataset_path": "glue",
862
+ "dataset_name": "sst2",
863
+ "subset": null,
864
+ "prompt_id": "5aa0cea9-0f8d-454d-b25b-b0d4cda273b8",
865
+ "prompt_jinja": "Someone just said to me \"{{sentence}}\".\n\nDo you think they are {{\"sad\"}} or {{\"happy\"}}? ||| {{ answer_choices[label] }}",
866
+ "prompt_original_task": true,
867
+ "comment": "",
868
+ "acc_stderr": 0.016941675443113525
869
+ },
870
+ {
871
+ "task_name": "sst",
872
+ "prompt_name": "said",
873
+ "acc_norm": 0.5091743119266054,
874
+ "fixed_answer_choice_list": [
875
+ "sad",
876
+ "happy"
877
+ ],
878
+ "dataset_path": "glue",
879
+ "dataset_name": "sst2",
880
+ "subset": null,
881
+ "prompt_id": "5aa0cea9-0f8d-454d-b25b-b0d4cda273b8",
882
+ "prompt_jinja": "Someone just said to me \"{{sentence}}\".\n\nDo you think they are {{\"sad\"}} or {{\"happy\"}}? ||| {{ answer_choices[label] }}",
883
+ "prompt_original_task": true,
884
+ "comment": "",
885
+ "acc_norm_stderr": 0.01693900152535154
886
+ },
887
+ {
888
+ "task_name": "tydiqa_primary",
889
+ "prompt_name": "en_after_reading_the_text",
890
+ "acc": 0.35064935064935066,
891
+ "fixed_answer_choice_list": [
892
+ "Yes",
893
+ "No"
894
+ ],
895
+ "dataset_path": "tydiqa",
896
+ "dataset_name": "primary_task",
897
+ "subset": null,
898
+ "prompt_id": "9c42e3fd-d46e-4149-bb60-4b3118104d95",
899
+ "prompt_jinja": "{% if language == \"english\" %} \n {% if annotations.yes_no_answer[0] == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nAfter reading the following text snippet from Wikipedia, please answer the question: {{question_text}} \n{{document_plaintext}}\n||| \n{{annotations.yes_no_answer[0] | capitalize}}\n {% endif %}\n{% endif %}",
900
+ "prompt_original_task": true,
901
+ "comment": "",
902
+ "acc_stderr": 0.054735534443086
903
+ },
904
+ {
905
+ "task_name": "tydiqa_primary",
906
+ "prompt_name": "en_after_reading_the_text",
907
+ "acc_norm": 0.6493506493506493,
908
+ "fixed_answer_choice_list": [
909
+ "Yes",
910
+ "No"
911
+ ],
912
+ "dataset_path": "tydiqa",
913
+ "dataset_name": "primary_task",
914
+ "subset": null,
915
+ "prompt_id": "9c42e3fd-d46e-4149-bb60-4b3118104d95",
916
+ "prompt_jinja": "{% if language == \"english\" %} \n {% if annotations.yes_no_answer[0] == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nAfter reading the following text snippet from Wikipedia, please answer the question: {{question_text}} \n{{document_plaintext}}\n||| \n{{annotations.yes_no_answer[0] | capitalize}}\n {% endif %}\n{% endif %}",
917
+ "prompt_original_task": true,
918
+ "comment": "",
919
+ "acc_norm_stderr": 0.054735534443086
920
+ },
921
+ {
922
+ "task_name": "tydiqa_primary",
923
+ "prompt_name": "en_based_on_the_text",
924
+ "acc": 0.33766233766233766,
925
+ "fixed_answer_choice_list": [
926
+ "Yes",
927
+ "No"
928
+ ],
929
+ "dataset_path": "tydiqa",
930
+ "dataset_name": "primary_task",
931
+ "subset": null,
932
+ "prompt_id": "e593017f-9bcf-4442-944d-fcdf2edcb4f7",
933
+ "prompt_jinja": "{% if language == \"english\" %} \n {% if annotations.yes_no_answer[0] == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nBased on the following text snippet, {{question_text}} \n{{document_plaintext}}\n||| \n{{annotations.yes_no_answer[0] | capitalize}}\n {% endif %}\n{% endif %}",
934
+ "prompt_original_task": true,
935
+ "comment": "",
936
+ "acc_stderr": 0.05424681453014242
937
+ },
938
+ {
939
+ "task_name": "tydiqa_primary",
940
+ "prompt_name": "en_based_on_the_text",
941
+ "acc_norm": 0.6363636363636364,
942
+ "fixed_answer_choice_list": [
943
+ "Yes",
944
+ "No"
945
+ ],
946
+ "dataset_path": "tydiqa",
947
+ "dataset_name": "primary_task",
948
+ "subset": null,
949
+ "prompt_id": "e593017f-9bcf-4442-944d-fcdf2edcb4f7",
950
+ "prompt_jinja": "{% if language == \"english\" %} \n {% if annotations.yes_no_answer[0] == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nBased on the following text snippet, {{question_text}} \n{{document_plaintext}}\n||| \n{{annotations.yes_no_answer[0] | capitalize}}\n {% endif %}\n{% endif %}",
951
+ "prompt_original_task": true,
952
+ "comment": "",
953
+ "acc_norm_stderr": 0.055179725333353066
954
+ },
955
+ {
956
+ "task_name": "tydiqa_primary",
957
+ "prompt_name": "en_heres_what_I_found",
958
+ "acc": 0.03685741998060136,
959
+ "fixed_answer_choice_list": [
960
+ "Yes",
961
+ "No",
962
+ "None"
963
+ ],
964
+ "dataset_path": "tydiqa",
965
+ "dataset_name": "primary_task",
966
+ "subset": null,
967
+ "prompt_id": "16f11e56-a78d-4e33-bba1-586f9947baf7",
968
+ "prompt_jinja": "{% if language == \"english\" %}\nI wonder {{question_text}}.\nHelp me answer this question with \"{{answer_choices[0]}}\" or \"{{answer_choices[1]}}\" or \"{{answer_choices[2]}}\" if none of the first two answers apply.\nHere's what I found on the internet:\nTopic: {{document_title}}\nArticle: {{document_plaintext}}\n|||\n{{annotations.yes_no_answer[0] | capitalize}}\n{% endif %}",
969
+ "prompt_original_task": true,
970
+ "comment": "",
971
+ "acc_stderr": 0.005870689955728106
972
+ },
973
+ {
974
+ "task_name": "tydiqa_primary",
975
+ "prompt_name": "en_heres_what_I_found",
976
+ "acc_norm": 0.8661493695441319,
977
+ "fixed_answer_choice_list": [
978
+ "Yes",
979
+ "No",
980
+ "None"
981
+ ],
982
+ "dataset_path": "tydiqa",
983
+ "dataset_name": "primary_task",
984
+ "subset": null,
985
+ "prompt_id": "16f11e56-a78d-4e33-bba1-586f9947baf7",
986
+ "prompt_jinja": "{% if language == \"english\" %}\nI wonder {{question_text}}.\nHelp me answer this question with \"{{answer_choices[0]}}\" or \"{{answer_choices[1]}}\" or \"{{answer_choices[2]}}\" if none of the first two answers apply.\nHere's what I found on the internet:\nTopic: {{document_title}}\nArticle: {{document_plaintext}}\n|||\n{{annotations.yes_no_answer[0] | capitalize}}\n{% endif %}",
987
+ "prompt_original_task": true,
988
+ "comment": "",
989
+ "acc_norm_stderr": 0.010609330898735572
990
+ },
991
+ {
992
+ "task_name": "tydiqa_primary",
993
+ "prompt_name": "en_open_domain_qa",
994
+ "acc": 0.6753246753246753,
995
+ "fixed_answer_choice_list": [
996
+ "Yes",
997
+ "No"
998
+ ],
999
+ "dataset_path": "tydiqa",
1000
+ "dataset_name": "primary_task",
1001
+ "subset": null,
1002
+ "prompt_id": "b4f7c441-41b1-4665-93f9-f2e875aed92a",
1003
+ "prompt_jinja": "{% if language == \"english\" %} \n {% if annotations.yes_no_answer[0] == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nAnswer the question about {{document_title}}.\nQuestion: {{question_text}}. Yes or No?\n||| \n{{annotations.yes_no_answer[0] | capitalize}}\n {% endif %}\n{% endif %}",
1004
+ "prompt_original_task": false,
1005
+ "comment": "",
1006
+ "acc_stderr": 0.05371235012133188
1007
+ },
1008
+ {
1009
+ "task_name": "tydiqa_primary",
1010
+ "prompt_name": "en_open_domain_qa",
1011
+ "acc_norm": 0.6753246753246753,
1012
+ "fixed_answer_choice_list": [
1013
+ "Yes",
1014
+ "No"
1015
+ ],
1016
+ "dataset_path": "tydiqa",
1017
+ "dataset_name": "primary_task",
1018
+ "subset": null,
1019
+ "prompt_id": "b4f7c441-41b1-4665-93f9-f2e875aed92a",
1020
+ "prompt_jinja": "{% if language == \"english\" %} \n {% if annotations.yes_no_answer[0] == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nAnswer the question about {{document_title}}.\nQuestion: {{question_text}}. Yes or No?\n||| \n{{annotations.yes_no_answer[0] | capitalize}}\n {% endif %}\n{% endif %}",
1021
+ "prompt_original_task": false,
1022
+ "comment": "",
1023
+ "acc_norm_stderr": 0.05371235012133188
1024
+ },
1025
+ {
1026
+ "task_name": "tydiqa_primary",
1027
+ "prompt_name": "en_open_domain_qa_without_choices",
1028
+ "acc": 0.6753246753246753,
1029
+ "fixed_answer_choice_list": [
1030
+ "Yes",
1031
+ "No"
1032
+ ],
1033
+ "dataset_path": "tydiqa",
1034
+ "dataset_name": "primary_task",
1035
+ "subset": null,
1036
+ "prompt_id": "4b21e3be-fba4-49b7-beb1-a61de26eb0ac",
1037
+ "prompt_jinja": "{% if language == \"english\" %} \n {% if annotations.yes_no_answer[0] == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nAnswer the question about {{document_title}}. {{question_text}}\n||| \n{{annotations.yes_no_answer[0] | capitalize}} \n {% endif %} \n{% endif %} ",
1038
+ "prompt_original_task": false,
1039
+ "comment": "",
1040
+ "acc_stderr": 0.05371235012133188
1041
+ },
1042
+ {
1043
+ "task_name": "tydiqa_primary",
1044
+ "prompt_name": "en_open_domain_qa_without_choices",
1045
+ "acc_norm": 0.6753246753246753,
1046
+ "fixed_answer_choice_list": [
1047
+ "Yes",
1048
+ "No"
1049
+ ],
1050
+ "dataset_path": "tydiqa",
1051
+ "dataset_name": "primary_task",
1052
+ "subset": null,
1053
+ "prompt_id": "4b21e3be-fba4-49b7-beb1-a61de26eb0ac",
1054
+ "prompt_jinja": "{% if language == \"english\" %} \n {% if annotations.yes_no_answer[0] == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nAnswer the question about {{document_title}}. {{question_text}}\n||| \n{{annotations.yes_no_answer[0] | capitalize}} \n {% endif %} \n{% endif %} ",
1055
+ "prompt_original_task": false,
1056
+ "comment": "",
1057
+ "acc_norm_stderr": 0.05371235012133188
1058
+ },
1059
+ {
1060
+ "task_name": "tydiqa_primary",
1061
+ "prompt_name": "en_read_and_answer",
1062
+ "acc": 0.03685741998060136,
1063
+ "fixed_answer_choice_list": [
1064
+ "Yes",
1065
+ "No",
1066
+ "None"
1067
+ ],
1068
+ "dataset_path": "tydiqa",
1069
+ "dataset_name": "primary_task",
1070
+ "subset": null,
1071
+ "prompt_id": "7b8b7707-dbad-40d2-a5c2-430e6ace10bb",
1072
+ "prompt_jinja": "{% if language == \"english\" %}\nAnswer the following question with \"{{answer_choices[0]}}\" or \"{{answer_choices[1]}}\" or \"{{answer_choices[2]}}\" if none of the first two answers apply.\nQuestion: {{question_text}}\nTopic: {{document_title}}\nArticle: {{document_plaintext}}\n|||\n{{annotations.yes_no_answer[0] | capitalize}}\n{% endif %}",
1073
+ "prompt_original_task": true,
1074
+ "comment": "",
1075
+ "acc_stderr": 0.005870689955728103
1076
+ },
1077
+ {
1078
+ "task_name": "tydiqa_primary",
1079
+ "prompt_name": "en_read_and_answer",
1080
+ "acc_norm": 0.8845780795344326,
1081
+ "fixed_answer_choice_list": [
1082
+ "Yes",
1083
+ "No",
1084
+ "None"
1085
+ ],
1086
+ "dataset_path": "tydiqa",
1087
+ "dataset_name": "primary_task",
1088
+ "subset": null,
1089
+ "prompt_id": "7b8b7707-dbad-40d2-a5c2-430e6ace10bb",
1090
+ "prompt_jinja": "{% if language == \"english\" %}\nAnswer the following question with \"{{answer_choices[0]}}\" or \"{{answer_choices[1]}}\" or \"{{answer_choices[2]}}\" if none of the first two answers apply.\nQuestion: {{question_text}}\nTopic: {{document_title}}\nArticle: {{document_plaintext}}\n|||\n{{annotations.yes_no_answer[0] | capitalize}}\n{% endif %}",
1091
+ "prompt_original_task": true,
1092
+ "comment": "",
1093
+ "acc_norm_stderr": 0.009956200231519313
1094
+ },
1095
+ {
1096
+ "task_name": "tydiqa_primary",
1097
+ "prompt_name": "en_yes_no_none",
1098
+ "acc": 0.037827352085354024,
1099
+ "fixed_answer_choice_list": [
1100
+ "Yes",
1101
+ "No",
1102
+ "None"
1103
+ ],
1104
+ "dataset_path": "tydiqa",
1105
+ "dataset_name": "primary_task",
1106
+ "subset": null,
1107
+ "prompt_id": "297fc59f-bd92-493b-ae61-3c3adcb46eb3",
1108
+ "prompt_jinja": "{% if language == \"english\" %} \nQuestion: {{question_text}}\nAnswer the question with {{\"Yes\"}} or {{\"No\"}}. If it is not possible then answer {{\"None\"}}.\nHint: {{document_plaintext}}\n|||\n{{annotations.yes_no_answer[0] | capitalize}}\n{% endif %}",
1109
+ "prompt_original_task": true,
1110
+ "comment": "",
1111
+ "acc_stderr": 0.005944438823944305
1112
+ },
1113
+ {
1114
+ "task_name": "tydiqa_primary",
1115
+ "prompt_name": "en_yes_no_none",
1116
+ "acc_norm": 0.871968962172648,
1117
+ "fixed_answer_choice_list": [
1118
+ "Yes",
1119
+ "No",
1120
+ "None"
1121
+ ],
1122
+ "dataset_path": "tydiqa",
1123
+ "dataset_name": "primary_task",
1124
+ "subset": null,
1125
+ "prompt_id": "297fc59f-bd92-493b-ae61-3c3adcb46eb3",
1126
+ "prompt_jinja": "{% if language == \"english\" %} \nQuestion: {{question_text}}\nAnswer the question with {{\"Yes\"}} or {{\"No\"}}. If it is not possible then answer {{\"None\"}}.\nHint: {{document_plaintext}}\n|||\n{{annotations.yes_no_answer[0] | capitalize}}\n{% endif %}",
1127
+ "prompt_original_task": true,
1128
+ "comment": "",
1129
+ "acc_norm_stderr": 0.01041093017771443
1130
+ },
1131
+ {
1132
+ "task_name": "tydiqa_primary",
1133
+ "prompt_name": "en_yes_no_question",
1134
+ "acc": 0.7652764306498545,
1135
+ "fixed_answer_choice_list": [
1136
+ "Yes",
1137
+ "No"
1138
+ ],
1139
+ "dataset_path": "tydiqa",
1140
+ "dataset_name": "primary_task",
1141
+ "subset": null,
1142
+ "prompt_id": "6835dd64-96bd-4bf8-9ba5-645d6a7b8472",
1143
+ "prompt_jinja": "{% if language == \"english\" %}\n{{question_text}}\nIs this a \"Yes/No\" question?\n|||\n{% if annotations. yes_no_answer[0] == \"NONE\" %}\nNo\n{% else %}\nYes\n{% endif %}\n{% endif %}",
1144
+ "prompt_original_task": false,
1145
+ "comment": "",
1146
+ "acc_stderr": 0.013205927447521368
1147
+ },
1148
+ {
1149
+ "task_name": "tydiqa_primary",
1150
+ "prompt_name": "en_yes_no_question",
1151
+ "acc_norm": 0.07565470417070805,
1152
+ "fixed_answer_choice_list": [
1153
+ "Yes",
1154
+ "No"
1155
+ ],
1156
+ "dataset_path": "tydiqa",
1157
+ "dataset_name": "primary_task",
1158
+ "subset": null,
1159
+ "prompt_id": "6835dd64-96bd-4bf8-9ba5-645d6a7b8472",
1160
+ "prompt_jinja": "{% if language == \"english\" %}\n{{question_text}}\nIs this a \"Yes/No\" question?\n|||\n{% if annotations. yes_no_answer[0] == \"NONE\" %}\nNo\n{% else %}\nYes\n{% endif %}\n{% endif %}",
1161
+ "prompt_original_task": false,
1162
+ "comment": "",
1163
+ "acc_norm_stderr": 0.008239796273494257
1164
+ },
1165
+ {
1166
+ "task_name": "tydiqa_primary",
1167
+ "prompt_name": "id_after_reading_the_text",
1168
+ "acc": 0.2711864406779661,
1169
+ "fixed_answer_choice_list": [
1170
+ "Ya",
1171
+ "Tidak"
1172
+ ],
1173
+ "dataset_path": "tydiqa",
1174
+ "dataset_name": "primary_task",
1175
+ "subset": null,
1176
+ "prompt_id": "f93c6cde-cd5e-4d25-8549-f186546cea26",
1177
+ "prompt_jinja": "{% if language == \"indonesian\" %} \n {% if annotations.yes_no_answer[0] == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nSetelah membaca potongan teks Wikipedia di bawah ini, mohon jawab pertanyaan: \n{{question_text}} \n{{document_plaintext}}\n||| \n{{{\"NO\":\"Tidak\", \"YES\":\"Ya\", \"NONE\": \"Tidak ada\"}[annotations.yes_no_answer[0]]}}\n {% endif %}\n{% endif %}",
1178
+ "prompt_original_task": true,
1179
+ "comment": "",
1180
+ "acc_stderr": 0.058375177038848765
1181
+ },
1182
+ {
1183
+ "task_name": "tydiqa_primary",
1184
+ "prompt_name": "id_after_reading_the_text",
1185
+ "acc_norm": 0.2033898305084746,
1186
+ "fixed_answer_choice_list": [
1187
+ "Ya",
1188
+ "Tidak"
1189
+ ],
1190
+ "dataset_path": "tydiqa",
1191
+ "dataset_name": "primary_task",
1192
+ "subset": null,
1193
+ "prompt_id": "f93c6cde-cd5e-4d25-8549-f186546cea26",
1194
+ "prompt_jinja": "{% if language == \"indonesian\" %} \n {% if annotations.yes_no_answer[0] == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nSetelah membaca potongan teks Wikipedia di bawah ini, mohon jawab pertanyaan: \n{{question_text}} \n{{document_plaintext}}\n||| \n{{{\"NO\":\"Tidak\", \"YES\":\"Ya\", \"NONE\": \"Tidak ada\"}[annotations.yes_no_answer[0]]}}\n {% endif %}\n{% endif %}",
1195
+ "prompt_original_task": true,
1196
+ "comment": "",
1197
+ "acc_norm_stderr": 0.052853474644238056
1198
+ },
1199
+ {
1200
+ "task_name": "tydiqa_primary",
1201
+ "prompt_name": "id_based_on_the_text",
1202
+ "acc": 0.23728813559322035,
1203
+ "fixed_answer_choice_list": [
1204
+ "Ya",
1205
+ "Tidak"
1206
+ ],
1207
+ "dataset_path": "tydiqa",
1208
+ "dataset_name": "primary_task",
1209
+ "subset": null,
1210
+ "prompt_id": "fe910acd-a156-4f46-a757-4382821fcfd2",
1211
+ "prompt_jinja": "{% if language == \"indonesian\" %} \n {% if annotations.yes_no_answer[0] == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nMenurut potongan teks berikut, {{question_text}}\n{{document_plaintext}}\n||| \n{{{\"NO\":\"Tidak\", \"YES\":\"Ya\", \"NONE\": \"Tidak ada\"}[annotations.yes_no_answer[0]]}}\n {% endif %}\n{% endif %}",
1212
+ "prompt_original_task": true,
1213
+ "comment": "",
1214
+ "acc_stderr": 0.05586042894941199
1215
+ },
1216
+ {
1217
+ "task_name": "tydiqa_primary",
1218
+ "prompt_name": "id_based_on_the_text",
1219
+ "acc_norm": 0.2033898305084746,
1220
+ "fixed_answer_choice_list": [
1221
+ "Ya",
1222
+ "Tidak"
1223
+ ],
1224
+ "dataset_path": "tydiqa",
1225
+ "dataset_name": "primary_task",
1226
+ "subset": null,
1227
+ "prompt_id": "fe910acd-a156-4f46-a757-4382821fcfd2",
1228
+ "prompt_jinja": "{% if language == \"indonesian\" %} \n {% if annotations.yes_no_answer[0] == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nMenurut potongan teks berikut, {{question_text}}\n{{document_plaintext}}\n||| \n{{{\"NO\":\"Tidak\", \"YES\":\"Ya\", \"NONE\": \"Tidak ada\"}[annotations.yes_no_answer[0]]}}\n {% endif %}\n{% endif %}",
1229
+ "prompt_original_task": true,
1230
+ "comment": "",
1231
+ "acc_norm_stderr": 0.052853474644238056
1232
+ },
1233
+ {
1234
+ "task_name": "tydiqa_primary",
1235
+ "prompt_name": "id_heres_what_I_found",
1236
+ "acc": 0.007202216066481994,
1237
+ "fixed_answer_choice_list": [
1238
+ "Ya",
1239
+ "Tidak",
1240
+ "Tidak ada"
1241
+ ],
1242
+ "dataset_path": "tydiqa",
1243
+ "dataset_name": "primary_task",
1244
+ "subset": null,
1245
+ "prompt_id": "764fda4e-dc13-4766-b8ab-eafd79fe875e",
1246
+ "prompt_jinja": "{% if language == \"indonesian\" %}\nSaya penasaran {{question_text}}.\nTolong bantu saya menjawab pertanyaan ini dengan \"{{answer_choices[0]}}\", \"{{answer_choices[1]}}\" atau \"{{answer_choices[2]}}\" jika dua opsi pertama tidak bisa diaplikasikan.\nIni yang saya temukan di internet:\nTopik: {{document_title}}\nArtikel: {{document_plaintext}}\n|||\n{{{\"NO\":\"Tidak\",\"YES\": \"Ya\", \"NONE\": \"Tidak ada\"}[annotations.yes_no_answer[0]]}}\n{% endif %}",
1247
+ "prompt_original_task": true,
1248
+ "comment": "",
1249
+ "acc_stderr": 0.001990880560147875
1250
+ },
1251
+ {
1252
+ "task_name": "tydiqa_primary",
1253
+ "prompt_name": "id_heres_what_I_found",
1254
+ "acc_norm": 0.9662049861495845,
1255
+ "fixed_answer_choice_list": [
1256
+ "Ya",
1257
+ "Tidak",
1258
+ "Tidak ada"
1259
+ ],
1260
+ "dataset_path": "tydiqa",
1261
+ "dataset_name": "primary_task",
1262
+ "subset": null,
1263
+ "prompt_id": "764fda4e-dc13-4766-b8ab-eafd79fe875e",
1264
+ "prompt_jinja": "{% if language == \"indonesian\" %}\nSaya penasaran {{question_text}}.\nTolong bantu saya menjawab pertanyaan ini dengan \"{{answer_choices[0]}}\", \"{{answer_choices[1]}}\" atau \"{{answer_choices[2]}}\" jika dua opsi pertama tidak bisa diaplikasikan.\nIni yang saya temukan di internet:\nTopik: {{document_title}}\nArtikel: {{document_plaintext}}\n|||\n{{{\"NO\":\"Tidak\",\"YES\": \"Ya\", \"NONE\": \"Tidak ada\"}[annotations.yes_no_answer[0]]}}\n{% endif %}",
1265
+ "prompt_original_task": true,
1266
+ "comment": "",
1267
+ "acc_norm_stderr": 0.0042544427599910594
1268
+ },
1269
+ {
1270
+ "task_name": "tydiqa_primary",
1271
+ "prompt_name": "id_open_domain_qa",
1272
+ "acc": 0.4576271186440678,
1273
+ "fixed_answer_choice_list": [
1274
+ "Ya",
1275
+ "Tidak"
1276
+ ],
1277
+ "dataset_path": "tydiqa",
1278
+ "dataset_name": "primary_task",
1279
+ "subset": null,
1280
+ "prompt_id": "976fb48f-7135-4344-91c8-cee2e535b8ab",
1281
+ "prompt_jinja": "{% if language == \"indonesian\" %} \n {% if annotations.yes_no_answer[0] == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nJawab pertanyaan terkait {{document_title}}.\nPertanyaan: {{question_text}}. Ya atau Tidak?\n||| \n{{{\"NO\":\"Tidak\",\"YES\": \"Ya\", \"NONE\": \"Tidak ada\"}[annotations.yes_no_answer[0]]}}\n {% endif %}\n{% endif %}",
1282
+ "prompt_original_task": false,
1283
+ "comment": "",
1284
+ "acc_stderr": 0.06541703602400106
1285
+ },
1286
+ {
1287
+ "task_name": "tydiqa_primary",
1288
+ "prompt_name": "id_open_domain_qa",
1289
+ "acc_norm": 0.2033898305084746,
1290
+ "fixed_answer_choice_list": [
1291
+ "Ya",
1292
+ "Tidak"
1293
+ ],
1294
+ "dataset_path": "tydiqa",
1295
+ "dataset_name": "primary_task",
1296
+ "subset": null,
1297
+ "prompt_id": "976fb48f-7135-4344-91c8-cee2e535b8ab",
1298
+ "prompt_jinja": "{% if language == \"indonesian\" %} \n {% if annotations.yes_no_answer[0] == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nJawab pertanyaan terkait {{document_title}}.\nPertanyaan: {{question_text}}. Ya atau Tidak?\n||| \n{{{\"NO\":\"Tidak\",\"YES\": \"Ya\", \"NONE\": \"Tidak ada\"}[annotations.yes_no_answer[0]]}}\n {% endif %}\n{% endif %}",
1299
+ "prompt_original_task": false,
1300
+ "comment": "",
1301
+ "acc_norm_stderr": 0.052853474644238056
1302
+ },
1303
+ {
1304
+ "task_name": "tydiqa_primary",
1305
+ "prompt_name": "id_open_domain_qa_without_choices",
1306
+ "acc": 0.2711864406779661,
1307
+ "fixed_answer_choice_list": [
1308
+ "Ya",
1309
+ "Tidak"
1310
+ ],
1311
+ "dataset_path": "tydiqa",
1312
+ "dataset_name": "primary_task",
1313
+ "subset": null,
1314
+ "prompt_id": "d6139cbc-7b25-4539-80c7-2b0832183951",
1315
+ "prompt_jinja": "{% if language == \"indonesian\" %} \n {% if annotations.yes_no_answer[0] == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nJawab pertanyaan terkait {{document_title}}. {{question_text}}\n||| \n{{{\"NO\":\"Tidak\",\"YES\": \"Ya\", \"NONE\": \"Tidak ada\"}[annotations.yes_no_answer[0]]}}\n {% endif %} \n{% endif %} ",
1316
+ "prompt_original_task": false,
1317
+ "comment": "",
1318
+ "acc_stderr": 0.05837517703884878
1319
+ },
1320
+ {
1321
+ "task_name": "tydiqa_primary",
1322
+ "prompt_name": "id_open_domain_qa_without_choices",
1323
+ "acc_norm": 0.2033898305084746,
1324
+ "fixed_answer_choice_list": [
1325
+ "Ya",
1326
+ "Tidak"
1327
+ ],
1328
+ "dataset_path": "tydiqa",
1329
+ "dataset_name": "primary_task",
1330
+ "subset": null,
1331
+ "prompt_id": "d6139cbc-7b25-4539-80c7-2b0832183951",
1332
+ "prompt_jinja": "{% if language == \"indonesian\" %} \n {% if annotations.yes_no_answer[0] == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \nJawab pertanyaan terkait {{document_title}}. {{question_text}}\n||| \n{{{\"NO\":\"Tidak\",\"YES\": \"Ya\", \"NONE\": \"Tidak ada\"}[annotations.yes_no_answer[0]]}}\n {% endif %} \n{% endif %} ",
1333
+ "prompt_original_task": false,
1334
+ "comment": "",
1335
+ "acc_norm_stderr": 0.052853474644238056
1336
+ },
1337
+ {
1338
+ "task_name": "tydiqa_primary",
1339
+ "prompt_name": "id_read_and_answer",
1340
+ "acc": 0.007202216066481994,
1341
+ "fixed_answer_choice_list": [
1342
+ "Ya",
1343
+ "Tidak",
1344
+ "Tidak ada"
1345
+ ],
1346
+ "dataset_path": "tydiqa",
1347
+ "dataset_name": "primary_task",
1348
+ "subset": null,
1349
+ "prompt_id": "b368b4af-b8b1-4b0f-ab72-a4db0649ca65",
1350
+ "prompt_jinja": "{% if language == \"indonesian\" %}\nJawab pertanyaan berikut dengan \"{{answer_choices[0]}}\" atau \"{{answer_choices[1]}}\" atau \"{{answer_choices[2]}}\" jika dua\nopsi pertama tidak dapat diaplikasikan.\nPertanyaan: {{question_text}}\nTopik: {{document_title}}\nArtikel: {{document_plaintext}}\n|||\n{{{\"NO\":\"Tidak\",\"YES\": \"Ya\", \"NONE\": \"Tidak ada\"}[annotations.yes_no_answer[0]]}}\n{% endif %}",
1351
+ "prompt_original_task": true,
1352
+ "comment": "",
1353
+ "acc_stderr": 0.0019908805601478756
1354
+ },
1355
+ {
1356
+ "task_name": "tydiqa_primary",
1357
+ "prompt_name": "id_read_and_answer",
1358
+ "acc_norm": 0.9662049861495845,
1359
+ "fixed_answer_choice_list": [
1360
+ "Ya",
1361
+ "Tidak",
1362
+ "Tidak ada"
1363
+ ],
1364
+ "dataset_path": "tydiqa",
1365
+ "dataset_name": "primary_task",
1366
+ "subset": null,
1367
+ "prompt_id": "b368b4af-b8b1-4b0f-ab72-a4db0649ca65",
1368
+ "prompt_jinja": "{% if language == \"indonesian\" %}\nJawab pertanyaan berikut dengan \"{{answer_choices[0]}}\" atau \"{{answer_choices[1]}}\" atau \"{{answer_choices[2]}}\" jika dua\nopsi pertama tidak dapat diaplikasikan.\nPertanyaan: {{question_text}}\nTopik: {{document_title}}\nArtikel: {{document_plaintext}}\n|||\n{{{\"NO\":\"Tidak\",\"YES\": \"Ya\", \"NONE\": \"Tidak ada\"}[annotations.yes_no_answer[0]]}}\n{% endif %}",
1369
+ "prompt_original_task": true,
1370
+ "comment": "",
1371
+ "acc_norm_stderr": 0.0042544427599910594
1372
+ },
1373
+ {
1374
+ "task_name": "tydiqa_primary",
1375
+ "prompt_name": "id_yes_no_none",
1376
+ "acc": 0.008310249307479225,
1377
+ "fixed_answer_choice_list": [
1378
+ "Ya",
1379
+ "Tidak",
1380
+ "Tidak ada"
1381
+ ],
1382
+ "dataset_path": "tydiqa",
1383
+ "dataset_name": "primary_task",
1384
+ "subset": null,
1385
+ "prompt_id": "5c48e84c-10e4-44ee-b2b3-94a4d018e833",
1386
+ "prompt_jinja": "{% if language == \"indonesian\" %} \nPertanyaan: {{question_text}}\nJawab pertanyaan tersebut dengan {{\"Ya\"}} atau {{\"Tidak\"}}. Jika tidak memungkinkan, jawab dengan {{\"Tidak ada\"}}.\nPetunjuk: {{document_plaintext}}\n|||\n{{{\"NO\":\"Tidak\",\"YES\": \"Ya\", \"NONE\": \"Tidak ada\"}[annotations.yes_no_answer[0]]}}\n{% endif %}",
1387
+ "prompt_original_task": true,
1388
+ "comment": "",
1389
+ "acc_stderr": 0.002137355052582956
1390
+ },
1391
+ {
1392
+ "task_name": "tydiqa_primary",
1393
+ "prompt_name": "id_yes_no_none",
1394
+ "acc_norm": 0.9662049861495845,
1395
+ "fixed_answer_choice_list": [
1396
+ "Ya",
1397
+ "Tidak",
1398
+ "Tidak ada"
1399
+ ],
1400
+ "dataset_path": "tydiqa",
1401
+ "dataset_name": "primary_task",
1402
+ "subset": null,
1403
+ "prompt_id": "5c48e84c-10e4-44ee-b2b3-94a4d018e833",
1404
+ "prompt_jinja": "{% if language == \"indonesian\" %} \nPertanyaan: {{question_text}}\nJawab pertanyaan tersebut dengan {{\"Ya\"}} atau {{\"Tidak\"}}. Jika tidak memungkinkan, jawab dengan {{\"Tidak ada\"}}.\nPetunjuk: {{document_plaintext}}\n|||\n{{{\"NO\":\"Tidak\",\"YES\": \"Ya\", \"NONE\": \"Tidak ada\"}[annotations.yes_no_answer[0]]}}\n{% endif %}",
1405
+ "prompt_original_task": true,
1406
+ "comment": "",
1407
+ "acc_norm_stderr": 0.0042544427599910594
1408
+ },
1409
+ {
1410
+ "task_name": "tydiqa_primary",
1411
+ "prompt_name": "id_yes_no_question",
1412
+ "acc": 0.8138504155124654,
1413
+ "fixed_answer_choice_list": [
1414
+ "Ya",
1415
+ "Tidak"
1416
+ ],
1417
+ "dataset_path": "tydiqa",
1418
+ "dataset_name": "primary_task",
1419
+ "subset": null,
1420
+ "prompt_id": "ebba1db1-daf2-4e40-9dca-4cbe4298cd3e",
1421
+ "prompt_jinja": "{% if language == \"indonesian\" %}\n{{question_text}}\nApakah ini termasuk kalimat tanya \"Ya/Tidak\"?\n|||\n{% if annotations. yes_no_answer[0] == \"NONE\" %}\nTidak\n{% else %}\nYa\n{% endif %}\n{% endif %}",
1422
+ "prompt_original_task": false,
1423
+ "comment": "",
1424
+ "acc_stderr": 0.009163999646097152
1425
+ },
1426
+ {
1427
+ "task_name": "tydiqa_primary",
1428
+ "prompt_name": "id_yes_no_question",
1429
+ "acc_norm": 0.9673130193905817,
1430
+ "fixed_answer_choice_list": [
1431
+ "Ya",
1432
+ "Tidak"
1433
+ ],
1434
+ "dataset_path": "tydiqa",
1435
+ "dataset_name": "primary_task",
1436
+ "subset": null,
1437
+ "prompt_id": "ebba1db1-daf2-4e40-9dca-4cbe4298cd3e",
1438
+ "prompt_jinja": "{% if language == \"indonesian\" %}\n{{question_text}}\nApakah ini termasuk kalimat tanya \"Ya/Tidak\"?\n|||\n{% if annotations. yes_no_answer[0] == \"NONE\" %}\nTidak\n{% else %}\nYa\n{% endif %}\n{% endif %}",
1439
+ "prompt_original_task": false,
1440
+ "comment": "",
1441
+ "acc_norm_stderr": 0.0041865150102794995
1442
+ },
1443
+ {
1444
+ "task_name": "tydiqa_primary",
1445
+ "prompt_name": "jp_after_reading_the_text",
1446
+ "acc": 0.7635135135135135,
1447
+ "fixed_answer_choice_list": [
1448
+ "\u306f\u3044",
1449
+ "\u3044\u3044\u3048"
1450
+ ],
1451
+ "dataset_path": "tydiqa",
1452
+ "dataset_name": "primary_task",
1453
+ "subset": null,
1454
+ "prompt_id": "d160228e-9169-456d-a16a-0f5288452c9a",
1455
+ "prompt_jinja": "{% if language == \"japanese\" %}\n {% if annotations.yes_no_answer[0] == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \n \u30a6\u30a3\u30ad\u30da\u30c7\u30a3\u30a2\u306e\u8a18\u4e8b\u3092\u8aad\u3093\u3060\u3042\u3068\u3001\u6b21\u306e\u8cea\u554f\u306b\u7b54\u3048\u3066\u304f\u3060\u3055\u3044: {{question_text}}\n {{document_plaintext}}\n |||\n {{{\"YES\":\"\u306f\u3044\", \"NO\":\"\u3044\u3044\u3048\", \"NONE\": \"\u3069\u3061\u3089\u3067\u3082\u306a\u3044\"}[annotations.yes_no_answer[0]]}}\n {% endif %}\n{% endif %}",
1456
+ "prompt_original_task": true,
1457
+ "comment": "",
1458
+ "acc_stderr": 0.03504716241250439
1459
+ },
1460
+ {
1461
+ "task_name": "tydiqa_primary",
1462
+ "prompt_name": "jp_after_reading_the_text",
1463
+ "acc_norm": 0.2972972972972973,
1464
+ "fixed_answer_choice_list": [
1465
+ "\u306f\u3044",
1466
+ "\u3044\u3044\u3048"
1467
+ ],
1468
+ "dataset_path": "tydiqa",
1469
+ "dataset_name": "primary_task",
1470
+ "subset": null,
1471
+ "prompt_id": "d160228e-9169-456d-a16a-0f5288452c9a",
1472
+ "prompt_jinja": "{% if language == \"japanese\" %}\n {% if annotations.yes_no_answer[0] == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %} \n \u30a6\u30a3\u30ad\u30da\u30c7\u30a3\u30a2\u306e\u8a18\u4e8b\u3092\u8aad\u3093\u3060\u3042\u3068\u3001\u6b21\u306e\u8cea\u554f\u306b\u7b54\u3048\u3066\u304f\u3060\u3055\u3044: {{question_text}}\n {{document_plaintext}}\n |||\n {{{\"YES\":\"\u306f\u3044\", \"NO\":\"\u3044\u3044\u3048\", \"NONE\": \"\u3069\u3061\u3089\u3067\u3082\u306a\u3044\"}[annotations.yes_no_answer[0]]}}\n {% endif %}\n{% endif %}",
1473
+ "prompt_original_task": true,
1474
+ "comment": "",
1475
+ "acc_norm_stderr": 0.037698374558241474
1476
+ },
1477
+ {
1478
+ "task_name": "tydiqa_primary",
1479
+ "prompt_name": "jp_based_on_the_text",
1480
+ "acc": 0.7635135135135135,
1481
+ "fixed_answer_choice_list": [
1482
+ "\u306f\u3044",
1483
+ "\u3044\u3044\u3048"
1484
+ ],
1485
+ "dataset_path": "tydiqa",
1486
+ "dataset_name": "primary_task",
1487
+ "subset": null,
1488
+ "prompt_id": "733a3ff3-6edd-4440-b038-bf9736ebaff7",
1489
+ "prompt_jinja": "{% if language == \"japanese\" %}\n {% if annotations.yes_no_answer[0] == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %}\n \u6b21\u306e\u6587\u7ae0\u306b\u3082\u3068\u3065\u304f\u3068\u3001 , {{question_text}} \n {{document_plaintext}}\n ||| \n {{{\"YES\":\"\u306f\u3044\", \"NO\":\"\u3044\u3044\u3048\", \"NONE\": \"\u3069\u3061\u3089\u3067\u3082\u306a\u3044\"}[annotations.yes_no_answer[0]]}}\n {% endif %}\n{% endif %}",
1490
+ "prompt_original_task": true,
1491
+ "comment": "",
1492
+ "acc_stderr": 0.03504716241250439
1493
+ },
1494
+ {
1495
+ "task_name": "tydiqa_primary",
1496
+ "prompt_name": "jp_based_on_the_text",
1497
+ "acc_norm": 0.2905405405405405,
1498
+ "fixed_answer_choice_list": [
1499
+ "\u306f\u3044",
1500
+ "\u3044\u3044\u3048"
1501
+ ],
1502
+ "dataset_path": "tydiqa",
1503
+ "dataset_name": "primary_task",
1504
+ "subset": null,
1505
+ "prompt_id": "733a3ff3-6edd-4440-b038-bf9736ebaff7",
1506
+ "prompt_jinja": "{% if language == \"japanese\" %}\n {% if annotations.yes_no_answer[0] == \"YES\" or annotations.yes_no_answer[0] == \"NO\" %}\n \u6b21\u306e\u6587\u7ae0\u306b\u3082\u3068\u3065\u304f\u3068\u3001 , {{question_text}} \n {{document_plaintext}}\n ||| \n {{{\"YES\":\"\u306f\u3044\", \"NO\":\"\u3044\u3044\u3048\", \"NONE\": \"\u3069\u3061\u3089\u3067\u3082\u306a\u3044\"}[annotations.yes_no_answer[0]]}}\n {% endif %}\n{% endif %}",
1507
+ "prompt_original_task": true,
1508
+ "comment": "",
1509
+ "acc_norm_stderr": 0.03744626397928733
1510
+ }
1511
+ ],
1512
+ "versions": {
1513
+ "multirc+I was going to say\u2026": 0,
1514
+ "multirc+Would it be good to answer\u2026": 0,
1515
+ "multirc+confirm": 0,
1516
+ "multirc+correct": 0,
1517
+ "multirc+decide_valid": 0,
1518
+ "multirc+found_this_answer": 0,
1519
+ "multirc+grading": 0,
1520
+ "multirc+is the correct answer\u2026": 0,
1521
+ "multirc+is\u2026 a correct answer?": 0,
1522
+ "multirc+paragraph\u2026 question\u2026 is it\u2026 ?": 0,
1523
+ "qqp+answer": 0,
1524
+ "qqp+duplicate": 0,
1525
+ "qqp+duplicate or not": 0,
1526
+ "qqp+meaning": 0,
1527
+ "qqp+quora": 0,
1528
+ "qqp+same thing": 0,
1529
+ "rte+does the claim\u2026 follow the fact\u2026": 0,
1530
+ "rte+entailment explained": 0,
1531
+ "rte+imply": 0,
1532
+ "rte+imply separated": 0,
1533
+ "rte+mean": 0,
1534
+ "sst+following positive negative": 0,
1535
+ "sst+happy or mad": 0,
1536
+ "sst+positive negative after": 0,
1537
+ "sst+review": 0,
1538
+ "sst+said": 0,
1539
+ "tydiqa_primary+en_after_reading_the_text": 0,
1540
+ "tydiqa_primary+en_based_on_the_text": 0,
1541
+ "tydiqa_primary+en_heres_what_I_found": 0,
1542
+ "tydiqa_primary+en_open_domain_qa": 0,
1543
+ "tydiqa_primary+en_open_domain_qa_without_choices": 0,
1544
+ "tydiqa_primary+en_read_and_answer": 0,
1545
+ "tydiqa_primary+en_yes_no_none": 0,
1546
+ "tydiqa_primary+en_yes_no_question": 0,
1547
+ "tydiqa_primary+id_after_reading_the_text": 0,
1548
+ "tydiqa_primary+id_based_on_the_text": 0,
1549
+ "tydiqa_primary+id_heres_what_I_found": 0,
1550
+ "tydiqa_primary+id_open_domain_qa": 0,
1551
+ "tydiqa_primary+id_open_domain_qa_without_choices": 0,
1552
+ "tydiqa_primary+id_read_and_answer": 0,
1553
+ "tydiqa_primary+id_yes_no_none": 0,
1554
+ "tydiqa_primary+id_yes_no_question": 0,
1555
+ "tydiqa_primary+jp_after_reading_the_text": 0,
1556
+ "tydiqa_primary+jp_based_on_the_text": 0
1557
+ },
1558
+ "table_results": {
1559
+ "multirc+I was going to say\u2026": {
1560
+ "task_name": "multirc",
1561
+ "prompt_name": "I was going to say\u2026",
1562
+ "acc": 0.5724009900990099,
1563
+ "acc_stderr": 0.007106111600745623,
1564
+ "acc_norm": 0.42883663366336633,
1565
+ "acc_norm_stderr": 0.00710869042313772
1566
+ },
1567
+ "multirc+Would it be good to answer\u2026": {
1568
+ "task_name": "multirc",
1569
+ "prompt_name": "Would it be good to answer\u2026",
1570
+ "acc": 0.5204207920792079,
1571
+ "acc_stderr": 0.0071758108566598,
1572
+ "acc_norm": 0.43337458745874585,
1573
+ "acc_norm_stderr": 0.00711775827463544
1574
+ },
1575
+ "multirc+confirm": {
1576
+ "task_name": "multirc",
1577
+ "prompt_name": "confirm",
1578
+ "acc": 0.4329620462046205,
1579
+ "acc_stderr": 0.007116959070151668,
1580
+ "acc_norm": 0.4280115511551155,
1581
+ "acc_norm_stderr": 0.007106976252751536
1582
+ },
1583
+ "multirc+correct": {
1584
+ "task_name": "multirc",
1585
+ "prompt_name": "correct",
1586
+ "acc": 0.5721947194719472,
1587
+ "acc_stderr": 0.007106544557507229,
1588
+ "acc_norm": 0.4709158415841584,
1589
+ "acc_norm_stderr": 0.00716964280499065
1590
+ },
1591
+ "multirc+decide_valid": {
1592
+ "task_name": "multirc",
1593
+ "prompt_name": "decide_valid",
1594
+ "acc": 0.5375412541254125,
1595
+ "acc_stderr": 0.007161531207958062,
1596
+ "acc_norm": 0.4280115511551155,
1597
+ "acc_norm_stderr": 0.007106976252751536
1598
+ },
1599
+ "multirc+found_this_answer": {
1600
+ "task_name": "multirc",
1601
+ "prompt_name": "found_this_answer",
1602
+ "acc": 0.4773102310231023,
1603
+ "acc_stderr": 0.007174404542630741,
1604
+ "acc_norm": 0.4280115511551155,
1605
+ "acc_norm_stderr": 0.007106976252751536
1606
+ },
1607
+ "multirc+grading": {
1608
+ "task_name": "multirc",
1609
+ "prompt_name": "grading",
1610
+ "acc": 0.5874587458745875,
1611
+ "acc_stderr": 0.007071081930208332,
1612
+ "acc_norm": 0.4280115511551155,
1613
+ "acc_norm_stderr": 0.007106976252751536
1614
+ },
1615
+ "multirc+is the correct answer\u2026": {
1616
+ "task_name": "multirc",
1617
+ "prompt_name": "is the correct answer\u2026",
1618
+ "acc": 0.5478547854785478,
1619
+ "acc_stderr": 0.007148833615093023,
1620
+ "acc_norm": 0.4278052805280528,
1621
+ "acc_norm_stderr": 0.007106544557507229
1622
+ },
1623
+ "multirc+is\u2026 a correct answer?": {
1624
+ "task_name": "multirc",
1625
+ "prompt_name": "is\u2026 a correct answer?",
1626
+ "acc": 0.45028877887788776,
1627
+ "acc_stderr": 0.007146219530521704,
1628
+ "acc_norm": 0.4280115511551155,
1629
+ "acc_norm_stderr": 0.007106976252751536
1630
+ },
1631
+ "multirc+paragraph\u2026 question\u2026 is it\u2026 ?": {
1632
+ "task_name": "multirc",
1633
+ "prompt_name": "paragraph\u2026 question\u2026 is it\u2026 ?",
1634
+ "acc": 0.5581683168316832,
1635
+ "acc_stderr": 0.007133037518848498,
1636
+ "acc_norm": 0.429042904290429,
1637
+ "acc_norm_stderr": 0.007109115814226985
1638
+ },
1639
+ "qqp+answer": {
1640
+ "task_name": "qqp",
1641
+ "prompt_name": "answer",
1642
+ "acc": 0.4095720999257977,
1643
+ "acc_stderr": 0.0024456940020775335,
1644
+ "acc_norm": 0.36816720257234725,
1645
+ "acc_norm_stderr": 0.002398706610614492
1646
+ },
1647
+ "qqp+duplicate": {
1648
+ "task_name": "qqp",
1649
+ "prompt_name": "duplicate",
1650
+ "acc": 0.5389809547365817,
1651
+ "acc_stderr": 0.0024791319564636633,
1652
+ "acc_norm": 0.36816720257234725,
1653
+ "acc_norm_stderr": 0.002398706610614492
1654
+ },
1655
+ "qqp+duplicate or not": {
1656
+ "task_name": "qqp",
1657
+ "prompt_name": "duplicate or not",
1658
+ "acc": 0.3811526094484294,
1659
+ "acc_stderr": 0.0024154315297388092,
1660
+ "acc_norm": 0.6317585951026465,
1661
+ "acc_norm_stderr": 0.00239880745215712
1662
+ },
1663
+ "qqp+meaning": {
1664
+ "task_name": "qqp",
1665
+ "prompt_name": "meaning",
1666
+ "acc": 0.3842443729903537,
1667
+ "acc_stderr": 0.0024191425100536248,
1668
+ "acc_norm": 0.36816720257234725,
1669
+ "acc_norm_stderr": 0.002398706610614492
1670
+ },
1671
+ "qqp+quora": {
1672
+ "task_name": "qqp",
1673
+ "prompt_name": "quora",
1674
+ "acc": 0.36826613900568883,
1675
+ "acc_stderr": 0.002398841052447127,
1676
+ "acc_norm": 0.36816720257234725,
1677
+ "acc_norm_stderr": 0.002398706610614492
1678
+ },
1679
+ "qqp+same thing": {
1680
+ "task_name": "qqp",
1681
+ "prompt_name": "same thing",
1682
+ "acc": 0.5813999505317833,
1683
+ "acc_stderr": 0.0024535258231136925,
1684
+ "acc_norm": 0.36816720257234725,
1685
+ "acc_norm_stderr": 0.002398706610614492
1686
+ },
1687
+ "rte+does the claim\u2026 follow the fact\u2026": {
1688
+ "task_name": "rte",
1689
+ "prompt_name": "does the claim\u2026 follow the fact\u2026",
1690
+ "acc": 0.4729241877256318,
1691
+ "acc_stderr": 0.0300523034631437,
1692
+ "acc_norm": 0.5270758122743683,
1693
+ "acc_norm_stderr": 0.0300523034631437
1694
+ },
1695
+ "rte+entailment explained": {
1696
+ "task_name": "rte",
1697
+ "prompt_name": "entailment explained",
1698
+ "acc": 0.516245487364621,
1699
+ "acc_stderr": 0.030080573208738064,
1700
+ "acc_norm": 0.4729241877256318,
1701
+ "acc_norm_stderr": 0.0300523034631437
1702
+ },
1703
+ "rte+imply": {
1704
+ "task_name": "rte",
1705
+ "prompt_name": "imply",
1706
+ "acc": 0.47653429602888087,
1707
+ "acc_stderr": 0.030063300411902652,
1708
+ "acc_norm": 0.5270758122743683,
1709
+ "acc_norm_stderr": 0.0300523034631437
1710
+ },
1711
+ "rte+imply separated": {
1712
+ "task_name": "rte",
1713
+ "prompt_name": "imply separated",
1714
+ "acc": 0.4620938628158845,
1715
+ "acc_stderr": 0.03000984891252911,
1716
+ "acc_norm": 0.5270758122743683,
1717
+ "acc_norm_stderr": 0.0300523034631437
1718
+ },
1719
+ "rte+mean": {
1720
+ "task_name": "rte",
1721
+ "prompt_name": "mean",
1722
+ "acc": 0.47653429602888087,
1723
+ "acc_stderr": 0.030063300411902652,
1724
+ "acc_norm": 0.5270758122743683,
1725
+ "acc_norm_stderr": 0.0300523034631437
1726
+ },
1727
+ "sst+following positive negative": {
1728
+ "task_name": "sst",
1729
+ "prompt_name": "following positive negative",
1730
+ "acc": 0.7603211009174312,
1731
+ "acc_stderr": 0.014464530608155847,
1732
+ "acc_norm": 0.7603211009174312,
1733
+ "acc_norm_stderr": 0.014464530608155847
1734
+ },
1735
+ "sst+happy or mad": {
1736
+ "task_name": "sst",
1737
+ "prompt_name": "happy or mad",
1738
+ "acc": 0.5091743119266054,
1739
+ "acc_stderr": 0.01693900152535154,
1740
+ "acc_norm": 0.5091743119266054,
1741
+ "acc_norm_stderr": 0.01693900152535154
1742
+ },
1743
+ "sst+positive negative after": {
1744
+ "task_name": "sst",
1745
+ "prompt_name": "positive negative after",
1746
+ "acc": 0.5263761467889908,
1747
+ "acc_stderr": 0.016918264333564144,
1748
+ "acc_norm": 0.5263761467889908,
1749
+ "acc_norm_stderr": 0.016918264333564144
1750
+ },
1751
+ "sst+review": {
1752
+ "task_name": "sst",
1753
+ "prompt_name": "review",
1754
+ "acc": 0.5722477064220184,
1755
+ "acc_stderr": 0.016764056901835654,
1756
+ "acc_norm": 0.5722477064220184,
1757
+ "acc_norm_stderr": 0.016764056901835654
1758
+ },
1759
+ "sst+said": {
1760
+ "task_name": "sst",
1761
+ "prompt_name": "said",
1762
+ "acc": 0.5022935779816514,
1763
+ "acc_stderr": 0.016941675443113525,
1764
+ "acc_norm": 0.5091743119266054,
1765
+ "acc_norm_stderr": 0.01693900152535154
1766
+ },
1767
+ "tydiqa_primary+en_after_reading_the_text": {
1768
+ "task_name": "tydiqa_primary",
1769
+ "prompt_name": "en_after_reading_the_text",
1770
+ "acc": 0.35064935064935066,
1771
+ "acc_stderr": 0.054735534443086,
1772
+ "acc_norm": 0.6493506493506493,
1773
+ "acc_norm_stderr": 0.054735534443086
1774
+ },
1775
+ "tydiqa_primary+en_based_on_the_text": {
1776
+ "task_name": "tydiqa_primary",
1777
+ "prompt_name": "en_based_on_the_text",
1778
+ "acc": 0.33766233766233766,
1779
+ "acc_stderr": 0.05424681453014242,
1780
+ "acc_norm": 0.6363636363636364,
1781
+ "acc_norm_stderr": 0.055179725333353066
1782
+ },
1783
+ "tydiqa_primary+en_heres_what_I_found": {
1784
+ "task_name": "tydiqa_primary",
1785
+ "prompt_name": "en_heres_what_I_found",
1786
+ "acc": 0.03685741998060136,
1787
+ "acc_stderr": 0.005870689955728106,
1788
+ "acc_norm": 0.8661493695441319,
1789
+ "acc_norm_stderr": 0.010609330898735572
1790
+ },
1791
+ "tydiqa_primary+en_open_domain_qa": {
1792
+ "task_name": "tydiqa_primary",
1793
+ "prompt_name": "en_open_domain_qa",
1794
+ "acc": 0.6753246753246753,
1795
+ "acc_stderr": 0.05371235012133188,
1796
+ "acc_norm": 0.6753246753246753,
1797
+ "acc_norm_stderr": 0.05371235012133188
1798
+ },
1799
+ "tydiqa_primary+en_open_domain_qa_without_choices": {
1800
+ "task_name": "tydiqa_primary",
1801
+ "prompt_name": "en_open_domain_qa_without_choices",
1802
+ "acc": 0.6753246753246753,
1803
+ "acc_stderr": 0.05371235012133188,
1804
+ "acc_norm": 0.6753246753246753,
1805
+ "acc_norm_stderr": 0.05371235012133188
1806
+ },
1807
+ "tydiqa_primary+en_read_and_answer": {
1808
+ "task_name": "tydiqa_primary",
1809
+ "prompt_name": "en_read_and_answer",
1810
+ "acc": 0.03685741998060136,
1811
+ "acc_stderr": 0.005870689955728103,
1812
+ "acc_norm": 0.8845780795344326,
1813
+ "acc_norm_stderr": 0.009956200231519313
1814
+ },
1815
+ "tydiqa_primary+en_yes_no_none": {
1816
+ "task_name": "tydiqa_primary",
1817
+ "prompt_name": "en_yes_no_none",
1818
+ "acc": 0.037827352085354024,
1819
+ "acc_stderr": 0.005944438823944305,
1820
+ "acc_norm": 0.871968962172648,
1821
+ "acc_norm_stderr": 0.01041093017771443
1822
+ },
1823
+ "tydiqa_primary+en_yes_no_question": {
1824
+ "task_name": "tydiqa_primary",
1825
+ "prompt_name": "en_yes_no_question",
1826
+ "acc": 0.7652764306498545,
1827
+ "acc_stderr": 0.013205927447521368,
1828
+ "acc_norm": 0.07565470417070805,
1829
+ "acc_norm_stderr": 0.008239796273494257
1830
+ },
1831
+ "tydiqa_primary+id_after_reading_the_text": {
1832
+ "task_name": "tydiqa_primary",
1833
+ "prompt_name": "id_after_reading_the_text",
1834
+ "acc": 0.2711864406779661,
1835
+ "acc_stderr": 0.058375177038848765,
1836
+ "acc_norm": 0.2033898305084746,
1837
+ "acc_norm_stderr": 0.052853474644238056
1838
+ },
1839
+ "tydiqa_primary+id_based_on_the_text": {
1840
+ "task_name": "tydiqa_primary",
1841
+ "prompt_name": "id_based_on_the_text",
1842
+ "acc": 0.23728813559322035,
1843
+ "acc_stderr": 0.05586042894941199,
1844
+ "acc_norm": 0.2033898305084746,
1845
+ "acc_norm_stderr": 0.052853474644238056
1846
+ },
1847
+ "tydiqa_primary+id_heres_what_I_found": {
1848
+ "task_name": "tydiqa_primary",
1849
+ "prompt_name": "id_heres_what_I_found",
1850
+ "acc": 0.007202216066481994,
1851
+ "acc_stderr": 0.001990880560147875,
1852
+ "acc_norm": 0.9662049861495845,
1853
+ "acc_norm_stderr": 0.0042544427599910594
1854
+ },
1855
+ "tydiqa_primary+id_open_domain_qa": {
1856
+ "task_name": "tydiqa_primary",
1857
+ "prompt_name": "id_open_domain_qa",
1858
+ "acc": 0.4576271186440678,
1859
+ "acc_stderr": 0.06541703602400106,
1860
+ "acc_norm": 0.2033898305084746,
1861
+ "acc_norm_stderr": 0.052853474644238056
1862
+ },
1863
+ "tydiqa_primary+id_open_domain_qa_without_choices": {
1864
+ "task_name": "tydiqa_primary",
1865
+ "prompt_name": "id_open_domain_qa_without_choices",
1866
+ "acc": 0.2711864406779661,
1867
+ "acc_stderr": 0.05837517703884878,
1868
+ "acc_norm": 0.2033898305084746,
1869
+ "acc_norm_stderr": 0.052853474644238056
1870
+ },
1871
+ "tydiqa_primary+id_read_and_answer": {
1872
+ "task_name": "tydiqa_primary",
1873
+ "prompt_name": "id_read_and_answer",
1874
+ "acc": 0.007202216066481994,
1875
+ "acc_stderr": 0.0019908805601478756,
1876
+ "acc_norm": 0.9662049861495845,
1877
+ "acc_norm_stderr": 0.0042544427599910594
1878
+ },
1879
+ "tydiqa_primary+id_yes_no_none": {
1880
+ "task_name": "tydiqa_primary",
1881
+ "prompt_name": "id_yes_no_none",
1882
+ "acc": 0.008310249307479225,
1883
+ "acc_stderr": 0.002137355052582956,
1884
+ "acc_norm": 0.9662049861495845,
1885
+ "acc_norm_stderr": 0.0042544427599910594
1886
+ },
1887
+ "tydiqa_primary+id_yes_no_question": {
1888
+ "task_name": "tydiqa_primary",
1889
+ "prompt_name": "id_yes_no_question",
1890
+ "acc": 0.8138504155124654,
1891
+ "acc_stderr": 0.009163999646097152,
1892
+ "acc_norm": 0.9673130193905817,
1893
+ "acc_norm_stderr": 0.0041865150102794995
1894
+ },
1895
+ "tydiqa_primary+jp_after_reading_the_text": {
1896
+ "task_name": "tydiqa_primary",
1897
+ "prompt_name": "jp_after_reading_the_text",
1898
+ "acc": 0.7635135135135135,
1899
+ "acc_stderr": 0.03504716241250439,
1900
+ "acc_norm": 0.2972972972972973,
1901
+ "acc_norm_stderr": 0.037698374558241474
1902
+ },
1903
+ "tydiqa_primary+jp_based_on_the_text": {
1904
+ "task_name": "tydiqa_primary",
1905
+ "prompt_name": "jp_based_on_the_text",
1906
+ "acc": 0.7635135135135135,
1907
+ "acc_stderr": 0.03504716241250439,
1908
+ "acc_norm": 0.2905405405405405,
1909
+ "acc_norm_stderr": 0.03744626397928733
1910
+ }
1911
+ },
1912
+ "config": {
1913
+ "adaptive_seq_len": true,
1914
+ "num_fewshot": 0,
1915
+ "bootstrap_iters": 100000
1916
+ }
1917
+ }
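An editorial aside on reading these numbers (not part of the original results file): the reported acc_stderr values are consistent with the binomial standard error computed from the sample standard deviation, sqrt(p * (1 - p) / (n - 1)). A minimal sketch of the check, assuming the MultiRC validation split has n = 4848 examples (the split size is an assumption, not stated in the file):

    import math

    # Assumed: 2775 correct answers out of 4848 MultiRC validation examples.
    n = 4848
    p = 2775 / n
    print(p)                                 # 0.5724009900990099, the reported acc
    print(math.sqrt(p * (1 - p) / (n - 1)))  # ~0.0071061, matching the reported acc_stderr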
evaluation/results/tr11/bloom2b5/bslmevalfiles/tr11c-2b5-ml-bsevalharness-results_lm-eval_global_step337250_2022-07-14-13-10-19.json ADDED
The diff for this file is too large to render. See raw diff
 
evaluation/results/tr11/bloom2b5/bslmevalfiles/tr11c-2b5-ml-bsevalharness-results_lm-eval_global_step337250_2022-07-14-20-09-16.json ADDED
@@ -0,0 +1,1255 @@
1
+ {
2
+ "results": [
3
+ {
4
+ "task_name": "gsarti/flores_101_kor",
5
+ "prompt_name": null,
6
+ "word_perplexity": 1199924.6918920355
7
+ },
8
+ {
9
+ "task_name": "gsarti/flores_101_kor",
10
+ "prompt_name": null,
11
+ "byte_perplexity": 3.932884847226212
12
+ },
13
+ {
14
+ "task_name": "gsarti/flores_101_kor",
15
+ "prompt_name": null,
16
+ "bits_per_byte": 1.9755879455567535
17
+ },
18
+ {
19
+ "task_name": "gsarti/flores_101_kir",
20
+ "prompt_name": null,
21
+ "word_perplexity": 140474672.36703426
22
+ },
23
+ {
24
+ "task_name": "gsarti/flores_101_kir",
25
+ "prompt_name": null,
26
+ "byte_perplexity": 3.729278369847201
27
+ },
28
+ {
29
+ "task_name": "gsarti/flores_101_kir",
30
+ "prompt_name": null,
31
+ "bits_per_byte": 1.8988964902756764
32
+ },
33
+ {
34
+ "task_name": "gsarti/flores_101_lao",
35
+ "prompt_name": null,
36
+ "word_perplexity": 6.1350041352351446e+26
37
+ },
38
+ {
39
+ "task_name": "gsarti/flores_101_lao",
40
+ "prompt_name": null,
41
+ "byte_perplexity": 2.9077314760849924
42
+ },
43
+ {
44
+ "task_name": "gsarti/flores_101_lao",
45
+ "prompt_name": null,
46
+ "bits_per_byte": 1.5398940450457603
47
+ },
48
+ {
49
+ "task_name": "gsarti/flores_101_lav",
50
+ "prompt_name": null,
51
+ "word_perplexity": 10925745.685132286
52
+ },
53
+ {
54
+ "task_name": "gsarti/flores_101_lav",
55
+ "prompt_name": null,
56
+ "byte_perplexity": 7.777221919194806
57
+ },
58
+ {
59
+ "task_name": "gsarti/flores_101_lav",
60
+ "prompt_name": null,
61
+ "bits_per_byte": 2.959254905963978
62
+ },
63
+ {
64
+ "task_name": "gsarti/flores_101_lin",
65
+ "prompt_name": null,
66
+ "word_perplexity": 166841.83897098716
67
+ },
68
+ {
69
+ "task_name": "gsarti/flores_101_lin",
70
+ "prompt_name": null,
71
+ "byte_perplexity": 7.524842908050988
72
+ },
73
+ {
74
+ "task_name": "gsarti/flores_101_lin",
75
+ "prompt_name": null,
76
+ "bits_per_byte": 2.9116614638468965
77
+ },
78
+ {
79
+ "task_name": "gsarti/flores_101_lit",
80
+ "prompt_name": null,
81
+ "word_perplexity": 8532364.031813102
82
+ },
83
+ {
84
+ "task_name": "gsarti/flores_101_lit",
85
+ "prompt_name": null,
86
+ "byte_perplexity": 7.369179434621725
87
+ },
88
+ {
89
+ "task_name": "gsarti/flores_101_lit",
90
+ "prompt_name": null,
91
+ "bits_per_byte": 2.88150398275188
92
+ },
93
+ {
94
+ "task_name": "gsarti/flores_101_luo",
95
+ "prompt_name": null,
96
+ "word_perplexity": 1335199.656768974
97
+ },
98
+ {
99
+ "task_name": "gsarti/flores_101_luo",
100
+ "prompt_name": null,
101
+ "byte_perplexity": 11.975963093623681
102
+ },
103
+ {
104
+ "task_name": "gsarti/flores_101_luo",
105
+ "prompt_name": null,
106
+ "bits_per_byte": 3.5820697754437467
107
+ },
108
+ {
109
+ "task_name": "gsarti/flores_101_ltz",
110
+ "prompt_name": null,
111
+ "word_perplexity": 4081613.1281958995
112
+ },
113
+ {
114
+ "task_name": "gsarti/flores_101_ltz",
115
+ "prompt_name": null,
116
+ "byte_perplexity": 8.801059747949214
117
+ },
118
+ {
119
+ "task_name": "gsarti/flores_101_ltz",
120
+ "prompt_name": null,
121
+ "bits_per_byte": 3.1376772511430198
122
+ },
123
+ {
124
+ "task_name": "gsarti/flores_101_mkd",
125
+ "prompt_name": null,
126
+ "word_perplexity": 291548.6603872499
127
+ },
128
+ {
129
+ "task_name": "gsarti/flores_101_mkd",
130
+ "prompt_name": null,
131
+ "byte_perplexity": 2.9656732291754087
132
+ },
133
+ {
134
+ "task_name": "gsarti/flores_101_mkd",
135
+ "prompt_name": null,
136
+ "bits_per_byte": 1.5683596441110415
137
+ },
138
+ {
139
+ "task_name": "gsarti/flores_101_msa",
140
+ "prompt_name": null,
141
+ "word_perplexity": 931.4191160965655
142
+ },
143
+ {
144
+ "task_name": "gsarti/flores_101_msa",
145
+ "prompt_name": null,
146
+ "byte_perplexity": 2.5710001772665634
147
+ },
148
+ {
149
+ "task_name": "gsarti/flores_101_msa",
150
+ "prompt_name": null,
151
+ "bits_per_byte": 1.3623297096432079
152
+ },
153
+ {
154
+ "task_name": "gsarti/flores_101_mal",
155
+ "prompt_name": null,
156
+ "word_perplexity": 1.207348615509252e+18
157
+ },
158
+ {
159
+ "task_name": "gsarti/flores_101_mal",
160
+ "prompt_name": null,
161
+ "byte_perplexity": 4.615948455160037
162
+ },
163
+ {
164
+ "task_name": "gsarti/flores_101_mal",
165
+ "prompt_name": null,
166
+ "bits_per_byte": 2.2066271139530245
167
+ },
168
+ {
169
+ "task_name": "gsarti/flores_101_mlt",
170
+ "prompt_name": null,
171
+ "word_perplexity": 1820552051.5260184
172
+ },
173
+ {
174
+ "task_name": "gsarti/flores_101_mlt",
175
+ "prompt_name": null,
176
+ "byte_perplexity": 15.004773437665275
177
+ },
178
+ {
179
+ "task_name": "gsarti/flores_101_mlt",
180
+ "prompt_name": null,
181
+ "bits_per_byte": 3.9073496302297994
182
+ },
183
+ {
184
+ "task_name": "gsarti/flores_101_mri",
185
+ "prompt_name": null,
186
+ "word_perplexity": 26466.98082941409
187
+ },
188
+ {
189
+ "task_name": "gsarti/flores_101_mri",
190
+ "prompt_name": null,
191
+ "byte_perplexity": 7.474035895661322
192
+ },
193
+ {
194
+ "task_name": "gsarti/flores_101_mri",
195
+ "prompt_name": null,
196
+ "bits_per_byte": 2.9018874925878335
197
+ },
198
+ {
199
+ "task_name": "gsarti/flores_101_mar",
200
+ "prompt_name": null,
201
+ "word_perplexity": 54017030487867.64
202
+ },
203
+ {
204
+ "task_name": "gsarti/flores_101_mar",
205
+ "prompt_name": null,
206
+ "byte_perplexity": 5.483253482821379
207
+ },
208
+ {
209
+ "task_name": "gsarti/flores_101_mar",
210
+ "prompt_name": null,
211
+ "bits_per_byte": 2.4550321688665875
212
+ },
213
+ {
214
+ "task_name": "gsarti/flores_101_mon",
215
+ "prompt_name": null,
216
+ "word_perplexity": 6612951.176601774
217
+ },
218
+ {
219
+ "task_name": "gsarti/flores_101_mon",
220
+ "prompt_name": null,
221
+ "byte_perplexity": 3.410598542315402
222
+ },
223
+ {
224
+ "task_name": "gsarti/flores_101_mon",
225
+ "prompt_name": null,
226
+ "bits_per_byte": 1.7700249469487581
227
+ },
228
+ {
229
+ "task_name": "gsarti/flores_101_npi",
230
+ "prompt_name": null,
231
+ "word_perplexity": 9218412485042.457
232
+ },
233
+ {
234
+ "task_name": "gsarti/flores_101_npi",
235
+ "prompt_name": null,
236
+ "byte_perplexity": 5.199342701937889
237
+ },
238
+ {
239
+ "task_name": "gsarti/flores_101_npi",
240
+ "prompt_name": null,
241
+ "bits_per_byte": 2.3783292500628397
242
+ },
243
+ {
244
+ "task_name": "gsarti/flores_101_nso",
245
+ "prompt_name": null,
246
+ "word_perplexity": 84236.45826211123
247
+ },
248
+ {
249
+ "task_name": "gsarti/flores_101_nso",
250
+ "prompt_name": null,
251
+ "byte_perplexity": 8.154626800955667
252
+ },
253
+ {
254
+ "task_name": "gsarti/flores_101_nso",
255
+ "prompt_name": null,
256
+ "bits_per_byte": 3.027618853058479
257
+ },
258
+ {
259
+ "task_name": "gsarti/flores_101_nob",
260
+ "prompt_name": null,
261
+ "word_perplexity": 36969.51682419191
262
+ },
263
+ {
264
+ "task_name": "gsarti/flores_101_nob",
265
+ "prompt_name": null,
266
+ "byte_perplexity": 5.402763169129877
267
+ },
268
+ {
269
+ "task_name": "gsarti/flores_101_nob",
270
+ "prompt_name": null,
271
+ "bits_per_byte": 2.4336974426149056
272
+ },
273
+ {
274
+ "task_name": "gsarti/flores_101_nya",
275
+ "prompt_name": null,
276
+ "word_perplexity": 6609896.030066139
277
+ },
278
+ {
279
+ "task_name": "gsarti/flores_101_nya",
280
+ "prompt_name": null,
281
+ "byte_perplexity": 8.179860208369393
282
+ },
283
+ {
284
+ "task_name": "gsarti/flores_101_nya",
285
+ "prompt_name": null,
286
+ "bits_per_byte": 3.0320761881040017
287
+ },
288
+ {
289
+ "task_name": "gsarti/flores_101_oci",
290
+ "prompt_name": null,
291
+ "word_perplexity": 21641.316763505896
292
+ },
293
+ {
294
+ "task_name": "gsarti/flores_101_oci",
295
+ "prompt_name": null,
296
+ "byte_perplexity": 4.8617357393685845
297
+ },
298
+ {
299
+ "task_name": "gsarti/flores_101_oci",
300
+ "prompt_name": null,
301
+ "bits_per_byte": 2.2814714775164466
302
+ },
303
+ {
304
+ "task_name": "gsarti/flores_101_ory",
305
+ "prompt_name": null,
306
+ "word_perplexity": 11873283711992.748
307
+ },
308
+ {
309
+ "task_name": "gsarti/flores_101_ory",
310
+ "prompt_name": null,
311
+ "byte_perplexity": 5.189421861225964
312
+ },
313
+ {
314
+ "task_name": "gsarti/flores_101_ory",
315
+ "prompt_name": null,
316
+ "bits_per_byte": 2.375573820972048
317
+ },
318
+ {
319
+ "task_name": "gsarti/flores_101_orm",
320
+ "prompt_name": null,
321
+ "word_perplexity": 944722910.1683049
322
+ },
323
+ {
324
+ "task_name": "gsarti/flores_101_orm",
325
+ "prompt_name": null,
326
+ "byte_perplexity": 12.911595421079408
327
+ },
328
+ {
329
+ "task_name": "gsarti/flores_101_orm",
330
+ "prompt_name": null,
331
+ "bits_per_byte": 3.690595373136525
332
+ },
333
+ {
334
+ "task_name": "gsarti/flores_101_pus",
335
+ "prompt_name": null,
336
+ "word_perplexity": 153261.38659736273
337
+ },
338
+ {
339
+ "task_name": "gsarti/flores_101_pus",
340
+ "prompt_name": null,
341
+ "byte_perplexity": 4.4963371422771585
342
+ },
343
+ {
344
+ "task_name": "gsarti/flores_101_pus",
345
+ "prompt_name": null,
346
+ "bits_per_byte": 2.1687502151085742
347
+ },
348
+ {
349
+ "task_name": "gsarti/flores_101_fas",
350
+ "prompt_name": null,
351
+ "word_perplexity": 44174.10652942002
352
+ },
353
+ {
354
+ "task_name": "gsarti/flores_101_fas",
355
+ "prompt_name": null,
356
+ "byte_perplexity": 3.058009097116482
357
+ },
358
+ {
359
+ "task_name": "gsarti/flores_101_fas",
360
+ "prompt_name": null,
361
+ "bits_per_byte": 1.6125926985055565
362
+ },
363
+ {
364
+ "task_name": "gsarti/flores_101_pol",
365
+ "prompt_name": null,
366
+ "word_perplexity": 104253.80848720921
367
+ },
368
+ {
369
+ "task_name": "gsarti/flores_101_pol",
370
+ "prompt_name": null,
371
+ "byte_perplexity": 4.625550458479643
372
+ },
373
+ {
374
+ "task_name": "gsarti/flores_101_pol",
375
+ "prompt_name": null,
376
+ "bits_per_byte": 2.2096250621616695
377
+ },
378
+ {
379
+ "task_name": "gsarti/flores_101_por",
380
+ "prompt_name": null,
381
+ "word_perplexity": 70.12185258792593
382
+ },
383
+ {
384
+ "task_name": "gsarti/flores_101_por",
385
+ "prompt_name": null,
386
+ "byte_perplexity": 1.9754515986213523
387
+ },
388
+ {
389
+ "task_name": "gsarti/flores_101_por",
390
+ "prompt_name": null,
391
+ "bits_per_byte": 0.9821824986646657
392
+ },
393
+ {
394
+ "task_name": "gsarti/flores_101_pan",
395
+ "prompt_name": null,
396
+ "word_perplexity": 847925284.3968099
397
+ },
398
+ {
399
+ "task_name": "gsarti/flores_101_pan",
400
+ "prompt_name": null,
401
+ "byte_perplexity": 4.698477289331806
402
+ },
403
+ {
404
+ "task_name": "gsarti/flores_101_pan",
405
+ "prompt_name": null,
406
+ "bits_per_byte": 2.2321932752863454
407
+ },
408
+ {
409
+ "task_name": "gsarti/flores_101_ron",
410
+ "prompt_name": null,
411
+ "word_perplexity": 36440.61611845943
412
+ },
413
+ {
414
+ "task_name": "gsarti/flores_101_ron",
415
+ "prompt_name": null,
416
+ "byte_perplexity": 4.965456830031304
417
+ },
418
+ {
419
+ "task_name": "gsarti/flores_101_ron",
420
+ "prompt_name": null,
421
+ "bits_per_byte": 2.31192645412871
422
+ },
423
+ {
424
+ "task_name": "gsarti/flores_101_rus",
425
+ "prompt_name": null,
426
+ "word_perplexity": 12717.27557342625
427
+ },
428
+ {
429
+ "task_name": "gsarti/flores_101_rus",
430
+ "prompt_name": null,
431
+ "byte_perplexity": 2.0498020542445303
432
+ },
433
+ {
434
+ "task_name": "gsarti/flores_101_rus",
435
+ "prompt_name": null,
436
+ "bits_per_byte": 1.0354845979511649
437
+ },
438
+ {
439
+ "task_name": "gsarti/flores_101_srp",
440
+ "prompt_name": null,
441
+ "word_perplexity": 179094.36755355867
442
+ },
443
+ {
444
+ "task_name": "gsarti/flores_101_srp",
445
+ "prompt_name": null,
446
+ "byte_perplexity": 2.871214785885079
447
+ },
448
+ {
449
+ "task_name": "gsarti/flores_101_srp",
450
+ "prompt_name": null,
451
+ "bits_per_byte": 1.5216612577275341
452
+ },
453
+ {
454
+ "task_name": "gsarti/flores_101_sna",
455
+ "prompt_name": null,
456
+ "word_perplexity": 64794029.630749054
457
+ },
458
+ {
459
+ "task_name": "gsarti/flores_101_sna",
460
+ "prompt_name": null,
461
+ "byte_perplexity": 8.462166771382726
462
+ },
463
+ {
464
+ "task_name": "gsarti/flores_101_sna",
465
+ "prompt_name": null,
466
+ "bits_per_byte": 3.0810271184378166
467
+ },
468
+ {
469
+ "task_name": "gsarti/flores_101_snd",
470
+ "prompt_name": null,
471
+ "word_perplexity": 1593844.7987764536
472
+ },
473
+ {
474
+ "task_name": "gsarti/flores_101_snd",
475
+ "prompt_name": null,
476
+ "byte_perplexity": 5.466066951221973
477
+ },
478
+ {
479
+ "task_name": "gsarti/flores_101_snd",
480
+ "prompt_name": null,
481
+ "bits_per_byte": 2.450503130846187
482
+ },
483
+ {
484
+ "task_name": "gsarti/flores_101_slk",
485
+ "prompt_name": null,
486
+ "word_perplexity": 766753.5771631876
487
+ },
488
+ {
489
+ "task_name": "gsarti/flores_101_slk",
490
+ "prompt_name": null,
491
+ "byte_perplexity": 6.450822127057479
492
+ },
493
+ {
494
+ "task_name": "gsarti/flores_101_slk",
495
+ "prompt_name": null,
496
+ "bits_per_byte": 2.6894830369770566
497
+ },
498
+ {
499
+ "task_name": "gsarti/flores_101_slv",
500
+ "prompt_name": null,
501
+ "word_perplexity": 281495.6973621906
502
+ },
503
+ {
504
+ "task_name": "gsarti/flores_101_slv",
505
+ "prompt_name": null,
506
+ "byte_perplexity": 6.620252120186232
507
+ },
508
+ {
509
+ "task_name": "gsarti/flores_101_slv",
510
+ "prompt_name": null,
511
+ "bits_per_byte": 2.726886160479057
512
+ },
513
+ {
514
+ "task_name": "gsarti/flores_101_som",
515
+ "prompt_name": null,
516
+ "word_perplexity": 9117591.536991648
517
+ },
518
+ {
519
+ "task_name": "gsarti/flores_101_som",
520
+ "prompt_name": null,
521
+ "byte_perplexity": 11.95918054093392
522
+ },
523
+ {
524
+ "task_name": "gsarti/flores_101_som",
525
+ "prompt_name": null,
526
+ "bits_per_byte": 3.5800466324138576
527
+ },
528
+ {
529
+ "task_name": "gsarti/flores_101_ckb",
530
+ "prompt_name": null,
531
+ "word_perplexity": 7641937.513844287
532
+ },
533
+ {
534
+ "task_name": "gsarti/flores_101_ckb",
535
+ "prompt_name": null,
536
+ "byte_perplexity": 3.7255124939234765
537
+ },
538
+ {
539
+ "task_name": "gsarti/flores_101_ckb",
540
+ "prompt_name": null,
541
+ "bits_per_byte": 1.8974389011678956
542
+ },
543
+ {
544
+ "task_name": "gsarti/flores_101_spa",
545
+ "prompt_name": null,
546
+ "word_perplexity": 50.48600403475257
547
+ },
548
+ {
549
+ "task_name": "gsarti/flores_101_spa",
550
+ "prompt_name": null,
551
+ "byte_perplexity": 1.8965140104323535
552
+ },
553
+ {
554
+ "task_name": "gsarti/flores_101_spa",
555
+ "prompt_name": null,
556
+ "bits_per_byte": 0.9233500295317635
557
+ },
558
+ {
559
+ "task_name": "gsarti/flores_101_swh",
560
+ "prompt_name": null,
561
+ "word_perplexity": 4756.310957867697
562
+ },
563
+ {
564
+ "task_name": "gsarti/flores_101_swh",
565
+ "prompt_name": null,
566
+ "byte_perplexity": 3.6973091886730676
567
+ },
568
+ {
569
+ "task_name": "gsarti/flores_101_swh",
570
+ "prompt_name": null,
571
+ "bits_per_byte": 1.8864756944079395
572
+ },
573
+ {
574
+ "task_name": "gsarti/flores_101_swe",
575
+ "prompt_name": null,
576
+ "word_perplexity": 50609.194691403645
577
+ },
578
+ {
579
+ "task_name": "gsarti/flores_101_swe",
580
+ "prompt_name": null,
581
+ "byte_perplexity": 5.054972008155866
582
+ },
583
+ {
584
+ "task_name": "gsarti/flores_101_swe",
585
+ "prompt_name": null,
586
+ "bits_per_byte": 2.3377031032447033
587
+ },
588
+ {
589
+ "task_name": "gsarti/flores_101_tgk",
590
+ "prompt_name": null,
591
+ "word_perplexity": 4653242.643384356
592
+ },
593
+ {
594
+ "task_name": "gsarti/flores_101_tgk",
595
+ "prompt_name": null,
596
+ "byte_perplexity": 3.5994818827380426
597
+ },
598
+ {
599
+ "task_name": "gsarti/flores_101_tgk",
600
+ "prompt_name": null,
601
+ "bits_per_byte": 1.847789256832959
602
+ },
603
+ {
604
+ "task_name": "gsarti/flores_101_tam",
605
+ "prompt_name": null,
606
+ "word_perplexity": 1.7375636861561886e+16
607
+ },
608
+ {
609
+ "task_name": "gsarti/flores_101_tam",
610
+ "prompt_name": null,
611
+ "byte_perplexity": 4.539493400469833
612
+ },
613
+ {
614
+ "task_name": "gsarti/flores_101_tam",
615
+ "prompt_name": null,
616
+ "bits_per_byte": 2.182531304254031
617
+ },
618
+ {
619
+ "task_name": "gsarti/flores_101_tel",
620
+ "prompt_name": null,
621
+ "word_perplexity": 6240250468604343.0
622
+ },
623
+ {
624
+ "task_name": "gsarti/flores_101_tel",
625
+ "prompt_name": null,
626
+ "byte_perplexity": 5.807499987508966
627
+ },
628
+ {
629
+ "task_name": "gsarti/flores_101_tel",
630
+ "prompt_name": null,
631
+ "bits_per_byte": 2.537917245931069
632
+ },
633
+ {
634
+ "task_name": "gsarti/flores_101_tha",
635
+ "prompt_name": null,
636
+ "word_perplexity": 2.7023221906004898e+31
637
+ },
638
+ {
639
+ "task_name": "gsarti/flores_101_tha",
640
+ "prompt_name": null,
641
+ "byte_perplexity": 2.365940201944242
642
+ },
643
+ {
644
+ "task_name": "gsarti/flores_101_tha",
645
+ "prompt_name": null,
646
+ "bits_per_byte": 1.242413610681628
647
+ },
648
+ {
649
+ "task_name": "gsarti/flores_101_tur",
650
+ "prompt_name": null,
651
+ "word_perplexity": 598170.0194818947
652
+ },
653
+ {
654
+ "task_name": "gsarti/flores_101_tur",
655
+ "prompt_name": null,
656
+ "byte_perplexity": 4.885014749844601
657
+ },
658
+ {
659
+ "task_name": "gsarti/flores_101_tur",
660
+ "prompt_name": null,
661
+ "bits_per_byte": 2.288362918282818
662
+ },
663
+ {
664
+ "task_name": "gsarti/flores_101_ukr",
665
+ "prompt_name": null,
666
+ "word_perplexity": 375312.1511987307
667
+ },
668
+ {
669
+ "task_name": "gsarti/flores_101_ukr",
670
+ "prompt_name": null,
671
+ "byte_perplexity": 2.7240934990288483
672
+ },
673
+ {
674
+ "task_name": "gsarti/flores_101_ukr",
675
+ "prompt_name": null,
676
+ "bits_per_byte": 1.445776221804572
677
+ },
678
+ {
679
+ "task_name": "gsarti/flores_101_umb",
680
+ "prompt_name": null,
681
+ "word_perplexity": 286182026.84727985
682
+ },
683
+ {
684
+ "task_name": "gsarti/flores_101_umb",
685
+ "prompt_name": null,
686
+ "byte_perplexity": 12.766915508610673
687
+ },
688
+ {
689
+ "task_name": "gsarti/flores_101_umb",
690
+ "prompt_name": null,
691
+ "bits_per_byte": 3.6743381063848357
692
+ },
693
+ {
694
+ "task_name": "gsarti/flores_101_urd",
695
+ "prompt_name": null,
696
+ "word_perplexity": 294.7473718166965
697
+ },
698
+ {
699
+ "task_name": "gsarti/flores_101_urd",
700
+ "prompt_name": null,
701
+ "byte_perplexity": 1.9797467071381232
702
+ },
703
+ {
704
+ "task_name": "gsarti/flores_101_urd",
705
+ "prompt_name": null,
706
+ "bits_per_byte": 0.9853158607436239
707
+ },
708
+ {
709
+ "task_name": "gsarti/flores_101_uzb",
710
+ "prompt_name": null,
711
+ "word_perplexity": 657971096.5030558
712
+ },
713
+ {
714
+ "task_name": "gsarti/flores_101_uzb",
715
+ "prompt_name": null,
716
+ "byte_perplexity": 12.002337637722146
717
+ },
718
+ {
719
+ "task_name": "gsarti/flores_101_uzb",
720
+ "prompt_name": null,
721
+ "bits_per_byte": 3.5852435148799184
722
+ },
723
+ {
724
+ "task_name": "gsarti/flores_101_vie",
725
+ "prompt_name": null,
726
+ "word_perplexity": 30.113286809710246
727
+ },
728
+ {
729
+ "task_name": "gsarti/flores_101_vie",
730
+ "prompt_name": null,
731
+ "byte_perplexity": 1.76578415476397
732
+ },
733
+ {
734
+ "task_name": "gsarti/flores_101_vie",
735
+ "prompt_name": null,
736
+ "bits_per_byte": 0.8203090021691818
737
+ },
738
+ {
739
+ "task_name": "gsarti/flores_101_cym",
740
+ "prompt_name": null,
741
+ "word_perplexity": 2638019.4579179045
742
+ },
743
+ {
744
+ "task_name": "gsarti/flores_101_cym",
745
+ "prompt_name": null,
746
+ "byte_perplexity": 12.539424151448149
747
+ },
748
+ {
749
+ "task_name": "gsarti/flores_101_cym",
750
+ "prompt_name": null,
751
+ "bits_per_byte": 3.6483991915978407
752
+ },
753
+ {
754
+ "task_name": "gsarti/flores_101_wol",
755
+ "prompt_name": null,
756
+ "word_perplexity": 119795.78671768666
757
+ },
758
+ {
759
+ "task_name": "gsarti/flores_101_wol",
760
+ "prompt_name": null,
761
+ "byte_perplexity": 9.144285650306488
762
+ },
763
+ {
764
+ "task_name": "gsarti/flores_101_wol",
765
+ "prompt_name": null,
766
+ "bits_per_byte": 3.1928704713393357
767
+ },
768
+ {
769
+ "task_name": "gsarti/flores_101_xho",
770
+ "prompt_name": null,
771
+ "word_perplexity": 54307092.21333007
772
+ },
773
+ {
774
+ "task_name": "gsarti/flores_101_xho",
775
+ "prompt_name": null,
776
+ "byte_perplexity": 7.403240538286952
777
+ },
778
+ {
779
+ "task_name": "gsarti/flores_101_xho",
780
+ "prompt_name": null,
781
+ "bits_per_byte": 2.8881569038733983
782
+ },
783
+ {
784
+ "task_name": "gsarti/flores_101_yor",
785
+ "prompt_name": null,
786
+ "word_perplexity": 130267.12232132205
787
+ },
788
+ {
789
+ "task_name": "gsarti/flores_101_yor",
790
+ "prompt_name": null,
791
+ "byte_perplexity": 5.91272037551173
792
+ },
793
+ {
794
+ "task_name": "gsarti/flores_101_yor",
795
+ "prompt_name": null,
796
+ "bits_per_byte": 2.5638220507535796
797
+ },
798
+ {
799
+ "task_name": "gsarti/flores_101_zul",
800
+ "prompt_name": null,
801
+ "word_perplexity": 493606524.8156374
802
+ },
803
+ {
804
+ "task_name": "gsarti/flores_101_zul",
805
+ "prompt_name": null,
806
+ "byte_perplexity": 8.53353320693145
807
+ },
808
+ {
809
+ "task_name": "gsarti/flores_101_zul",
810
+ "prompt_name": null,
811
+ "bits_per_byte": 3.0931431957905224
812
+ }
813
+ ],
814
+ "versions": {
815
+ "gsarti/flores_101_kor+null": 0,
816
+ "gsarti/flores_101_kir+null": 0,
817
+ "gsarti/flores_101_lao+null": 0,
818
+ "gsarti/flores_101_lav+null": 0,
819
+ "gsarti/flores_101_lin+null": 0,
820
+ "gsarti/flores_101_lit+null": 0,
821
+ "gsarti/flores_101_luo+null": 0,
822
+ "gsarti/flores_101_ltz+null": 0,
823
+ "gsarti/flores_101_mkd+null": 0,
824
+ "gsarti/flores_101_msa+null": 0,
825
+ "gsarti/flores_101_mal+null": 0,
826
+ "gsarti/flores_101_mlt+null": 0,
827
+ "gsarti/flores_101_mri+null": 0,
828
+ "gsarti/flores_101_mar+null": 0,
829
+ "gsarti/flores_101_mon+null": 0,
830
+ "gsarti/flores_101_npi+null": 0,
831
+ "gsarti/flores_101_nso+null": 0,
832
+ "gsarti/flores_101_nob+null": 0,
833
+ "gsarti/flores_101_nya+null": 0,
834
+ "gsarti/flores_101_oci+null": 0,
835
+ "gsarti/flores_101_ory+null": 0,
836
+ "gsarti/flores_101_orm+null": 0,
837
+ "gsarti/flores_101_pus+null": 0,
838
+ "gsarti/flores_101_fas+null": 0,
839
+ "gsarti/flores_101_pol+null": 0,
840
+ "gsarti/flores_101_por+null": 0,
841
+ "gsarti/flores_101_pan+null": 0,
842
+ "gsarti/flores_101_ron+null": 0,
843
+ "gsarti/flores_101_rus+null": 0,
844
+ "gsarti/flores_101_srp+null": 0,
845
+ "gsarti/flores_101_sna+null": 0,
846
+ "gsarti/flores_101_snd+null": 0,
847
+ "gsarti/flores_101_slk+null": 0,
848
+ "gsarti/flores_101_slv+null": 0,
849
+ "gsarti/flores_101_som+null": 0,
850
+ "gsarti/flores_101_ckb+null": 0,
851
+ "gsarti/flores_101_spa+null": 0,
852
+ "gsarti/flores_101_swh+null": 0,
853
+ "gsarti/flores_101_swe+null": 0,
854
+ "gsarti/flores_101_tgk+null": 0,
855
+ "gsarti/flores_101_tam+null": 0,
856
+ "gsarti/flores_101_tel+null": 0,
857
+ "gsarti/flores_101_tha+null": 0,
858
+ "gsarti/flores_101_tur+null": 0,
859
+ "gsarti/flores_101_ukr+null": 0,
860
+ "gsarti/flores_101_umb+null": 0,
861
+ "gsarti/flores_101_urd+null": 0,
862
+ "gsarti/flores_101_uzb+null": 0,
863
+ "gsarti/flores_101_vie+null": 0,
864
+ "gsarti/flores_101_cym+null": 0,
865
+ "gsarti/flores_101_wol+null": 0,
866
+ "gsarti/flores_101_xho+null": 0,
867
+ "gsarti/flores_101_yor+null": 0,
868
+ "gsarti/flores_101_zul+null": 0
869
+ },
870
+ "table_results": {
871
+ "gsarti/flores_101_kor+null": {
872
+ "task_name": "gsarti/flores_101_kor",
873
+ "prompt_name": "null",
874
+ "word_perplexity": 1199924.6918920355,
875
+ "byte_perplexity": 3.932884847226212,
876
+ "bits_per_byte": 1.9755879455567535
877
+ },
878
+ "gsarti/flores_101_kir+null": {
879
+ "task_name": "gsarti/flores_101_kir",
880
+ "prompt_name": "null",
881
+ "word_perplexity": 140474672.36703426,
882
+ "byte_perplexity": 3.729278369847201,
883
+ "bits_per_byte": 1.8988964902756764
884
+ },
885
+ "gsarti/flores_101_lao+null": {
886
+ "task_name": "gsarti/flores_101_lao",
887
+ "prompt_name": "null",
888
+ "word_perplexity": 6.1350041352351446e+26,
889
+ "byte_perplexity": 2.9077314760849924,
890
+ "bits_per_byte": 1.5398940450457603
891
+ },
892
+ "gsarti/flores_101_lav+null": {
893
+ "task_name": "gsarti/flores_101_lav",
894
+ "prompt_name": "null",
895
+ "word_perplexity": 10925745.685132286,
896
+ "byte_perplexity": 7.777221919194806,
897
+ "bits_per_byte": 2.959254905963978
898
+ },
899
+ "gsarti/flores_101_lin+null": {
900
+ "task_name": "gsarti/flores_101_lin",
901
+ "prompt_name": "null",
902
+ "word_perplexity": 166841.83897098716,
903
+ "byte_perplexity": 7.524842908050988,
904
+ "bits_per_byte": 2.9116614638468965
905
+ },
906
+ "gsarti/flores_101_lit+null": {
907
+ "task_name": "gsarti/flores_101_lit",
908
+ "prompt_name": "null",
909
+ "word_perplexity": 8532364.031813102,
910
+ "byte_perplexity": 7.369179434621725,
911
+ "bits_per_byte": 2.88150398275188
912
+ },
913
+ "gsarti/flores_101_luo+null": {
914
+ "task_name": "gsarti/flores_101_luo",
915
+ "prompt_name": "null",
916
+ "word_perplexity": 1335199.656768974,
917
+ "byte_perplexity": 11.975963093623681,
918
+ "bits_per_byte": 3.5820697754437467
919
+ },
920
+ "gsarti/flores_101_ltz+null": {
921
+ "task_name": "gsarti/flores_101_ltz",
922
+ "prompt_name": "null",
923
+ "word_perplexity": 4081613.1281958995,
924
+ "byte_perplexity": 8.801059747949214,
925
+ "bits_per_byte": 3.1376772511430198
926
+ },
927
+ "gsarti/flores_101_mkd+null": {
928
+ "task_name": "gsarti/flores_101_mkd",
929
+ "prompt_name": "null",
930
+ "word_perplexity": 291548.6603872499,
931
+ "byte_perplexity": 2.9656732291754087,
932
+ "bits_per_byte": 1.5683596441110415
933
+ },
934
+ "gsarti/flores_101_msa+null": {
935
+ "task_name": "gsarti/flores_101_msa",
936
+ "prompt_name": "null",
937
+ "word_perplexity": 931.4191160965655,
938
+ "byte_perplexity": 2.5710001772665634,
939
+ "bits_per_byte": 1.3623297096432079
940
+ },
941
+ "gsarti/flores_101_mal+null": {
942
+ "task_name": "gsarti/flores_101_mal",
943
+ "prompt_name": "null",
944
+ "word_perplexity": 1.207348615509252e+18,
945
+ "byte_perplexity": 4.615948455160037,
946
+ "bits_per_byte": 2.2066271139530245
947
+ },
948
+ "gsarti/flores_101_mlt+null": {
949
+ "task_name": "gsarti/flores_101_mlt",
950
+ "prompt_name": "null",
951
+ "word_perplexity": 1820552051.5260184,
952
+ "byte_perplexity": 15.004773437665275,
953
+ "bits_per_byte": 3.9073496302297994
954
+ },
955
+ "gsarti/flores_101_mri+null": {
956
+ "task_name": "gsarti/flores_101_mri",
957
+ "prompt_name": "null",
958
+ "word_perplexity": 26466.98082941409,
959
+ "byte_perplexity": 7.474035895661322,
960
+ "bits_per_byte": 2.9018874925878335
961
+ },
962
+ "gsarti/flores_101_mar+null": {
963
+ "task_name": "gsarti/flores_101_mar",
964
+ "prompt_name": "null",
965
+ "word_perplexity": 54017030487867.64,
966
+ "byte_perplexity": 5.483253482821379,
967
+ "bits_per_byte": 2.4550321688665875
968
+ },
969
+ "gsarti/flores_101_mon+null": {
970
+ "task_name": "gsarti/flores_101_mon",
971
+ "prompt_name": "null",
972
+ "word_perplexity": 6612951.176601774,
973
+ "byte_perplexity": 3.410598542315402,
974
+ "bits_per_byte": 1.7700249469487581
975
+ },
976
+ "gsarti/flores_101_npi+null": {
977
+ "task_name": "gsarti/flores_101_npi",
978
+ "prompt_name": "null",
979
+ "word_perplexity": 9218412485042.457,
980
+ "byte_perplexity": 5.199342701937889,
981
+ "bits_per_byte": 2.3783292500628397
982
+ },
983
+ "gsarti/flores_101_nso+null": {
984
+ "task_name": "gsarti/flores_101_nso",
985
+ "prompt_name": "null",
986
+ "word_perplexity": 84236.45826211123,
987
+ "byte_perplexity": 8.154626800955667,
988
+ "bits_per_byte": 3.027618853058479
989
+ },
990
+ "gsarti/flores_101_nob+null": {
991
+ "task_name": "gsarti/flores_101_nob",
992
+ "prompt_name": "null",
993
+ "word_perplexity": 36969.51682419191,
994
+ "byte_perplexity": 5.402763169129877,
995
+ "bits_per_byte": 2.4336974426149056
996
+ },
997
+ "gsarti/flores_101_nya+null": {
998
+ "task_name": "gsarti/flores_101_nya",
999
+ "prompt_name": "null",
1000
+ "word_perplexity": 6609896.030066139,
1001
+ "byte_perplexity": 8.179860208369393,
1002
+ "bits_per_byte": 3.0320761881040017
1003
+ },
1004
+ "gsarti/flores_101_oci+null": {
1005
+ "task_name": "gsarti/flores_101_oci",
1006
+ "prompt_name": "null",
1007
+ "word_perplexity": 21641.316763505896,
1008
+ "byte_perplexity": 4.8617357393685845,
1009
+ "bits_per_byte": 2.2814714775164466
1010
+ },
1011
+ "gsarti/flores_101_ory+null": {
1012
+ "task_name": "gsarti/flores_101_ory",
1013
+ "prompt_name": "null",
1014
+ "word_perplexity": 11873283711992.748,
1015
+ "byte_perplexity": 5.189421861225964,
1016
+ "bits_per_byte": 2.375573820972048
1017
+ },
1018
+ "gsarti/flores_101_orm+null": {
1019
+ "task_name": "gsarti/flores_101_orm",
1020
+ "prompt_name": "null",
1021
+ "word_perplexity": 944722910.1683049,
1022
+ "byte_perplexity": 12.911595421079408,
1023
+ "bits_per_byte": 3.690595373136525
1024
+ },
1025
+ "gsarti/flores_101_pus+null": {
1026
+ "task_name": "gsarti/flores_101_pus",
1027
+ "prompt_name": "null",
1028
+ "word_perplexity": 153261.38659736273,
1029
+ "byte_perplexity": 4.4963371422771585,
1030
+ "bits_per_byte": 2.1687502151085742
1031
+ },
1032
+ "gsarti/flores_101_fas+null": {
1033
+ "task_name": "gsarti/flores_101_fas",
1034
+ "prompt_name": "null",
1035
+ "word_perplexity": 44174.10652942002,
1036
+ "byte_perplexity": 3.058009097116482,
1037
+ "bits_per_byte": 1.6125926985055565
1038
+ },
1039
+ "gsarti/flores_101_pol+null": {
1040
+ "task_name": "gsarti/flores_101_pol",
1041
+ "prompt_name": "null",
1042
+ "word_perplexity": 104253.80848720921,
1043
+ "byte_perplexity": 4.625550458479643,
1044
+ "bits_per_byte": 2.2096250621616695
1045
+ },
1046
+ "gsarti/flores_101_por+null": {
1047
+ "task_name": "gsarti/flores_101_por",
1048
+ "prompt_name": "null",
1049
+ "word_perplexity": 70.12185258792593,
1050
+ "byte_perplexity": 1.9754515986213523,
1051
+ "bits_per_byte": 0.9821824986646657
1052
+ },
1053
+ "gsarti/flores_101_pan+null": {
1054
+ "task_name": "gsarti/flores_101_pan",
1055
+ "prompt_name": "null",
1056
+ "word_perplexity": 847925284.3968099,
1057
+ "byte_perplexity": 4.698477289331806,
1058
+ "bits_per_byte": 2.2321932752863454
1059
+ },
1060
+ "gsarti/flores_101_ron+null": {
1061
+ "task_name": "gsarti/flores_101_ron",
1062
+ "prompt_name": "null",
1063
+ "word_perplexity": 36440.61611845943,
1064
+ "byte_perplexity": 4.965456830031304,
1065
+ "bits_per_byte": 2.31192645412871
1066
+ },
1067
+ "gsarti/flores_101_rus+null": {
1068
+ "task_name": "gsarti/flores_101_rus",
1069
+ "prompt_name": "null",
1070
+ "word_perplexity": 12717.27557342625,
1071
+ "byte_perplexity": 2.0498020542445303,
1072
+ "bits_per_byte": 1.0354845979511649
1073
+ },
1074
+ "gsarti/flores_101_srp+null": {
1075
+ "task_name": "gsarti/flores_101_srp",
1076
+ "prompt_name": "null",
1077
+ "word_perplexity": 179094.36755355867,
1078
+ "byte_perplexity": 2.871214785885079,
1079
+ "bits_per_byte": 1.5216612577275341
1080
+ },
1081
+ "gsarti/flores_101_sna+null": {
1082
+ "task_name": "gsarti/flores_101_sna",
1083
+ "prompt_name": "null",
1084
+ "word_perplexity": 64794029.630749054,
1085
+ "byte_perplexity": 8.462166771382726,
1086
+ "bits_per_byte": 3.0810271184378166
1087
+ },
1088
+ "gsarti/flores_101_snd+null": {
1089
+ "task_name": "gsarti/flores_101_snd",
1090
+ "prompt_name": "null",
1091
+ "word_perplexity": 1593844.7987764536,
1092
+ "byte_perplexity": 5.466066951221973,
1093
+ "bits_per_byte": 2.450503130846187
1094
+ },
1095
+ "gsarti/flores_101_slk+null": {
1096
+ "task_name": "gsarti/flores_101_slk",
1097
+ "prompt_name": "null",
1098
+ "word_perplexity": 766753.5771631876,
1099
+ "byte_perplexity": 6.450822127057479,
1100
+ "bits_per_byte": 2.6894830369770566
1101
+ },
1102
+ "gsarti/flores_101_slv+null": {
1103
+ "task_name": "gsarti/flores_101_slv",
1104
+ "prompt_name": "null",
1105
+ "word_perplexity": 281495.6973621906,
1106
+ "byte_perplexity": 6.620252120186232,
1107
+ "bits_per_byte": 2.726886160479057
1108
+ },
1109
+ "gsarti/flores_101_som+null": {
1110
+ "task_name": "gsarti/flores_101_som",
1111
+ "prompt_name": "null",
1112
+ "word_perplexity": 9117591.536991648,
1113
+ "byte_perplexity": 11.95918054093392,
1114
+ "bits_per_byte": 3.5800466324138576
1115
+ },
1116
+ "gsarti/flores_101_ckb+null": {
1117
+ "task_name": "gsarti/flores_101_ckb",
1118
+ "prompt_name": "null",
1119
+ "word_perplexity": 7641937.513844287,
1120
+ "byte_perplexity": 3.7255124939234765,
1121
+ "bits_per_byte": 1.8974389011678956
1122
+ },
1123
+ "gsarti/flores_101_spa+null": {
1124
+ "task_name": "gsarti/flores_101_spa",
1125
+ "prompt_name": "null",
1126
+ "word_perplexity": 50.48600403475257,
1127
+ "byte_perplexity": 1.8965140104323535,
1128
+ "bits_per_byte": 0.9233500295317635
1129
+ },
1130
+ "gsarti/flores_101_swh+null": {
1131
+ "task_name": "gsarti/flores_101_swh",
1132
+ "prompt_name": "null",
1133
+ "word_perplexity": 4756.310957867697,
1134
+ "byte_perplexity": 3.6973091886730676,
1135
+ "bits_per_byte": 1.8864756944079395
1136
+ },
1137
+ "gsarti/flores_101_swe+null": {
1138
+ "task_name": "gsarti/flores_101_swe",
1139
+ "prompt_name": "null",
1140
+ "word_perplexity": 50609.194691403645,
1141
+ "byte_perplexity": 5.054972008155866,
1142
+ "bits_per_byte": 2.3377031032447033
1143
+ },
1144
+ "gsarti/flores_101_tgk+null": {
1145
+ "task_name": "gsarti/flores_101_tgk",
1146
+ "prompt_name": "null",
1147
+ "word_perplexity": 4653242.643384356,
1148
+ "byte_perplexity": 3.5994818827380426,
1149
+ "bits_per_byte": 1.847789256832959
1150
+ },
1151
+ "gsarti/flores_101_tam+null": {
1152
+ "task_name": "gsarti/flores_101_tam",
1153
+ "prompt_name": "null",
1154
+ "word_perplexity": 1.7375636861561886e+16,
1155
+ "byte_perplexity": 4.539493400469833,
1156
+ "bits_per_byte": 2.182531304254031
1157
+ },
1158
+ "gsarti/flores_101_tel+null": {
1159
+ "task_name": "gsarti/flores_101_tel",
1160
+ "prompt_name": "null",
1161
+ "word_perplexity": 6240250468604343.0,
1162
+ "byte_perplexity": 5.807499987508966,
1163
+ "bits_per_byte": 2.537917245931069
1164
+ },
1165
+ "gsarti/flores_101_tha+null": {
1166
+ "task_name": "gsarti/flores_101_tha",
1167
+ "prompt_name": "null",
1168
+ "word_perplexity": 2.7023221906004898e+31,
1169
+ "byte_perplexity": 2.365940201944242,
1170
+ "bits_per_byte": 1.242413610681628
1171
+ },
1172
+ "gsarti/flores_101_tur+null": {
1173
+ "task_name": "gsarti/flores_101_tur",
1174
+ "prompt_name": "null",
1175
+ "word_perplexity": 598170.0194818947,
1176
+ "byte_perplexity": 4.885014749844601,
1177
+ "bits_per_byte": 2.288362918282818
1178
+ },
1179
+ "gsarti/flores_101_ukr+null": {
1180
+ "task_name": "gsarti/flores_101_ukr",
1181
+ "prompt_name": "null",
1182
+ "word_perplexity": 375312.1511987307,
1183
+ "byte_perplexity": 2.7240934990288483,
1184
+ "bits_per_byte": 1.445776221804572
1185
+ },
1186
+ "gsarti/flores_101_umb+null": {
1187
+ "task_name": "gsarti/flores_101_umb",
1188
+ "prompt_name": "null",
1189
+ "word_perplexity": 286182026.84727985,
1190
+ "byte_perplexity": 12.766915508610673,
1191
+ "bits_per_byte": 3.6743381063848357
1192
+ },
1193
+ "gsarti/flores_101_urd+null": {
1194
+ "task_name": "gsarti/flores_101_urd",
1195
+ "prompt_name": "null",
1196
+ "word_perplexity": 294.7473718166965,
1197
+ "byte_perplexity": 1.9797467071381232,
1198
+ "bits_per_byte": 0.9853158607436239
1199
+ },
1200
+ "gsarti/flores_101_uzb+null": {
1201
+ "task_name": "gsarti/flores_101_uzb",
1202
+ "prompt_name": "null",
1203
+ "word_perplexity": 657971096.5030558,
1204
+ "byte_perplexity": 12.002337637722146,
1205
+ "bits_per_byte": 3.5852435148799184
1206
+ },
1207
+ "gsarti/flores_101_vie+null": {
1208
+ "task_name": "gsarti/flores_101_vie",
1209
+ "prompt_name": "null",
1210
+ "word_perplexity": 30.113286809710246,
1211
+ "byte_perplexity": 1.76578415476397,
1212
+ "bits_per_byte": 0.8203090021691818
1213
+ },
1214
+ "gsarti/flores_101_cym+null": {
1215
+ "task_name": "gsarti/flores_101_cym",
1216
+ "prompt_name": "null",
1217
+ "word_perplexity": 2638019.4579179045,
1218
+ "byte_perplexity": 12.539424151448149,
1219
+ "bits_per_byte": 3.6483991915978407
1220
+ },
1221
+ "gsarti/flores_101_wol+null": {
1222
+ "task_name": "gsarti/flores_101_wol",
1223
+ "prompt_name": "null",
1224
+ "word_perplexity": 119795.78671768666,
1225
+ "byte_perplexity": 9.144285650306488,
1226
+ "bits_per_byte": 3.1928704713393357
1227
+ },
1228
+ "gsarti/flores_101_xho+null": {
1229
+ "task_name": "gsarti/flores_101_xho",
1230
+ "prompt_name": "null",
1231
+ "word_perplexity": 54307092.21333007,
1232
+ "byte_perplexity": 7.403240538286952,
1233
+ "bits_per_byte": 2.8881569038733983
1234
+ },
1235
+ "gsarti/flores_101_yor+null": {
1236
+ "task_name": "gsarti/flores_101_yor",
1237
+ "prompt_name": "null",
1238
+ "word_perplexity": 130267.12232132205,
1239
+ "byte_perplexity": 5.91272037551173,
1240
+ "bits_per_byte": 2.5638220507535796
1241
+ },
1242
+ "gsarti/flores_101_zul+null": {
1243
+ "task_name": "gsarti/flores_101_zul",
1244
+ "prompt_name": "null",
1245
+ "word_perplexity": 493606524.8156374,
1246
+ "byte_perplexity": 8.53353320693145,
1247
+ "bits_per_byte": 3.0931431957905224
1248
+ }
1249
+ },
1250
+ "config": {
1251
+ "adaptive_seq_len": true,
1252
+ "num_fewshot": 0,
1253
+ "bootstrap_iters": 100000
1254
+ }
1255
+ }
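Note for readers: in these perplexity files, bits_per_byte is simply the base-2 logarithm of byte_perplexity, so the two fields carry the same information on different scales. A one-line check in Python:

    import math
    # flores_101_kor: byte_perplexity 3.932884847226212 -> bits_per_byte
    print(math.log2(3.932884847226212))  # ~1.9755879, the reported bits_per_byte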
evaluation/results/tr11/bloom350m/humaneval_temp02.json ADDED
@@ -0,0 +1 @@
+ {"pass@1": 0.00817073170731707, "pass@10": 0.020465171677199096, "pass@100": 0.024390015529347924}
evaluation/results/tr11/bloom350m/humaneval_temp06.json ADDED
@@ -0,0 +1 @@
+ {"pass@1": 0.0064939024390243925, "pass@10": 0.030182658898012457, "pass@100": 0.06233670887015886}
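These humaneval files report pass@k, which is typically computed with the unbiased estimator of Chen et al. (2021) and averaged over problems. A minimal sketch, assuming n generated samples per problem with c of them passing the tests:

    import numpy as np

    def pass_at_k(n: int, c: int, k: int) -> float:
        # Unbiased estimate of pass@k given n samples, c of which are correct.
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))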
evaluation/results/tr11/get_templates.sh ADDED
@@ -0,0 +1,27 @@
+ DATASETS_AND_CONFIGS=(
+ piaf,None,None
+ GEM/wiki_lingua,ar,ar
+ GEM/wiki_lingua,en,en
+ GEM/wiki_lingua,es,es
+ GEM/wiki_lingua,fr,fr
+ GEM/wiki_lingua,hi,hi
+ GEM/wiki_lingua,id,id
+ GEM/wiki_lingua,pt,pt
+ GEM/wiki_lingua,vi,vi
+ GEM/wiki_lingua,zh,zh
+ GEM/web_nlg,en,en
+ GEM/web_nlg,ru,ru
+ wmt14,fr-en,fr-en
+ )
+
+ # Unique ones: 0 1 2 5 6 7 8 9 10 11
+ for val in {0..12}; do
+     DATASET_AND_CONFIG=${DATASETS_AND_CONFIGS[$val]}
+     IFS=',' read dataset_name dataset_config_name template_config_name <<< "${DATASET_AND_CONFIG}"
+     echo $dataset_config_name
+     python evaluation/results/tr13/tzeroeval/get_templates.py \
+         --dataset_name $dataset_name \
+         --dataset_config_name $dataset_config_name \
+         --template_config_name $template_config_name
+ done
+
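Usage note (an aside, not part of the script): the loop covers all 13 entries of DATASETS_AND_CONFIGS via indices 0..12, even though the "Unique ones" comment flags only a subset of them; each entry is a comma-separated (dataset, config, template-config) triple that the IFS=',' read splits into three variables before invoking get_templates.py.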
evaluation/results/tr11/opt/humaneval_temp02.json ADDED
@@ -0,0 +1 @@
+ {"pass@1": 0.0, "pass@10": 0.0, "pass@100": 0.0}
evaluation/results/tr11/opt/humaneval_temp08.json ADDED
@@ -0,0 +1 @@
+ {"pass@1": 0.0, "pass@10": 0.0, "pass@100": 0.0}
evaluation/results/tr11/scripts/download.py ADDED
@@ -0,0 +1,21 @@
+ # Downloads the specified tasks in the evaluation harness.
+ # This is particularly useful when running in environments where the GPU nodes
+ # do not have internet access. This way we can pre-download them and use the cached datasets during evaluation.
+
+ from lm_eval import tasks
+ from lm_eval.tasks import ALL_TASKS
+ import argparse
+ import os
+
+
+ parser = argparse.ArgumentParser(description='Download evaluation harness', allow_abbrev=False)
+ parser.add_argument('--task_list', type=str, default="all", help='Either "all" or comma separated list of tasks to download.')
+ args = parser.parse_args()
+
+ def main():
+     task_list = ALL_TASKS if args.task_list == 'all' else args.task_list.split(',')
+     tasks.get_task_dict(task_list)
+
+ if __name__ == '__main__':
+     main()
+
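A typical workflow for this script (a sketch; the task names here are placeholders): run it once on a node with internet access, e.g. python download.py --task_list copa,piqa, so the datasets land in the local Hugging Face cache; the SLURM evaluation jobs can then run with HF_DATASETS_OFFLINE=1 and TRANSFORMERS_OFFLINE=1, as the scripts below do.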
evaluation/results/tr11/scripts/multi_eurlex_tmp.slurm ADDED
@@ -0,0 +1,63 @@
1
+ #!/bin/bash
2
+ #SBATCH --job-name=multieurlex
3
+ #SBATCH --nodes=1
4
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
5
+ #SBATCH --cpus-per-task=8 # number of cores per tasks
6
+ #SBATCH --hint=nomultithread # we get physical cores not logical
7
+ #SBATCH --gres=gpu:1 # number of gpus
8
+ #SBATCH --constraint=a100
9
+ #SBATCH --reservation=hug
10
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
11
+ #SBATCH --output=%x-%j.out # output file name
12
+ #SBATCH --account=six@a100
13
+
14
+ set -x -e
15
+
16
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
17
+ conda activate muennighofflmevalgen
18
+
19
+ echo "START TIME: $(date)"
20
+
21
+ # defining the right environment variables
22
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
23
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
24
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
25
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
26
+ export HF_DATASETS_OFFLINE=1
27
+ export TRANSFORMERS_OFFLINE=1
28
+ export TOKENIZERS_PARALLELISM=false
29
+
30
+ # Converted transformer checkpoint
31
+ #MODEL_CKPT=/gpfsscratch/rech/six/commun/experiments/muennighoff/bloomckpt/6b3/bloom-7b1
32
+ MODEL_CKPT=/gpfsscratch/rech/six/commun/experiments/muennighoff/bloomckpt/6b3t0/tr13f-6b3-ml-t0-lmtoks341b-t0toks13b-xp3capmixv2lossseq
33
+
34
+ cd /gpfsscratch/rech/six/commun/experiments/muennighoff/bslmevalgeneration/lm-evaluation-harness
35
+
36
+ DATASETS_AND_CONFIGS=(
37
+ multi_eurlex_mt,multi,"version-fr-en-source+target"
38
+ multi_eurlex_mt,multi,"version-en-fr-source+target"
39
+ multi_eurlex_mt,multi,"a_good_translation-fr-en-source+target"
40
+ multi_eurlex_mt,multi,"a_good_translation-en-fr-source+target"
41
+ multi_eurlex_mt,multi,"prev_doc-en-fr"
42
+ multi_eurlex_mt,multi,"prev_doc-fr-en"
43
+ )
44
+
45
+ DATASET_AND_CONFIG=${DATASETS_AND_CONFIGS[$SLURM_ARRAY_TASK_ID]}
46
+ echo $ARGUMENT
47
+
48
+ IFS=',' read dataset_name lang template_name <<< "${DATASET_AND_CONFIG}"
49
+
50
+ # Use this fork of lm-eval: https://github.com/bigscience-workshop/lm-evaluation-harness/pull/109
51
+ python main.py \
52
+ --model_api_name 'hf-causal' \
53
+ --model_args pretrained=$MODEL_CKPT,use_accelerate=True,tokenizer=$MODEL_CKPT,dtype=float16 \
54
+ --device cuda \
55
+ --batch_size 16 \
56
+ --no_tracking \
57
+ --task_name $dataset_name \
58
+ --template_names $template_name \
59
+ --bootstrap_iters 10 \
60
+ --num_fewshot 0 \
61
+ --limit 500
62
+
63
+ echo "END TIME: $(date)"
evaluation/results/tr11/scripts/report-to-csv.py ADDED
@@ -0,0 +1,58 @@
+ #!/usr/bin/env python
+
+ # this script converts results.json:
+ #
+ # "results": {
+ #   "arc_challenge": {
+ #     "acc": 0.24232081911262798,
+ #     "acc_stderr": 0.01252159329580012,
+ #     "acc_norm": 0.2764505119453925,
+ #     "acc_norm_stderr": 0.013069662474252425
+ #   },
+ #
+ # into a format expected by a spreadsheet, which is:
+ #
+ # task metric value err
+ # arc_challenge acc xxx yyy
+ # arc_challenge acc_norm xxx yyy
+ # arc_challenge f1 xxx yyy
+ #
+ # usage:
+ # report-to-csv.py results.json
+
+
+ import sys
+ import json
+ import io
+ import csv
+
+ results_file = sys.argv[1]
+
+ csv_file = results_file.replace("json", "csv")
+
+ print(f"Converting {results_file} to {csv_file}")
+
+ with io.open(results_file, 'r', encoding='utf-8') as f:
+     results = json.load(f)
+
+ with io.open(csv_file, 'w', encoding='utf-8') as f:
+
+     writer = csv.writer(f)
+     writer.writerow(["task", "metric", "value", "err", "version"])
+
+     versions = results["versions"]
+
+     for k,v in sorted(results["results"].items()):
+         if k not in versions:
+             versions[k] = -1
+
+         if "acc" in v:
+             writer.writerow([k, "acc", v["acc"], v["acc_stderr"], versions[k]])
+         if "acc_norm" in v:
+             writer.writerow([k, "acc_norm", v["acc_norm"], v["acc_norm_stderr"], versions[k]])
+         if "f1" in v:
+             writer.writerow([k, "f1", v["f1"], v["f1_stderr"] if "f1_stderr" in v else "", versions[k]])
+         # if "ppl" in v:
+         #     writer.writerow([k, "ppl", v["ppl"], v["ppl_stderr"], versions[k]])
+         # if "em" in v:
+         #     writer.writerow([k, "em", v["em"], v["em_stderr"] if "em_stderr" in v else "", versions[k]])
evaluation/results/tr11/scripts/run_bsevalharness_generation_176b.slurm ADDED
@@ -0,0 +1,128 @@
+ #!/bin/bash
+ #SBATCH --job-name=genbseval
+ #SBATCH --partition=gpu_p5
+ #SBATCH --constraint=a100
+ #SBATCH --reservation=hug
+ #SBATCH --qos=qos_gpu-gc # up to 100h
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:8 # number of gpus
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
+ conda activate muennighofflmevalgen
+
+ echo "START TIME: $(date)"
+
+ # defining the right environment variables
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+ export TOKENIZERS_PARALLELISM=false
+
+ # Converted transformer checkpoint
+ MODEL_CKPT=/gpfsscratch/rech/six/commun/uan68tv-model-conversion/bloom
+
+ cd /gpfsscratch/rech/six/commun/experiments/muennighoff/bslmevalgeneration/lm-evaluation-harness
+
+
+ DATASETS_AND_CONFIGS=(
+ GEM/wiki_lingua_ar,ar,"article_summary_ar"
+ GEM/wiki_lingua_ar,ar,"write_abstract_ar"
+ GEM/wiki_lingua_ar,ar,"summarize_above_ar"
+ GEM/wiki_lingua_ar,ar,"rephrase_ar"
+ GEM/wiki_lingua_ar,ar,"tldr_ar"
+ GEM/wiki_lingua_en,en,"article_summary_en"
+ GEM/wiki_lingua_en,en,"write_abstract_en"
+ GEM/wiki_lingua_en,en,"summarize_above_en"
+ GEM/wiki_lingua_en,en,"rephrase_en"
+ GEM/wiki_lingua_en,en,"tldr_en"
+ GEM/wiki_lingua_es,es,"article_summary_es"
+ GEM/wiki_lingua_es,es,"write_abstract_es"
+ GEM/wiki_lingua_es,es,"summarize_above_es"
+ GEM/wiki_lingua_es,es,"rephrase_es"
+ GEM/wiki_lingua_es,es,"tldr_es"
+ GEM/wiki_lingua_fr,fr,"article_summary_fr"
+ GEM/wiki_lingua_fr,fr,"write_abstract_fr"
+ GEM/wiki_lingua_fr,fr,"summarize_above_fr"
+ GEM/wiki_lingua_fr,fr,"rephrase_fr"
+ GEM/wiki_lingua_fr,fr,"tldr_fr"
+ GEM/wiki_lingua_hi,hi,"article_summary_hi"
+ GEM/wiki_lingua_hi,hi,"write_abstract_hi"
+ GEM/wiki_lingua_hi,hi,"summarize_above_hi"
+ GEM/wiki_lingua_hi,hi,"rephrase_hi"
+ GEM/wiki_lingua_hi,hi,"tldr_hi"
+ GEM/wiki_lingua_id,id,"article_summary_id"
+ GEM/wiki_lingua_id,id,"write_abstract_id"
+ GEM/wiki_lingua_id,id,"summarize_above_id"
+ GEM/wiki_lingua_id,id,"rephrase_id"
+ GEM/wiki_lingua_id,id,"tldr_id"
+ GEM/wiki_lingua_pt,pt,"article_summary_pt"
+ GEM/wiki_lingua_pt,pt,"write_abstract_pt"
+ GEM/wiki_lingua_pt,pt,"summarize_above_pt"
+ GEM/wiki_lingua_pt,pt,"rephrase_pt"
+ GEM/wiki_lingua_pt,pt,"tldr_pt"
+ GEM/wiki_lingua_vi,vi,"article_summary_vi"
+ GEM/wiki_lingua_vi,vi,"write_abstract_vi"
+ GEM/wiki_lingua_vi,vi,"summarize_above_vi"
+ GEM/wiki_lingua_vi,vi,"rephrase_vi"
+ GEM/wiki_lingua_vi,vi,"tldr_vi"
+ GEM/wiki_lingua_zh,zh,"article_summary_zh"
+ GEM/wiki_lingua_zh,zh,"write_abstract_zh"
+ GEM/wiki_lingua_zh,zh,"summarize_above_zh"
+ GEM/wiki_lingua_zh,zh,"rephrase_zh"
+ GEM/wiki_lingua_zh,zh,"tldr_zh"
+ )
+
+ DATASETS_AND_CONFIGS=(
+ wmt14_fr_en,fr-en,"version-en-fr-target"
+ wmt14_fr_en,fr-en,"a_good_translation-en-fr-target"
+ wmt14_fr_en,fr-en,"a_good_translation-en-fr-source+target"
+ wmt14_fr_en,fr-en,"xglm-en-fr-target"
+ wmt14_fr_en,fr-en,"gpt3-en-fr"
+ wmt14_fr_en,fr-en,"version-fr-en-target"
+ wmt14_fr_en,fr-en,"a_good_translation-fr-en-target"
+ wmt14_fr_en,fr-en,"a_good_translation-fr-en-source+target"
+ wmt14_fr_en,fr-en,"xglm-fr-en-target"
+ wmt14_fr_en,fr-en,"gpt3-fr-en"
+ )
+
+ DATASETS_AND_CONFIGS=(
+ GEM/web_nlg_en,en,"PALM_prompt"
+ GEM/web_nlg_en,en,"explicit-graph-description-2"
+ GEM/web_nlg_en,en,"implicit-graph-description"
+ GEM/web_nlg_en,en,"non-explicit-description"
+ GEM/web_nlg_en,en,"use-category"
+ GEM/web_nlg_ru,ru,"PALM_prompt"
+ GEM/web_nlg_ru,ru,"explicit-graph-description-2-Russian"
+ GEM/web_nlg_ru,ru,"implicit-graph-description-Russian"
+ GEM/web_nlg_ru,ru,"non-explicit-description-Russian"
+ GEM/web_nlg_ru,ru,"use-category-Russian"
+ )
+
+ DATASET_AND_CONFIG=${DATASETS_AND_CONFIGS[$SLURM_ARRAY_TASK_ID]}
+ echo $DATASET_AND_CONFIG
+
+ IFS=',' read dataset_name lang template_name <<< "${DATASET_AND_CONFIG}"
+
+ # Use this fork of lm-eval: https://github.com/bigscience-workshop/lm-evaluation-harness/pull/109
+ python main.py \
+ --model_api_name 'hf-causal' \
+ --model_args pretrained=$MODEL_CKPT,use_accelerate=True,tokenizer=$MODEL_CKPT,dtype=bfloat16 \
+ --device cuda \
+ --batch_size 8 \
+ --no_tracking \
+ --task_name $dataset_name \
+ --template_names $template_name \
+ --bootstrap_iters 10
+
+ echo "END TIME: $(date)"
evaluation/results/tr11/scripts/run_bsevalharness_generation_350m.slurm ADDED
@@ -0,0 +1,110 @@
+ #!/bin/bash
+ #SBATCH --job-name=run_bsevalharness-generation-350m
+ #SBATCH --constraint=v100-32g
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=10 # number of cores per tasks
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@v100
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
+ conda activate muennighofflmevalgen
+
+ echo "START TIME: $(date)"
+
+ # defining the right environment variables
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+ export TOKENIZERS_PARALLELISM=false
+
+ # Converted transformer checkpoint
+ MODEL_CKPT=/gpfsscratch/rech/six/commun/experiments/muennighoff/bloomckpt/350m/bloom-350m
+
+ cd /gpfsscratch/rech/six/commun/experiments/muennighoff/bslmevalgeneration/lm-evaluation-harness
+
+ # WMT19 ZH-EN does not work
+ DATASETS_AND_CONFIGS=(
+ GEM/wiki_lingua_ar,ar,"article_summary_ar"
+ GEM/wiki_lingua_ar,ar,"write_abstract_ar"
+ GEM/wiki_lingua_ar,ar,"summarize_above_ar"
+ GEM/wiki_lingua_ar,ar,"rephrase_ar"
+ GEM/wiki_lingua_ar,ar,"tldr_ar"
+ GEM/wiki_lingua_en,en,"article_summary_en"
+ GEM/wiki_lingua_en,en,"write_abstract_en"
+ GEM/wiki_lingua_en,en,"summarize_above_en"
+ GEM/wiki_lingua_en,en,"rephrase_en"
+ GEM/wiki_lingua_en,en,"tldr_en"
+ GEM/wiki_lingua_es,es,"article_summary_es"
+ GEM/wiki_lingua_es,es,"write_abstract_es"
+ GEM/wiki_lingua_es,es,"summarize_above_es"
+ GEM/wiki_lingua_es,es,"rephrase_es"
+ GEM/wiki_lingua_es,es,"tldr_es"
+ GEM/wiki_lingua_fr,fr,"article_summary_fr"
+ GEM/wiki_lingua_fr,fr,"write_abstract_fr"
+ GEM/wiki_lingua_fr,fr,"summarize_above_fr"
+ GEM/wiki_lingua_fr,fr,"rephrase_fr"
+ GEM/wiki_lingua_fr,fr,"tldr_fr"
+ GEM/wiki_lingua_hi,hi,"article_summary_hi"
+ GEM/wiki_lingua_hi,hi,"write_abstract_hi"
+ GEM/wiki_lingua_hi,hi,"summarize_above_hi"
+ GEM/wiki_lingua_hi,hi,"rephrase_hi"
+ GEM/wiki_lingua_hi,hi,"tldr_hi"
+ GEM/wiki_lingua_id,id,"article_summary_id"
+ GEM/wiki_lingua_id,id,"write_abstract_id"
+ GEM/wiki_lingua_id,id,"summarize_above_id"
+ GEM/wiki_lingua_id,id,"rephrase_id"
+ GEM/wiki_lingua_id,id,"tldr_id"
+ GEM/wiki_lingua_pt,pt,"article_summary_pt"
+ GEM/wiki_lingua_pt,pt,"write_abstract_pt"
+ GEM/wiki_lingua_pt,pt,"summarize_above_pt"
+ GEM/wiki_lingua_pt,pt,"rephrase_pt"
+ GEM/wiki_lingua_pt,pt,"tldr_pt"
+ GEM/wiki_lingua_vi,vi,"article_summary_vi"
+ GEM/wiki_lingua_vi,vi,"write_abstract_vi"
+ GEM/wiki_lingua_vi,vi,"summarize_above_vi"
+ GEM/wiki_lingua_vi,vi,"rephrase_vi"
+ GEM/wiki_lingua_vi,vi,"tldr_vi"
+ GEM/wiki_lingua_zh,zh,"article_summary_zh"
+ GEM/wiki_lingua_zh,zh,"write_abstract_zh"
+ GEM/wiki_lingua_zh,zh,"summarize_above_zh"
+ GEM/wiki_lingua_zh,zh,"rephrase_zh"
+ GEM/wiki_lingua_zh,zh,"tldr_zh"
+ )
+
+ #GEM/wiki_lingua_ar,ar,"article_summary_ar"
+ #GEM/wiki_lingua_ar,ar,"write_abstract_ar"
+ #GEM/wiki_lingua_ar,ar,"summarize_above_ar"
+ #GEM/wiki_lingua_ar,ar,"rephrase_ar"
+ #GEM/wiki_lingua_ar,ar,"tldr_ar"
+ #GEM/wiki_lingua_zh,zh,"article_summary_zh"
+ #GEM/wiki_lingua_zh,zh,"write_abstract_zh"
+ #GEM/wiki_lingua_zh,zh,"summarize_above_zh"
+ #GEM/wiki_lingua_zh,zh,"rephrase_zh"
+ #GEM/wiki_lingua_zh,zh,"tldr_zh"
+
+ DATASET_AND_CONFIG=${DATASETS_AND_CONFIGS[$SLURM_ARRAY_TASK_ID]}
+ echo $DATASET_AND_CONFIG
+
+ IFS=',' read dataset_name lang template_name <<< "${DATASET_AND_CONFIG}"
+
+ # Use this fork of lm-eval: https://github.com/bigscience-workshop/lm-evaluation-harness/pull/109
+ python main.py \
+ --model_api_name 'hf-causal' \
+ --model_args pretrained=$MODEL_CKPT,use_accelerate=True,tokenizer=$MODEL_CKPT,dtype=float16 \
+ --device cuda \
+ --batch_size 16 \
+ --no_tracking \
+ --task_name $dataset_name \
+ --template_names $template_name \
+ --bootstrap_iters 10
+
+ echo "END TIME: $(date)"
evaluation/results/tr11/scripts/run_bsevalharness_generation_760m.slurm ADDED
@@ -0,0 +1,110 @@
+ #!/bin/bash
+ #SBATCH --job-name=run_bsevalharness-generation-760m
+ #SBATCH --constraint=v100-32g
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=10 # number of cores per tasks
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@v100
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
+ conda activate muennighofflmevalgen
+
+ echo "START TIME: $(date)"
+
+ # defining the right environment variables
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+ export TOKENIZERS_PARALLELISM=false
+
+ # Converted transformer checkpoint
+ MODEL_CKPT=/gpfsscratch/rech/six/commun/experiments/muennighoff/bloomckpt/760m/bloom-760m
+
+ cd /gpfsscratch/rech/six/commun/experiments/muennighoff/bslmevalgeneration/lm-evaluation-harness
+
+ # WMT19 ZH-EN does not work
+ DATASETS_AND_CONFIGS=(
+ GEM/wiki_lingua_ar,ar,"article_summary_ar"
+ GEM/wiki_lingua_ar,ar,"write_abstract_ar"
+ GEM/wiki_lingua_ar,ar,"summarize_above_ar"
+ GEM/wiki_lingua_ar,ar,"rephrase_ar"
+ GEM/wiki_lingua_ar,ar,"tldr_ar"
+ GEM/wiki_lingua_en,en,"article_summary_en"
+ GEM/wiki_lingua_en,en,"write_abstract_en"
+ GEM/wiki_lingua_en,en,"summarize_above_en"
+ GEM/wiki_lingua_en,en,"rephrase_en"
+ GEM/wiki_lingua_en,en,"tldr_en"
+ GEM/wiki_lingua_es,es,"article_summary_es"
+ GEM/wiki_lingua_es,es,"write_abstract_es"
+ GEM/wiki_lingua_es,es,"summarize_above_es"
+ GEM/wiki_lingua_es,es,"rephrase_es"
+ GEM/wiki_lingua_es,es,"tldr_es"
+ GEM/wiki_lingua_fr,fr,"article_summary_fr"
+ GEM/wiki_lingua_fr,fr,"write_abstract_fr"
+ GEM/wiki_lingua_fr,fr,"summarize_above_fr"
+ GEM/wiki_lingua_fr,fr,"rephrase_fr"
+ GEM/wiki_lingua_fr,fr,"tldr_fr"
+ GEM/wiki_lingua_hi,hi,"article_summary_hi"
+ GEM/wiki_lingua_hi,hi,"write_abstract_hi"
+ GEM/wiki_lingua_hi,hi,"summarize_above_hi"
+ GEM/wiki_lingua_hi,hi,"rephrase_hi"
+ GEM/wiki_lingua_hi,hi,"tldr_hi"
+ GEM/wiki_lingua_id,id,"article_summary_id"
+ GEM/wiki_lingua_id,id,"write_abstract_id"
+ GEM/wiki_lingua_id,id,"summarize_above_id"
+ GEM/wiki_lingua_id,id,"rephrase_id"
+ GEM/wiki_lingua_id,id,"tldr_id"
+ GEM/wiki_lingua_pt,pt,"article_summary_pt"
+ GEM/wiki_lingua_pt,pt,"write_abstract_pt"
+ GEM/wiki_lingua_pt,pt,"summarize_above_pt"
+ GEM/wiki_lingua_pt,pt,"rephrase_pt"
+ GEM/wiki_lingua_pt,pt,"tldr_pt"
+ GEM/wiki_lingua_vi,vi,"article_summary_vi"
+ GEM/wiki_lingua_vi,vi,"write_abstract_vi"
+ GEM/wiki_lingua_vi,vi,"summarize_above_vi"
+ GEM/wiki_lingua_vi,vi,"rephrase_vi"
+ GEM/wiki_lingua_vi,vi,"tldr_vi"
+ GEM/wiki_lingua_zh,zh,"article_summary_zh"
+ GEM/wiki_lingua_zh,zh,"write_abstract_zh"
+ GEM/wiki_lingua_zh,zh,"summarize_above_zh"
+ GEM/wiki_lingua_zh,zh,"rephrase_zh"
+ GEM/wiki_lingua_zh,zh,"tldr_zh"
+ )
+
+ #GEM/wiki_lingua_ar,ar,"article_summary_ar"
+ #GEM/wiki_lingua_ar,ar,"write_abstract_ar"
+ #GEM/wiki_lingua_ar,ar,"summarize_above_ar"
+ #GEM/wiki_lingua_ar,ar,"rephrase_ar"
+ #GEM/wiki_lingua_ar,ar,"tldr_ar"
+ #GEM/wiki_lingua_zh,zh,"article_summary_zh"
+ #GEM/wiki_lingua_zh,zh,"write_abstract_zh"
+ #GEM/wiki_lingua_zh,zh,"summarize_above_zh"
+ #GEM/wiki_lingua_zh,zh,"rephrase_zh"
+ #GEM/wiki_lingua_zh,zh,"tldr_zh"
+
+ DATASET_AND_CONFIG=${DATASETS_AND_CONFIGS[$SLURM_ARRAY_TASK_ID]}
+ echo $DATASET_AND_CONFIG
+
+ IFS=',' read dataset_name lang template_name <<< "${DATASET_AND_CONFIG}"
+
+ # Use this fork of lm-eval: https://github.com/bigscience-workshop/lm-evaluation-harness/pull/109
+ python main.py \
+ --model_api_name 'hf-causal' \
+ --model_args pretrained=$MODEL_CKPT,use_accelerate=True,tokenizer=$MODEL_CKPT,dtype=float16 \
+ --device cuda \
+ --batch_size 16 \
+ --no_tracking \
+ --task_name $dataset_name \
+ --template_names $template_name \
+ --bootstrap_iters 10
+
+ echo "END TIME: $(date)"
evaluation/results/tr11/scripts/run_bsevalharness_tr11c-2b5-ml.slurm ADDED
@@ -0,0 +1,121 @@
+ #!/bin/bash
+ #SBATCH --job-name=run_bsevalharness-tr11c-2b5-ml
+ #SBATCH --partition=gpu_p5
+ #SBATCH --constraint=a100
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=8 # number of cores per tasks
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+ #SBATCH --reservation=hug
+
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-muennighofflmeval
+
+ echo "START TIME: $(date)"
+
+ # a unique identifier for the current eval, ideally corresponding to the model name
+ VARIANT="tr11c-2b5-ml-bsevalharness"
+
+
+ CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11c-2B5-ml/checkpoints/main/global_step337250
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/megdsbslmeval/Megatron-DeepSpeed
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasetseval
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+ export TOKENIZERS_PARALLELISM=false
+
+ cd $MEGATRON_DEEPSPEED_REPO
+
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
+
+ PP_SIZE=1
+ TP_SIZE=1
+ SEQ_LEN=2048
+
+ # different from the training MICRO_BATCH_SIZE - no optim memory, so can do bigger BS
+ # make as big as it can fit into gpu w/o OOM, but not too close to 100%
+ EVAL_MICRO_BATCH_SIZE=1
+
+ #dummy arguments to make megatron happy.
+ MEGATRON_REQUIRED_ARGS=" \
+ --num-layers -1 \
+ --hidden-size -1 \
+ --num-attention-heads -1 \
+ --seq-length -1 \
+ --max-position-embeddings -1 \
+ "
+
+
+ ZERO_STAGE=0
+
+ config_json="./ds_config.json"
+
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
+ cat <<EOT > $config_json
+ {
+   "train_micro_batch_size_per_gpu": 1,
+   "train_batch_size": 1,
+   "gradient_clipping": 1.0,
+   "zero_optimization": {
+     "stage": $ZERO_STAGE
+   },
+   "bf16": {
+     "enabled": false
+   },
+   "steps_per_print": 2000,
+   "wall_clock_breakdown": false
+ }
+ EOT
+
+ CMD="./tasks/eval_harness/evaluate_bsevalharness.py \
+ --load $CHECKPOINT_PATH \
+ --results_path $VARIANT-results.json \
+ --tensor-model-parallel-size $TP_SIZE \
+ --pipeline-model-parallel-size $PP_SIZE \
+ --tokenizer-type PretrainedFromHF \
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
+ --micro-batch-size $EVAL_MICRO_BATCH_SIZE \
+ --no-load-optim \
+ --no-load-rng \
+ --inference \
+ --seq-length $SEQ_LEN \
+ --task_list axb,axg,boolq,cb,cola,copa,crows_pairs_english,crows_pairs_french,diabla,e2e_nlg_cleaned,mnli,mnli_mismatched,multirc,piaf,qqp,rte,sst,tydiqa_primary,tydiqa_secondary,wic,wsc,wnli,wino_bias_type1_anti,wino_bias_type1_pro,wino_bias_type2_anti,wino_bias_type2_pro,xquad_ar,xquad_en,gsarti/flores_101_afr,gsarti/flores_101_amh,gsarti/flores_101_ara,gsarti/flores_101_hye,gsarti/flores_101_asm,gsarti/flores_101_ast,gsarti/flores_101_azj,gsarti/flores_101_bel,gsarti/flores_101_ben,gsarti/flores_101_bos,gsarti/flores_101_bul,gsarti/flores_101_mya,gsarti/flores_101_cat,gsarti/flores_101_ceb,gsarti/flores_101_zho_simpl,gsarti/flores_101_zho_trad,gsarti/flores_101_hrv,gsarti/flores_101_ces,gsarti/flores_101_dan,gsarti/flores_101_nld,gsarti/flores_101_eng,gsarti/flores_101_est,gsarti/flores_101_tgl,gsarti/flores_101_fin,gsarti/flores_101_fra,gsarti/flores_101_ful,gsarti/flores_101_glg,gsarti/flores_101_lug,gsarti/flores_101_kat,gsarti/flores_101_deu,gsarti/flores_101_ell,gsarti/flores_101_guj,gsarti/flores_101_hau,gsarti/flores_101_heb,gsarti/flores_101_hin,gsarti/flores_101_hun,gsarti/flores_101_isl,gsarti/flores_101_ibo,gsarti/flores_101_ind,gsarti/flores_101_gle,gsarti/flores_101_ita,gsarti/flores_101_jpn,gsarti/flores_101_jav,gsarti/flores_101_kea,gsarti/flores_101_kam,gsarti/flores_101_kan,gsarti/flores_101_kaz,gsarti/flores_101_khm,gsarti/flores_101_kor,gsarti/flores_101_kir,gsarti/flores_101_lao,gsarti/flores_101_lav,gsarti/flores_101_lin,gsarti/flores_101_lit,gsarti/flores_101_luo,gsarti/flores_101_ltz,gsarti/flores_101_mkd,gsarti/flores_101_msa,gsarti/flores_101_mal,gsarti/flores_101_mlt,gsarti/flores_101_mri,gsarti/flores_101_mar,gsarti/flores_101_mon,gsarti/flores_101_npi,gsarti/flores_101_nso,gsarti/flores_101_nob,gsarti/flores_101_nya,gsarti/flores_101_oci,gsarti/flores_101_ory,gsarti/flores_101_orm,gsarti/flores_101_pus,gsarti/flores_101_fas,gsarti/flores_101_pol,gsarti/flores_101_por,gsarti/flores_101_pan,gsarti/flores_101_ron,gsarti/flores_101_rus,gsarti/flores_101_srp,gsarti/flores_101_sna,gsarti/flores_101_snd,gsarti/flores_101_slk,gsarti/flores_101_slv,gsarti/flores_101_som,gsarti/flores_101_ckb,gsarti/flores_101_spa,gsarti/flores_101_swh,gsarti/flores_101_swe,gsarti/flores_101_tgk,gsarti/flores_101_tam,gsarti/flores_101_tel,gsarti/flores_101_tha,gsarti/flores_101_tur,gsarti/flores_101_ukr,gsarti/flores_101_umb,gsarti/flores_101_urd,gsarti/flores_101_uzb,gsarti/flores_101_vie,gsarti/flores_101_cym,gsarti/flores_101_wol,gsarti/flores_101_xho,gsarti/flores_101_yor,gsarti/flores_101_zul \
+ --eval_fp32 \
+ --deepspeed \
+ --deepspeed_config ds_config.json \
+ --intermed_results \
+ --adaptive_seq_len \
+ --micro_bs_multiplier 8 \
+ $MEGATRON_REQUIRED_ARGS \
+ "
+
+ GPUS_PER_NODE=1
+ NNODES=$SLURM_NNODES
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
+ MASTER_PORT=6000
+ export LAUNCHER="python -u -m torch.distributed.run \
+ --nproc_per_node $GPUS_PER_NODE \
+ --nnodes $NNODES \
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
+ --rdzv_backend c10d \
+ --max_restarts 0 \
+ --tee 3 \
+ "
+
+ export CUDA_LAUNCH_BLOCKING=1
+
+ echo $LAUNCHER $CMD
+
+ export PYTHONPATH=$MEGATRON_DEEPSPEED_REPO
+
+ $LAUNCHER $CMD 2>&1 | tee $VARIANT-eval-harness.log
evaluation/results/tr11/scripts/run_bsevalharness_tr11e-350m-ml.slurm ADDED
@@ -0,0 +1,120 @@
+ #!/bin/bash
+ #SBATCH --job-name=run_bsevalharness-tr11e-350m-ml
+ #SBATCH --constraint=v100-32g
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=10 # number of cores per tasks
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@v100
+
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-muennighofflmeval
+
+ echo "START TIME: $(date)"
+
+ # a unique identifier for the current eval, ideally corresponding to the model name
+ VARIANT="tr11e-350m-ml-bsevalharness"
+
+
+ CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11e-350M-ml/checkpoints/main/global_step659500
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bslmeval/Megatron-DeepSpeed
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasetseval
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+ export TOKENIZERS_PARALLELISM=false
+
+ cd $MEGATRON_DEEPSPEED_REPO
+
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
+
+ PP_SIZE=1
+ TP_SIZE=1
+ SEQ_LEN=2048
+
+ # different from the training MICRO_BATCH_SIZE - no optim memory, so can do bigger BS
+ # make as big as it can fit into gpu w/o OOM, but not too close to 100%
+ EVAL_MICRO_BATCH_SIZE=1
+
+ #dummy arguments to make megatron happy.
+ MEGATRON_REQUIRED_ARGS=" \
+ --num-layers -1 \
+ --hidden-size -1 \
+ --num-attention-heads -1 \
+ --seq-length -1 \
+ --max-position-embeddings -1 \
+ "
+
+
+ ZERO_STAGE=0
+
+ config_json="./ds_config.json"
+
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
+ cat <<EOT > $config_json
+ {
+   "train_micro_batch_size_per_gpu": 1,
+   "train_batch_size": 1,
+   "gradient_clipping": 1.0,
+   "zero_optimization": {
+     "stage": $ZERO_STAGE
+   },
+   "bf16": {
+     "enabled": false
+   },
+   "steps_per_print": 2000,
+   "wall_clock_breakdown": false
+ }
+ EOT
+
+
+ CMD="./tasks/eval_harness/evaluate_bsevalharness.py \
+ --load $CHECKPOINT_PATH \
+ --results_path $VARIANT-results.json \
+ --tensor-model-parallel-size $TP_SIZE \
+ --pipeline-model-parallel-size $PP_SIZE \
+ --tokenizer-type PretrainedFromHF \
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
+ --micro-batch-size $EVAL_MICRO_BATCH_SIZE \
+ --no-load-optim \
+ --no-load-rng \
+ --inference \
+ --seq-length $SEQ_LEN \
+ --task_list piaf,qqp,rte,sst,tydiqa_primary,tydiqa_secondary,wic,wsc,wnli,wino_bias_type1_anti,wino_bias_type1_pro,wino_bias_type2_anti,wino_bias_type2_pro,xquad_ar,xquad_en,gsarti/flores_101_afr,gsarti/flores_101_amh,gsarti/flores_101_ara,gsarti/flores_101_hye,gsarti/flores_101_asm,gsarti/flores_101_ast,gsarti/flores_101_azj,gsarti/flores_101_bel,gsarti/flores_101_ben,gsarti/flores_101_bos,gsarti/flores_101_bul,gsarti/flores_101_mya,gsarti/flores_101_cat,gsarti/flores_101_ceb,gsarti/flores_101_zho_simpl,gsarti/flores_101_zho_trad,gsarti/flores_101_hrv,gsarti/flores_101_ces,gsarti/flores_101_dan,gsarti/flores_101_nld,gsarti/flores_101_eng,gsarti/flores_101_est,gsarti/flores_101_tgl,gsarti/flores_101_fin,gsarti/flores_101_fra,gsarti/flores_101_ful,gsarti/flores_101_glg,gsarti/flores_101_lug,gsarti/flores_101_kat,gsarti/flores_101_deu,gsarti/flores_101_ell,gsarti/flores_101_guj,gsarti/flores_101_hau,gsarti/flores_101_heb,gsarti/flores_101_hin,gsarti/flores_101_hun,gsarti/flores_101_isl,gsarti/flores_101_ibo,gsarti/flores_101_ind,gsarti/flores_101_gle,gsarti/flores_101_ita,gsarti/flores_101_jpn,gsarti/flores_101_jav,gsarti/flores_101_kea,gsarti/flores_101_kam,gsarti/flores_101_kan,gsarti/flores_101_kaz,gsarti/flores_101_khm,gsarti/flores_101_kor,gsarti/flores_101_kir,gsarti/flores_101_lao,gsarti/flores_101_lav,gsarti/flores_101_lin,gsarti/flores_101_lit,gsarti/flores_101_luo,gsarti/flores_101_ltz,gsarti/flores_101_mkd,gsarti/flores_101_msa,gsarti/flores_101_mal,gsarti/flores_101_mlt,gsarti/flores_101_mri,gsarti/flores_101_mar,gsarti/flores_101_mon,gsarti/flores_101_npi,gsarti/flores_101_nso,gsarti/flores_101_nob,gsarti/flores_101_nya,gsarti/flores_101_oci,gsarti/flores_101_ory,gsarti/flores_101_orm,gsarti/flores_101_pus,gsarti/flores_101_fas,gsarti/flores_101_pol,gsarti/flores_101_por,gsarti/flores_101_pan,gsarti/flores_101_ron,gsarti/flores_101_rus,gsarti/flores_101_srp,gsarti/flores_101_sna,gsarti/flores_101_snd,gsarti/flores_101_slk,gsarti/flores_101_slv,gsarti/flores_101_som,gsarti/flores_101_ckb,gsarti/flores_101_spa,gsarti/flores_101_swh,gsarti/flores_101_swe,gsarti/flores_101_tgk,gsarti/flores_101_tam,gsarti/flores_101_tel,gsarti/flores_101_tha,gsarti/flores_101_tur,gsarti/flores_101_ukr,gsarti/flores_101_umb,gsarti/flores_101_urd,gsarti/flores_101_uzb,gsarti/flores_101_vie,gsarti/flores_101_cym,gsarti/flores_101_wol,gsarti/flores_101_xho,gsarti/flores_101_yor,gsarti/flores_101_zul \
+ --eval_fp32 \
+ --deepspeed \
+ --deepspeed_config ds_config.json \
+ --intermed_results \
+ --adaptive_seq_len \
+ --micro_bs_multiplier 4 \
+ $MEGATRON_REQUIRED_ARGS \
+ "
+
+ GPUS_PER_NODE=1
+ NNODES=$SLURM_NNODES
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
+ MASTER_PORT=6002
+ export LAUNCHER="python -u -m torch.distributed.run \
+ --nproc_per_node $GPUS_PER_NODE \
+ --nnodes $NNODES \
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
+ --rdzv_backend c10d \
+ --max_restarts 0 \
+ --tee 3 \
+ "
+
+ export CUDA_LAUNCH_BLOCKING=1
+
+ echo $LAUNCHER $CMD
+
+ export PYTHONPATH=$MEGATRON_DEEPSPEED_REPO
+
+ $LAUNCHER $CMD 2>&1 | tee $VARIANT-eval-harness.log
evaluation/results/tr11/scripts/run_bsevalharness_tr11f-6b3-ml.slurm ADDED
@@ -0,0 +1,121 @@
+ #!/bin/bash
+ #SBATCH --job-name=run_bsevalharness-tr11f-6b3-ml
+ #SBATCH --partition=gpu_p5
+ #SBATCH --constraint=a100
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=8 # number of cores per tasks
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+ #SBATCH --reservation=hug
+
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-muennighofflmeval
+
+ echo "START TIME: $(date)"
+
+ # a unique identifier for the current eval, ideally corresponding to the model name
+ VARIANT="tr11f-6b3-ml-bsevalharness"
+
+
+ CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11f-6B3-ml/checkpoints/main/global_step337500
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bslmeval/Megatron-DeepSpeed
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasetseval
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+ export TOKENIZERS_PARALLELISM=false
+
+ cd $MEGATRON_DEEPSPEED_REPO
+
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
+
+ PP_SIZE=1
+ TP_SIZE=1
+ SEQ_LEN=2048
+
+ # different from the training MICRO_BATCH_SIZE - no optim memory, so can do bigger BS
+ # make as big as it can fit into gpu w/o OOM, but not too close to 100%
+ EVAL_MICRO_BATCH_SIZE=1
+
+ #dummy arguments to make megatron happy.
+ MEGATRON_REQUIRED_ARGS=" \
+ --num-layers -1 \
+ --hidden-size -1 \
+ --num-attention-heads -1 \
+ --seq-length -1 \
+ --max-position-embeddings -1 \
+ "
+
+
+ ZERO_STAGE=0
+
+ config_json="./ds_config.json"
+
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
+ cat <<EOT > $config_json
+ {
+   "train_micro_batch_size_per_gpu": 1,
+   "train_batch_size": 1,
+   "gradient_clipping": 1.0,
+   "zero_optimization": {
+     "stage": $ZERO_STAGE
+   },
+   "bf16": {
+     "enabled": false
+   },
+   "steps_per_print": 2000,
+   "wall_clock_breakdown": false
+ }
+ EOT
+
+ CMD="./tasks/eval_harness/evaluate_bsevalharness.py \
+ --load $CHECKPOINT_PATH \
+ --results_path $VARIANT-results.json \
+ --tensor-model-parallel-size $TP_SIZE \
+ --pipeline-model-parallel-size $PP_SIZE \
+ --tokenizer-type PretrainedFromHF \
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
+ --micro-batch-size $EVAL_MICRO_BATCH_SIZE \
+ --no-load-optim \
+ --no-load-rng \
+ --inference \
+ --seq-length $SEQ_LEN \
+ --task_list axb,axg,boolq,cb,cola,copa,crows_pairs_english,crows_pairs_french,diabla,e2e_nlg_cleaned,mnli,mnli_mismatched,multirc,piaf,qqp,rte,sst,tydiqa_primary,tydiqa_secondary,wic,wsc,wnli,wino_bias_type1_anti,wino_bias_type1_pro,wino_bias_type2_anti,wino_bias_type2_pro,xquad_ar,xquad_en,gsarti/flores_101_afr,gsarti/flores_101_amh,gsarti/flores_101_ara,gsarti/flores_101_hye,gsarti/flores_101_asm,gsarti/flores_101_ast,gsarti/flores_101_azj,gsarti/flores_101_bel,gsarti/flores_101_ben,gsarti/flores_101_bos,gsarti/flores_101_bul,gsarti/flores_101_mya,gsarti/flores_101_cat,gsarti/flores_101_ceb,gsarti/flores_101_zho_simpl,gsarti/flores_101_zho_trad,gsarti/flores_101_hrv,gsarti/flores_101_ces,gsarti/flores_101_dan,gsarti/flores_101_nld,gsarti/flores_101_eng,gsarti/flores_101_est,gsarti/flores_101_tgl,gsarti/flores_101_fin,gsarti/flores_101_fra,gsarti/flores_101_ful,gsarti/flores_101_glg,gsarti/flores_101_lug,gsarti/flores_101_kat,gsarti/flores_101_deu,gsarti/flores_101_ell,gsarti/flores_101_guj,gsarti/flores_101_hau,gsarti/flores_101_heb,gsarti/flores_101_hin,gsarti/flores_101_hun,gsarti/flores_101_isl,gsarti/flores_101_ibo,gsarti/flores_101_ind,gsarti/flores_101_gle,gsarti/flores_101_ita,gsarti/flores_101_jpn,gsarti/flores_101_jav,gsarti/flores_101_kea,gsarti/flores_101_kam,gsarti/flores_101_kan,gsarti/flores_101_kaz,gsarti/flores_101_khm,gsarti/flores_101_kor,gsarti/flores_101_kir,gsarti/flores_101_lao,gsarti/flores_101_lav,gsarti/flores_101_lin,gsarti/flores_101_lit,gsarti/flores_101_luo,gsarti/flores_101_ltz,gsarti/flores_101_mkd,gsarti/flores_101_msa,gsarti/flores_101_mal,gsarti/flores_101_mlt,gsarti/flores_101_mri,gsarti/flores_101_mar,gsarti/flores_101_mon,gsarti/flores_101_npi,gsarti/flores_101_nso,gsarti/flores_101_nob,gsarti/flores_101_nya,gsarti/flores_101_oci,gsarti/flores_101_ory,gsarti/flores_101_orm,gsarti/flores_101_pus,gsarti/flores_101_fas,gsarti/flores_101_pol,gsarti/flores_101_por,gsarti/flores_101_pan,gsarti/flores_101_ron,gsarti/flores_101_rus,gsarti/flores_101_srp,gsarti/flores_101_sna,gsarti/flores_101_snd,gsarti/flores_101_slk,gsarti/flores_101_slv,gsarti/flores_101_som,gsarti/flores_101_ckb,gsarti/flores_101_spa,gsarti/flores_101_swh,gsarti/flores_101_swe,gsarti/flores_101_tgk,gsarti/flores_101_tam,gsarti/flores_101_tel,gsarti/flores_101_tha,gsarti/flores_101_tur,gsarti/flores_101_ukr,gsarti/flores_101_umb,gsarti/flores_101_urd,gsarti/flores_101_uzb,gsarti/flores_101_vie,gsarti/flores_101_cym,gsarti/flores_101_wol,gsarti/flores_101_xho,gsarti/flores_101_yor,gsarti/flores_101_zul \
+ --eval_fp32 \
+ --deepspeed \
+ --deepspeed_config ds_config.json \
+ --intermed_results \
+ --adaptive_seq_len \
+ --micro_bs_multiplier 8 \
+ $MEGATRON_REQUIRED_ARGS \
+ "
+
+ GPUS_PER_NODE=1
+ NNODES=$SLURM_NNODES
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
+ MASTER_PORT=6000
+ export LAUNCHER="python -u -m torch.distributed.run \
+ --nproc_per_node $GPUS_PER_NODE \
+ --nnodes $NNODES \
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
+ --rdzv_backend c10d \
+ --max_restarts 0 \
+ --tee 3 \
+ "
+
+ export CUDA_LAUNCH_BLOCKING=1
+
+ echo $LAUNCHER $CMD
+
+ export PYTHONPATH=$MEGATRON_DEEPSPEED_REPO
+
+ $LAUNCHER $CMD 2>&1 | tee $VARIANT-eval-harness.log
evaluation/results/tr11/scripts/run_evalharness_deepspeed.md ADDED
@@ -0,0 +1,158 @@
+ # How to run lm-eval on a Megatron-DeepSpeed checkpoint using the original setup
+
+ This particular setup uses the normal deepspeed checkpoint and requires no conversion to Megatron-LM.
+
+ This doc assumes usage on JZ, so there are some peculiar requirements in places. Ignore these if you're not running this on JZ.
+
+ ## Prerequisites
+
+ 1. Install software
+
+ On a login console with external network:
+
+ Get the lm-eval harness (https://github.com/EleutherAI/lm-evaluation-harness) and `best-download==0.0.7`, needed to download some tasks.
+ ```
+ start-prod
+ pip install best-download==0.0.7
+ pip install git+https://github.com/EleutherAI/lm-evaluation-harness
+ ```
+
+ 2. Pre-download needed datasets
+
+ Create some symlinks to work around the lm-eval harness' issues with the relative position of data:
+ ```
+ mkdir data
+ ln -s `pwd`/data tasks/eval_harness/data
+ ```
+ Also make sure `data` is not on one of the limited partitions like WORKSF.
+
+ Then install datasets for the tasks:
+ ```
+ python ./tasks/eval_harness/download.py --task_list
+ arc_challenge,arc_easy,boolq,copa,hellaswag,lambada,logiqa,mathqa,mc_taco,mrpc,multirc,openbookqa,piqa,prost,pubmedqa,qnli,qqp,race,rte,sciq,sst,triviaqa,webqs,wic,winogrande,wnli,wsc
+ ```
+ and make sure that `export HF_DATASETS_OFFLINE=1` is set.
+
+ If there are things like custom tokenizers, pre-download those too, e.g.:
+
+ ```
+ python -c "from transformers import AutoTokenizer; AutoTokenizer.from_pretrained('bigscience/oscar_13_languages_alpha_weight')"
+ ```
+ and make sure that `export TRANSFORMERS_OFFLINE=1` is in the script.
+ You know there is a custom tokenizer if the training script had something like:
+
+ ```
+ --tokenizer-type PretrainedFromHF \
+ --tokenizer-name-or-path bigscience/oscar_13_languages_alpha_weight \
+ ```
+
+ 3. Prepare the slurm script
+
+ Prepare the run script, replacing `variant` with a unique identifier for the current eval, so that multiple evals can run in parallel without all logging into the same `results.json` file, e.g. `tr9c-1B3-swiglu`:
+
+ ```
+ cp examples/run_evalharness_deepspeed.slurm run_evalharness-variant.slurm
+ ```
+
+ Now edit `run_evalharness-variant.slurm`.
+
+
+ Note that the eval code knows to pull the original training args from the checkpoint, so we don't need to pass any of those; we only need to set up the evaluation args.
+
+ Note that for the bigscience lm-eval-harness fork (https://github.com/bigscience-workshop/lm-evaluation-harness), the corresponding scripts are `evaluate_bsevalharness.py` & `run_bsevalharness_tr11-176b-ml.slurm`.
+
+ 1. Edit:
+
+ ```
+ PP_SIZE=1
+ TP_SIZE=1
+ ```
+ to match the eval topology. If the model fits into 1 GPU, there is nothing to change.
+
+ The eval script will automatically reshape the model if it was trained with a different topology.
+
+
+ 2. Adjust the following to fit the chosen GPU. As of the last check, the settings for the 1.3B model are one of:
+ ```
+ EVAL_MICRO_BATCH_SIZE=6 # 16GB GPU 1.3B model
+ EVAL_MICRO_BATCH_SIZE=12 # 32GB GPU 1.3B model
+ ```
+
+ If you get OOM, lower it further.
+
+ 3. If not using the DeepSpeed path, disable it by removing:
+
+ ```
+ --deepspeed \
+ --deepspeed_config ds_config.json \
+ ```
+
+ If you didn't disable it and the program crashes on checkpoint loading, unable to find some key, disable DeepSpeed as explained above.
+
+ 4. Additional flags
+
+ - To reduce the number of iterations for stderr estimation, use e.g. `--bootstrap_iters 2`. This saves 1-2 minutes per dataset.
+ - To print intermediate results when running multiple tasks, use `--intermed_results`.
+ - To reduce the pipeline bubble when setting PP, use the flag `--micro_bs_multiplier`. Reducing `--micro-batch-size` may be needed when increasing the multiplier.
+ - Running the 176B model with PP=8, `--micro_bs_multiplier 8` & `--micro-batch-size 4` produced the fastest results for PiQA on 1 node in 2min18s (see the sketch below).
+
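Putting the flags together, a hypothetical sketch of how they would look inside the `CMD` of the slurm script (only the flags discussed above are shown; the `...` stands for the unchanged arguments):

```
./tasks/eval_harness/evaluate.py \
    ... \
    --bootstrap_iters 2 \
    --intermed_results \
    --micro_bs_multiplier 8 \
    --micro-batch-size 4 \
    ...
```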
+ ## Eval
+
+ Currently it takes 2-3 hours to run the 1.3B model on a 32GB GPU and 6-7h on a 16GB GPU, so a 20h slurm job should be enough.
+
+ When ready, launch:
+ ```
+ sbatch ./run_evalharness-variant.slurm
+ ```
+
+ To monitor progress:
+ ```
+ tail -f $VARIANT-eval-harness.log
+ ```
+ where the variant is what you set `$VARIANT` to in the slurm script.
+
+ The template is set up for a 16GB GPU since these are easier to come by. If you change to 32GB, adjust:
+ ```
+ #SBATCH --constraint=v100-32g
+ ...
+ EVAL_MICRO_BATCH_SIZE=12 # 32GB GPU 1.3B model
+ ```
+
+
+ Note that the original ETA at the start of the run can be 10x longer than the actual outcome. For example, it may suggest 18 hours but complete in 2 hours.
+
+
+ ## Short eval
+
+ If you just want to quickly test that everything can run to the end, edit `tasks/eval_harness/evaluate.py`, e.g. to run only 10 batches:
+ ```
+ - results = evaluator.evaluate(adaptor, task_dict, False, 0, None)
+ + results = evaluator.evaluate(adaptor, task_dict, False, 0, 10)
+ ```
+
+ (XXX: could be a cmd line option so that code won't need to be modified)
+
+
+ ## Import into spreadsheet
+
+ https://docs.google.com/spreadsheets/d/1CI8Q9RCblLRzUOPJ6ViqBmo284-8ojluQ-CmaEuhuv0/edit?usp=sharing
+
+ Note that the spreadsheet format is quite different, so use this script:
+ ```
+ ./tasks/eval_harness/report-to-csv.py results.json
+ ```
+ to reformat the JSON results into CSV while changing their shape to match the spreadsheet format.
+
+ Since some records might be missing or extraneous, here is the best way to do it:
+
+ 1. Copy the data from the first 2 columns to some place under the main spreadsheet.
+
+ 2. Put the pointer to the 3rd column next to where the 2 first columns were copied.
+
+ 3. Import `results.csv` using File -> Import -> File ->
+
+ Import location: Replace data at selected cell
+
+ 4. Now it should be easy to align the new records with the old ones - delete irrelevant records and Insert->Cells where data is missing until the first 2 columns match.
+
+ 5. Now create 2 columns in the main table on top; it should then be safe to copy-n-paste the 2-column data range, without the task/metric columns, into the newly created space.
evaluation/results/tr11/scripts/run_evalharness_deepspeed.slurm ADDED
@@ -0,0 +1,98 @@
+ #!/bin/bash
+ #SBATCH --job-name=eval-harness-deepspeed
+ #SBATCH --constraint=v100-16g
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=40 # number of cores per tasks
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@gpu
+
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-prod
+
+ echo "START TIME: $(date)"
+
+ # a unique identifier for the current eval so that multiple evals could run in parallel and not all log into the same "results.json" file.
+ VARIANT="tr9c-1B3-swiglu"
+
+ CHECKPOINT_PATH=/gpfsdsstore/projects/rech/six/commun/checkpoints/tr3m-1B3-emb-norm-pile/global_step296023
+ MEGATRON_DEEPSPEED_REPO=/gpfsssd/worksf/projects/rech/six/commun/code/eval/Megatron-DeepSpeed
+
+ # you want these 2 on JZ, and pre-download/cache any datasets/tokenizers/models
+ # but comment these out if you're running on a node with Internet access
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ cd $MEGATRON_DEEPSPEED_REPO
+
+ # eval topology
+ PP_SIZE=1
+ TP_SIZE=1
+
+ VOCAB_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-vocab.json
+ MERGE_FILE=$MEGATRON_DEEPSPEED_REPO/data/gpt2-merges.txt
+ SEQ_LEN=2048
+
+ # different from the training MICRO_BATCH_SIZE - no optim memory, so can do bigger BS
+ # make as big as it can fit into gpu w/o OOM, but not too close to 100%
+
+ EVAL_MICRO_BATCH_SIZE=6 # 16GB GPU 1.3B model
+ #EVAL_MICRO_BATCH_SIZE=12 # 32GB GPU 1.3B model
+
+
+ #dummy arguments to make megatron happy.
+ MEGATRON_REQUIRED_ARGS=" \
+ --num-layers -1 \
+ --hidden-size -1 \
+ --num-attention-heads -1 \
+ --seq-length -1 \
+ --max-position-embeddings -1
+ "
+
+
+ ZERO_STAGE=0
+
+ config_json="./ds_config.json"
+ cat <<EOT > $config_json
+ {
+   "train_micro_batch_size_per_gpu": 1,
+   "train_batch_size": 1,
+   "zero_optimization": { "stage": $ZERO_STAGE },
+   "fp16": { "enabled": true },
+   "steps_per_print": 2000,
+   "wall_clock_breakdown": false
+ }
+ EOT
+
+ CMD="./tasks/eval_harness/evaluate.py \
+ --load $CHECKPOINT_PATH \
+ --results_path $VARIANT-results.json \
+ --tensor-model-parallel-size $TP_SIZE \
+ --pipeline-model-parallel-size $PP_SIZE \
+ --vocab-file $VOCAB_FILE \
+ --merge-file $MERGE_FILE \
+ --micro-batch-size $EVAL_MICRO_BATCH_SIZE \
+ --no-load-optim \
+ --no-load-rng \
+ --inference \
+ --deepspeed \
+ --deepspeed_config ds_config.json \
+ --seq-length $SEQ_LEN \
+ --adaptive_seq_len \
+ --eval_fp32 \
+ --task_list arc_challenge,arc_easy,boolq,copa,hellaswag,lambada,logiqa,mathqa,mc_taco,mrpc,multirc,openbookqa,piqa,prost,pubmedqa,qnli,qqp,race,rte,sst,webqs,wic,winogrande,wnli,wsc,triviaqa,sciq \
+ $MEGATRON_REQUIRED_ARGS \
+ "
+
+ N_GPUS=1
+ LAUNCHER="deepspeed --num_gpus $N_GPUS"
+ echo $LAUNCHER $CMD
+
+ export PYTHONPATH=$MEGATRON_DEEPSPEED_REPO
+
+ $LAUNCHER $CMD 2>&1 | tee $VARIANT-eval-harness.log
evaluation/results/tr11/scripts/run_evalharness_tr11-176b-ml.slurm ADDED
@@ -0,0 +1,121 @@
+ #!/bin/bash
+ #SBATCH --job-name=run_evalharness-tr11-176b-ml
+ #SBATCH --partition=gpu_p5
+ #SBATCH --constraint=a100
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=64 # number of cores per tasks
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:8 # number of gpus
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-py38-pt111
+
+ echo "START TIME: $(date)"
+
+ # a unique identifier for the current eval, ideally corresponding to the model name
+ VARIANT="tr11-176b-ml"
+
+
+ CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11-176B-ml/checkpoints/main/global_step50000
+ MEGATRON_DEEPSPEED_REPO=/gpfsssd/worksf/projects/rech/six/commun/code/eval/Megatron-DeepSpeed
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+
+ cd $MEGATRON_DEEPSPEED_REPO
+
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
+
+ PP_SIZE=8
+ TP_SIZE=1
+ SEQ_LEN=2048
+
+ # different from the training MICRO_BATCH_SIZE - no optim memory, so can do bigger BS
+ # make as big as it can fit into gpu w/o OOM, but not too close to 100%
+ EVAL_MICRO_BATCH_SIZE=1
+
+ #dummy arguments to make megatron happy.
+ MEGATRON_REQUIRED_ARGS=" \
+ --num-layers -1 \
+ --hidden-size -1 \
+ --num-attention-heads -1 \
+ --seq-length -1 \
+ --max-position-embeddings -1 \
+ "
+
+
+ ZERO_STAGE=0
+
+ config_json="./ds_config.json"
+
+ # Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
+ cat <<EOT > $config_json
+ {
+   "train_micro_batch_size_per_gpu": 1,
+   "train_batch_size": 1,
+   "gradient_clipping": 1.0,
+   "zero_optimization": {
+     "stage": $ZERO_STAGE
+   },
+   "bf16": {
+     "enabled": true
+   },
+   "steps_per_print": 2000,
+   "wall_clock_breakdown": false
+ }
+ EOT
+
+
+ CMD="./tasks/eval_harness/evaluate.py \
+ --load $CHECKPOINT_PATH \
+ --results_path $VARIANT-results.json \
+ --tensor-model-parallel-size $TP_SIZE \
+ --pipeline-model-parallel-size $PP_SIZE \
+ --tokenizer-type PretrainedFromHF \
+ --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
+ --micro-batch-size $EVAL_MICRO_BATCH_SIZE \
+ --no-load-optim \
+ --no-load-rng \
+ --bf16 \
+ --inference \
+ --seq-length $SEQ_LEN \
+ --task_list arc_challenge,arc_easy,boolq,copa,headqa,hellaswag,lambada,logiqa,mathqa,mc_taco,mrpc,multirc,openbookqa,piqa,prost,pubmedqa,qnli,qqp,race,rte,sciq,sst,triviaqa,webqs,wic,winogrande,wnli,wsc \
+ --deepspeed \
+ --deepspeed_config ds_config.json \
+ --intermed_results \
+ --adaptive_seq_len \
+ --micro_bs_multiplier 16 \
+ --offloadearly \
+ $MEGATRON_REQUIRED_ARGS \
+ "
+
+ GPUS_PER_NODE=8
+ NNODES=$SLURM_NNODES
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
+ MASTER_PORT=6000
+ export LAUNCHER="python -u -m torch.distributed.run \
+ --nproc_per_node $GPUS_PER_NODE \
+ --nnodes $NNODES \
+ --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
+ --rdzv_backend c10d \
+ --max_restarts 0 \
+ --tee 3 \
+ "
+
+ export CUDA_LAUNCH_BLOCKING=1
+
+ echo $LAUNCHER $CMD
+
+ export PYTHONPATH=$MEGATRON_DEEPSPEED_REPO
+
+ $LAUNCHER $CMD 2>&1 | tee $VARIANT-eval-harness.log
evaluation/results/tr11/scripts/run_evalharness_tr11b-1b3-ml.slurm ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ #!/bin/bash
+ #SBATCH --job-name=run_evalharness-tr11b-1b3-ml
+ #SBATCH --partition=gpu_p5
+ #SBATCH --constraint=a100
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=8 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+ #SBATCH --reservation=hug
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-py38-pt111
+
+ echo "START TIME: $(date)"
+
+ # a unique identifier for the current eval, ideally corresponding to the model name
+ VARIANT="tr11b-1b3-ml-evalharness"
+
+
+ CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11b-1B3-ml/checkpoints/main/global_step340500
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/megdsbslmeval/Megatron-DeepSpeed
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+
+ cd $MEGATRON_DEEPSPEED_REPO
+
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
+
+ PP_SIZE=1
+ TP_SIZE=1
+ SEQ_LEN=2048
+
+ # different from the training MICRO_BATCH_SIZE - no optimizer memory, so a bigger BS fits
+ # make it as big as fits on the gpu w/o OOM, but not too close to 100%
+ EVAL_MICRO_BATCH_SIZE=1
+
+ # dummy arguments to make megatron happy
+ MEGATRON_REQUIRED_ARGS=" \
+     --num-layers -1 \
+     --hidden-size -1 \
+     --num-attention-heads -1 \
+     --seq-length -1 \
+     --max-position-embeddings -1 \
+ "
+
+
+ ZERO_STAGE=0
+
+ config_json="./ds_config.json"
+
+ # DeepSpeed figures out GAS dynamically from the dynamic GBS via set_train_batch_size()
+ cat <<EOT > $config_json
+ {
+   "train_micro_batch_size_per_gpu": 1,
+   "train_batch_size": 1,
+   "gradient_clipping": 1.0,
+   "zero_optimization": {
+     "stage": $ZERO_STAGE
+   },
+   "bf16": {
+     "enabled": false
+   },
+   "steps_per_print": 2000,
+   "wall_clock_breakdown": false
+ }
+ EOT
+
+
+ CMD="./tasks/eval_harness/evaluate.py \
+     --load $CHECKPOINT_PATH \
+     --results_path $VARIANT-results.json \
+     --tensor-model-parallel-size $TP_SIZE \
+     --pipeline-model-parallel-size $PP_SIZE \
+     --tokenizer-type PretrainedFromHF \
+     --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
+     --micro-batch-size $EVAL_MICRO_BATCH_SIZE \
+     --no-load-optim \
+     --no-load-rng \
+     --eval_fp32 \
+     --inference \
+     --seq-length $SEQ_LEN \
+     --task_list arc_challenge,arc_easy,boolq,copa,headqa,hellaswag,lambada,logiqa,mathqa,mc_taco,mrpc,multirc,openbookqa,piqa,prost,pubmedqa,qnli,qqp,race,rte,sciq,sst,triviaqa,webqs,wic,winogrande,wnli,wsc \
+     --deepspeed \
+     --deepspeed_config ds_config.json \
+     --intermed_results \
+     --adaptive_seq_len \
+     --micro_bs_multiplier 8 \
+     $MEGATRON_REQUIRED_ARGS \
+ "
+
+ GPUS_PER_NODE=1
+ NNODES=$SLURM_NNODES
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
+ MASTER_PORT=6000
+ export LAUNCHER="python -u -m torch.distributed.run \
+     --nproc_per_node $GPUS_PER_NODE \
+     --nnodes $NNODES \
+     --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
+     --rdzv_backend c10d \
+     --max_restarts 0 \
+     --tee 3 \
+ "
+
+ export CUDA_LAUNCH_BLOCKING=1
+
+ echo $LAUNCHER $CMD
+
+ export PYTHONPATH=$MEGATRON_DEEPSPEED_REPO
+
+ $LAUNCHER $CMD 2>&1 | tee $VARIANT-eval-harness.log
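For reference, a minimal submission sketch for the script above (hedged: it assumes the path from the diff header and that the six@a100 allocation and hug reservation are usable):

    sbatch evaluation/results/tr11/scripts/run_evalharness_tr11b-1b3-ml.slurm
    # on success two artifacts appear inside $MEGATRON_DEEPSPEED_REPO (the cwd at launch):
    #   tr11b-1b3-ml-evalharness-results.json       # from --results_path
    #   tr11b-1b3-ml-evalharness-eval-harness.log   # from the final tee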
evaluation/results/tr11/scripts/run_evalharness_tr11c-2b5-ml.slurm ADDED
@@ -0,0 +1,120 @@
+ #!/bin/bash
+ #SBATCH --job-name=run_evalharness-tr11c-2b5-ml
+ #SBATCH --partition=gpu_p5
+ #SBATCH --constraint=a100
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=8 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+ #SBATCH --reservation=hug
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-py38-pt111
+
+ echo "START TIME: $(date)"
+
+ # a unique identifier for the current eval, ideally corresponding to the model name
+ VARIANT="tr11c-2b5-ml-evalharness"
+
+
+ CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11c-2B5-ml/checkpoints/main/global_step337250
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/megdsbslmeval/Megatron-DeepSpeed
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+
+ cd $MEGATRON_DEEPSPEED_REPO
+
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
+
+ PP_SIZE=1
+ TP_SIZE=1
+ SEQ_LEN=2048
+
+ # different from the training MICRO_BATCH_SIZE - no optimizer memory, so a bigger BS fits
+ # make it as big as fits on the gpu w/o OOM, but not too close to 100%
+ EVAL_MICRO_BATCH_SIZE=1
+
+ # dummy arguments to make megatron happy
+ MEGATRON_REQUIRED_ARGS=" \
+     --num-layers -1 \
+     --hidden-size -1 \
+     --num-attention-heads -1 \
+     --seq-length -1 \
+     --max-position-embeddings -1 \
+ "
+
+
+ ZERO_STAGE=0
+
+ config_json="./ds_config.json"
+
+ # DeepSpeed figures out GAS dynamically from the dynamic GBS via set_train_batch_size()
+ cat <<EOT > $config_json
+ {
+   "train_micro_batch_size_per_gpu": 1,
+   "train_batch_size": 1,
+   "gradient_clipping": 1.0,
+   "zero_optimization": {
+     "stage": $ZERO_STAGE
+   },
+   "bf16": {
+     "enabled": false
+   },
+   "steps_per_print": 2000,
+   "wall_clock_breakdown": false
+ }
+ EOT
+
+
+ CMD="./tasks/eval_harness/evaluate.py \
+     --load $CHECKPOINT_PATH \
+     --results_path $VARIANT-results.json \
+     --tensor-model-parallel-size $TP_SIZE \
+     --pipeline-model-parallel-size $PP_SIZE \
+     --tokenizer-type PretrainedFromHF \
+     --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
+     --micro-batch-size $EVAL_MICRO_BATCH_SIZE \
+     --no-load-optim \
+     --no-load-rng \
+     --eval_fp32 \
+     --inference \
+     --seq-length $SEQ_LEN \
+     --task_list arc_challenge,arc_easy,boolq,copa,headqa,hellaswag,lambada,logiqa,mathqa,mc_taco,mrpc,multirc,openbookqa,piqa,prost,pubmedqa,qnli,qqp,race,rte,sciq,sst,triviaqa,webqs,wic,winogrande,wnli,wsc \
+     --deepspeed \
+     --deepspeed_config ds_config.json \
+     --intermed_results \
+     --adaptive_seq_len \
+     --micro_bs_multiplier 8 \
+     $MEGATRON_REQUIRED_ARGS \
+ "
+
+ GPUS_PER_NODE=1
+ NNODES=$SLURM_NNODES
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
+ MASTER_PORT=6000
+ export LAUNCHER="python -u -m torch.distributed.run \
+     --nproc_per_node $GPUS_PER_NODE \
+     --nnodes $NNODES \
+     --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
+     --rdzv_backend c10d \
+     --max_restarts 0 \
+     --tee 3 \
+ "
+
+ export CUDA_LAUNCH_BLOCKING=1
+
+ echo $LAUNCHER $CMD
+
+ export PYTHONPATH=$MEGATRON_DEEPSPEED_REPO
+
+ $LAUNCHER $CMD 2>&1 | tee $VARIANT-eval-harness.log
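The per-model tr11 eval-harness scripts in this commit differ only in the SBATCH resource lines, VARIANT, CHECKPOINT_PATH, and the micro_bs_multiplier. A hedged sketch of how the copies could be collapsed into a single template (run_evalharness_template.slurm is hypothetical, not a file in this commit):

    # sbatch --export propagates the environment into the job, so the
    # per-model values can live outside the script
    VARIANT=tr11c-2b5-ml-evalharness \
    CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11c-2B5-ml/checkpoints/main/global_step337250 \
    sbatch --export=ALL run_evalharness_template.slurm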
evaluation/results/tr11/scripts/run_evalharness_tr11d-760m-ml.slurm ADDED
@@ -0,0 +1,118 @@
+ #!/bin/bash
+ #SBATCH --job-name=run_evalharness-tr11d-760m-ml
+ #SBATCH --constraint=v100-32g
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=10 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@v100
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-py38-pt111
+
+ echo "START TIME: $(date)"
+
+ # a unique identifier for the current eval, ideally corresponding to the model name
+ VARIANT="tr11d-760m-ml-evalharness"
+
+ #/gpfsscratch/rech/six/commun/experiments/muennighoff/megdsbslmeval/Megatron-DeepSpeed
+ CHECKPOINT_PATH=/gpfsscratch/rech/six/commun/checkpoints/tr11d-760M-ml/checkpoints/main/global_step660750
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/megdsbslmeval/Megatron-DeepSpeed
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+
+ cd $MEGATRON_DEEPSPEED_REPO
+
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
+
+ PP_SIZE=1
+ TP_SIZE=1
+ SEQ_LEN=2048
+
+ # different from the training MICRO_BATCH_SIZE - no optimizer memory, so a bigger BS fits
+ # make it as big as fits on the gpu w/o OOM, but not too close to 100%
+ EVAL_MICRO_BATCH_SIZE=1
+
+ # dummy arguments to make megatron happy
+ MEGATRON_REQUIRED_ARGS=" \
+     --num-layers -1 \
+     --hidden-size -1 \
+     --num-attention-heads -1 \
+     --seq-length -1 \
+     --max-position-embeddings -1 \
+ "
+
+
+ ZERO_STAGE=0
+
+ config_json="./ds_config.json"
+
+ # DeepSpeed figures out GAS dynamically from the dynamic GBS via set_train_batch_size()
+ cat <<EOT > $config_json
+ {
+   "train_micro_batch_size_per_gpu": 1,
+   "train_batch_size": 1,
+   "gradient_clipping": 1.0,
+   "zero_optimization": {
+     "stage": $ZERO_STAGE
+   },
+   "bf16": {
+     "enabled": false
+   },
+   "steps_per_print": 2000,
+   "wall_clock_breakdown": false
+ }
+ EOT
+
+
+ CMD="./tasks/eval_harness/evaluate.py \
+     --load $CHECKPOINT_PATH \
+     --results_path $VARIANT-results.json \
+     --tensor-model-parallel-size $TP_SIZE \
+     --pipeline-model-parallel-size $PP_SIZE \
+     --tokenizer-type PretrainedFromHF \
+     --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
+     --micro-batch-size $EVAL_MICRO_BATCH_SIZE \
+     --no-load-optim \
+     --no-load-rng \
+     --eval_fp32 \
+     --inference \
+     --seq-length $SEQ_LEN \
+     --task_list arc_challenge,arc_easy,boolq,copa,headqa,hellaswag,lambada,logiqa,mathqa,mc_taco,mrpc,multirc,openbookqa,piqa,prost,pubmedqa,qnli,qqp,race,rte,sciq,sst,triviaqa,webqs,wic,winogrande,wnli,wsc \
+     --deepspeed \
+     --deepspeed_config ds_config.json \
+     --intermed_results \
+     --adaptive_seq_len \
+     --micro_bs_multiplier 8 \
+     $MEGATRON_REQUIRED_ARGS \
+ "
+
+ GPUS_PER_NODE=1
+ NNODES=$SLURM_NNODES
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
+ MASTER_PORT=6000
+ export LAUNCHER="python -u -m torch.distributed.run \
+     --nproc_per_node $GPUS_PER_NODE \
+     --nnodes $NNODES \
+     --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
+     --rdzv_backend c10d \
+     --max_restarts 0 \
+     --tee 3 \
+ "
+
+ export CUDA_LAUNCH_BLOCKING=1
+
+ echo $LAUNCHER $CMD
+
+ export PYTHONPATH=$MEGATRON_DEEPSPEED_REPO
+
+ $LAUNCHER $CMD 2>&1 | tee $VARIANT-eval-harness.log
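Note that sbatch command-line options override the in-file #SBATCH directives, so the v100 scripts above and below can be retargeted at submission time without editing them (the example values are hypothetical):

    sbatch --time=04:00:00 --constraint=v100-32g \
        evaluation/results/tr11/scripts/run_evalharness_tr11d-760m-ml.slurm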
evaluation/results/tr11/scripts/run_evalharness_tr11e-350m-ml.slurm ADDED
@@ -0,0 +1,118 @@
+ #!/bin/bash
+ #SBATCH --job-name=run_evalharness-tr11e-350m-ml
+ #SBATCH --constraint=v100-32g
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=10 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@v100
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-py38-pt111
+
+ echo "START TIME: $(date)"
+
+ # a unique identifier for the current eval, ideally corresponding to the model name
+ VARIANT="tr11e-350m-ml-evalharness"
+
+
+ CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11e-350M-ml/checkpoints/main/global_step659500
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/megdsbslmeval/Megatron-DeepSpeed
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+
+ cd $MEGATRON_DEEPSPEED_REPO
+
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
+
+ PP_SIZE=1
+ TP_SIZE=1
+ SEQ_LEN=2048
+
+ # different from the training MICRO_BATCH_SIZE - no optimizer memory, so a bigger BS fits
+ # make it as big as fits on the gpu w/o OOM, but not too close to 100%
+ EVAL_MICRO_BATCH_SIZE=1
+
+ # dummy arguments to make megatron happy
+ MEGATRON_REQUIRED_ARGS=" \
+     --num-layers -1 \
+     --hidden-size -1 \
+     --num-attention-heads -1 \
+     --seq-length -1 \
+     --max-position-embeddings -1 \
+ "
+
+
+ ZERO_STAGE=0
+
+ config_json="./ds_config.json"
+
+ # DeepSpeed figures out GAS dynamically from the dynamic GBS via set_train_batch_size()
+ cat <<EOT > $config_json
+ {
+   "train_micro_batch_size_per_gpu": 1,
+   "train_batch_size": 1,
+   "gradient_clipping": 1.0,
+   "zero_optimization": {
+     "stage": $ZERO_STAGE
+   },
+   "bf16": {
+     "enabled": false
+   },
+   "steps_per_print": 2000,
+   "wall_clock_breakdown": false
+ }
+ EOT
+
+
+ CMD="./tasks/eval_harness/evaluate.py \
+     --load $CHECKPOINT_PATH \
+     --results_path $VARIANT-results.json \
+     --tensor-model-parallel-size $TP_SIZE \
+     --pipeline-model-parallel-size $PP_SIZE \
+     --tokenizer-type PretrainedFromHF \
+     --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
+     --micro-batch-size $EVAL_MICRO_BATCH_SIZE \
+     --no-load-optim \
+     --no-load-rng \
+     --eval_fp32 \
+     --inference \
+     --seq-length $SEQ_LEN \
+     --task_list arc_challenge,arc_easy,boolq,copa,headqa,hellaswag,lambada,logiqa,mathqa,mc_taco,mrpc,multirc,openbookqa,piqa,prost,pubmedqa,qnli,qqp,race,rte,sciq,sst,triviaqa,webqs,wic,winogrande,wnli,wsc \
+     --deepspeed \
+     --deepspeed_config ds_config.json \
+     --intermed_results \
+     --adaptive_seq_len \
+     --micro_bs_multiplier 8 \
+     $MEGATRON_REQUIRED_ARGS \
+ "
+
+ GPUS_PER_NODE=1
+ NNODES=$SLURM_NNODES
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
+ MASTER_PORT=6000
+ export LAUNCHER="python -u -m torch.distributed.run \
+     --nproc_per_node $GPUS_PER_NODE \
+     --nnodes $NNODES \
+     --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
+     --rdzv_backend c10d \
+     --max_restarts 0 \
+     --tee 3 \
+ "
+
+ export CUDA_LAUNCH_BLOCKING=1
+
+ echo $LAUNCHER $CMD
+
+ export PYTHONPATH=$MEGATRON_DEEPSPEED_REPO
+
+ $LAUNCHER $CMD 2>&1 | tee $VARIANT-eval-harness.log
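A sketch of the batching arithmetic, assuming the harness fork multiplies --micro-batch-size by --micro_bs_multiplier once --adaptive_seq_len has shrunk the padded length (this reading of the two flags is an assumption, not something documented in the diff):

    # hedged sketch, not part of the scripts
    EVAL_MICRO_BATCH_SIZE=1
    MICRO_BS_MULTIPLIER=8
    # up to 8 sequences per forward pass on the smaller models
    echo $((EVAL_MICRO_BATCH_SIZE * MICRO_BS_MULTIPLIER))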
evaluation/results/tr11/scripts/run_evalharness_tr11f-6b3-ml.slurm ADDED
@@ -0,0 +1,120 @@
+ #!/bin/bash
+ #SBATCH --job-name=run_evalharness-tr11f-6b3-ml
+ #SBATCH --partition=gpu_p5
+ #SBATCH --constraint=a100
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=8 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+ #SBATCH --reservation=hug
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-py38-pt111
+
+ echo "START TIME: $(date)"
+
+ # a unique identifier for the current eval, ideally corresponding to the model name
+ VARIANT="tr11f-6b3-ml-evalharness"
+
+
+ CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11f-6B3-ml/checkpoints/main/global_step337500
+ MEGATRON_DEEPSPEED_REPO=/gpfsssd/worksf/projects/rech/six/commun/code/eval/Megatron-DeepSpeed
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+
+ cd $MEGATRON_DEEPSPEED_REPO
+
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
+
+ PP_SIZE=1
+ TP_SIZE=1
+ SEQ_LEN=2048
+
+ # different from the training MICRO_BATCH_SIZE - no optimizer memory, so a bigger BS fits
+ # make it as big as fits on the gpu w/o OOM, but not too close to 100%
+ EVAL_MICRO_BATCH_SIZE=1
+
+ # dummy arguments to make megatron happy
+ MEGATRON_REQUIRED_ARGS=" \
+     --num-layers -1 \
+     --hidden-size -1 \
+     --num-attention-heads -1 \
+     --seq-length -1 \
+     --max-position-embeddings -1 \
+ "
+
+
+ ZERO_STAGE=0
+
+ config_json="./ds_config.json"
+
+ # DeepSpeed figures out GAS dynamically from the dynamic GBS via set_train_batch_size()
+ cat <<EOT > $config_json
+ {
+   "train_micro_batch_size_per_gpu": 1,
+   "train_batch_size": 1,
+   "gradient_clipping": 1.0,
+   "zero_optimization": {
+     "stage": $ZERO_STAGE
+   },
+   "bf16": {
+     "enabled": false
+   },
+   "steps_per_print": 2000,
+   "wall_clock_breakdown": false
+ }
+ EOT
+
+
+ CMD="./tasks/eval_harness/evaluate.py \
+     --load $CHECKPOINT_PATH \
+     --results_path $VARIANT-results.json \
+     --tensor-model-parallel-size $TP_SIZE \
+     --pipeline-model-parallel-size $PP_SIZE \
+     --tokenizer-type PretrainedFromHF \
+     --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
+     --micro-batch-size $EVAL_MICRO_BATCH_SIZE \
+     --no-load-optim \
+     --no-load-rng \
+     --eval_fp32 \
+     --inference \
+     --seq-length $SEQ_LEN \
+     --task_list arc_challenge,arc_easy,boolq,copa,headqa,hellaswag,lambada,logiqa,mathqa,mc_taco,mrpc,multirc,openbookqa,piqa,prost,pubmedqa,qnli,qqp,race,rte,sciq,sst,triviaqa,webqs,wic,winogrande,wnli,wsc \
+     --deepspeed \
+     --deepspeed_config ds_config.json \
+     --intermed_results \
+     --adaptive_seq_len \
+     --micro_bs_multiplier 4 \
+     $MEGATRON_REQUIRED_ARGS \
+ "
+
+ GPUS_PER_NODE=1
+ NNODES=$SLURM_NNODES
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
+ MASTER_PORT=6000
+ export LAUNCHER="python -u -m torch.distributed.run \
+     --nproc_per_node $GPUS_PER_NODE \
+     --nnodes $NNODES \
+     --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
+     --rdzv_backend c10d \
+     --max_restarts 0 \
+     --tee 3 \
+ "
+
+ export CUDA_LAUNCH_BLOCKING=1
+
+ echo $LAUNCHER $CMD
+
+ export PYTHONPATH=$MEGATRON_DEEPSPEED_REPO
+
+ $LAUNCHER $CMD 2>&1 | tee $VARIANT-eval-harness.log
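The 6B3 script above is the only tr11 variant that lowers --micro_bs_multiplier to 4, presumably for memory headroom on a single A100. A standard way to watch headroom while tuning that value (plain nvidia-smi, not part of the repo's scripts; run it on the compute node while the eval is in flight):

    nvidia-smi --query-gpu=memory.used,memory.total --format=csv -l 10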
evaluation/results/tr11/scripts/run_trevalharness_7b1.slurm ADDED
@@ -0,0 +1,60 @@
+ #!/bin/bash
+ #SBATCH --job-name=run_trevalharness-tr11f-6b3-ml
+ #SBATCH --partition=gpu_p5
+ #SBATCH --constraint=a100
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=8 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+ #SBATCH --reservation=hug
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
+ #conda activate muennighofflmevalgen
+ conda activate thomas_t_zero_evaluation
+
+ echo "START TIME: $(date)"
+
+ # defining the right environment variables
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+ export TOKENIZERS_PARALLELISM=false
+
+ # Converted transformers checkpoint
+ #MODEL_CKPT=/gpfsscratch/rech/six/commun/uan68tv-model-conversion/bloom
+ MODEL_CKPT=/gpfsscratch/rech/six/commun/experiments/muennighoff/bloomckpt/6b3/bloom-7b1
+
+ cd /gpfsscratch/rech/six/commun/commun/experiments/muennighoff/bslmevaltransformers/lm-evaluation-harness
+
+
+ DATASETS_AND_CONFIGS=(
+ arc_challenge
+ arc_easy
+ )
+ #,arc_easy,boolq,copa,headqa,hellaswag,lambada,logiqa,mathqa,mc_taco,mrpc,multirc,openbookqa,piqa,prost,pubmedqa,qnli,qqp,race,rte,sciq,sst,triviaqa,webqs,wic,winogrande,wnli,wsc
+
+ DATASET_AND_CONFIG=${DATASETS_AND_CONFIGS[$SLURM_ARRAY_TASK_ID]}
+ echo $DATASET_AND_CONFIG
+ IFS=',' read dataset_name <<< "${DATASET_AND_CONFIG}"
+
+ # Use this fork of lm-eval: https://github.com/bigscience-workshop/lm-evaluation-harness/pull/109
+ python main.py \
+     --model gpt2 \
+     --model_args pretrained=$MODEL_CKPT \
+     --batch_size 16 \
+     --tasks $dataset_name \
+     --output_path "${MODEL_CKPT}_${dataset_name}.json" \
+     --skip_tokenizer \
+     --no_cache \
+     --dtype=float16
+
+ echo "END TIME: $(date)"
evaluation/results/tr13/download_bslmeval.slurm ADDED
@@ -0,0 +1,37 @@
+ #!/bin/bash
+ #SBATCH --job-name=download-bslmeval
+ #SBATCH --partition=prepost
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=10 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@cpu
+
+ set -x -e
+
+ echo "START TIME: $(date)"
+
+ source $six_ALL_CCFRWORK/start-py38-pt111
+ conda activate muennighofflmeval
+
+ #export HF_DATASETS_OFFLINE=1
+ #export TRANSFORMERS_OFFLINE=1
+
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasetseval
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+ export TOKENIZERS_PARALLELISM=false
+
+ cd /gpfsscratch/rech/six/commun/experiments/muennighoff/bloomckpt/lm-evaluation-harness
+
+ # GEM/web_nlg_en,GEM/web_nlg_en_challenge_test_numbers,GEM/web_nlg_en_challenge_test_scramble,GEM/web_nlg_en_challenge_validation_sample,GEM/web_nlg_ru,GEM/web_nlg_ru_challenge_test_scramble,GEM/web_nlg_ru_challenge_validation_sample,GEM/wiki_auto_asset_turk_challenge_test_asset_backtranslation,GEM/wiki_auto_asset_turk_challenge_test_asset_bfp02,GEM/wiki_auto_asset_turk_challenge_test_asset_bfp05,GEM/wiki_auto_asset_turk_challenge_test_asset_nopunc,GEM/wiki_auto_asset_turk_challenge_test_turk_backtranslation,GEM/wiki_auto_asset_turk_challenge_test_turk_bfp02,GEM/wiki_auto_asset_turk_challenge_test_turk_bfp05,GEM/wiki_auto_asset_turk_challenge_test_turk_nopunc,GEM/wiki_auto_asset_turk_test_asset,GEM/wiki_auto_asset_turk_test_turk,GEM/wiki_lingua_ar,GEM/wiki_lingua_cs,GEM/wiki_lingua_de,GEM/wiki_lingua_en,GEM/wiki_lingua_es,GEM/wiki_lingua_fr,GEM/wiki_lingua_hi,GEM/wiki_lingua_id,GEM/wiki_lingua_it,GEM/wiki_lingua_ja,GEM/wiki_lingua_ko,GEM/wiki_lingua_nl,GEM/wiki_lingua_pt,GEM/wiki_lingua_ru,GEM/wiki_lingua_th,GEM/wiki_lingua_tr,GEM/wiki_lingua_vi,GEM/wiki_lingua_zh,gem_xsum,gem_xsum_challenge_sample,gem_xsum_challenge_test_backtranslation,gem_xsum_challenge_test_bfp_02,gem_xsum_challenge_test_bfp_05,gem_xsum_challenge_test_covid,gem_xsum_challenge_test_nopunc \
+ python3 main.py --model hf-causal \
+     --model_args pretrained=hf-internal-testing/tiny-random-gpt2,use_accelerate=True,tokenizer=hf-internal-testing/tiny-random-gpt2,dtype=float16 \
+     --tasks wmt14_fr_en,wmt19_ru_en,wmt19_zh_en \
+     --device cuda \
+     --limit 1 \
+     --no_cache \
+     --num_fewshot 0
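The download job above deliberately leaves the two offline variables commented out so that a tiny-random model pass with --limit 1 can populate the shared HF caches; once it has run, the actual eval jobs pin themselves offline with exactly those two variables:

    export HF_DATASETS_OFFLINE=1
    export TRANSFORMERS_OFFLINE=1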
evaluation/results/tr13/lmeval/megdsbslmeval.slurm ADDED
@@ -0,0 +1,139 @@
+ #!/bin/bash
+ #SBATCH --job-name=tr13-base-eval
+ #SBATCH --partition=gpu_p5
+ #SBATCH --constraint=a100
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=8 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+ #SBATCH --reservation=hug
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-muennighofflmeval
+
+ echo "START TIME: $(date)"
+
+ # a unique identifier for the current eval, ideally corresponding to the model name
+ VARIANT="tr13-base"
+
+ CHECKPOINT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr11f-6B3-ml/checkpoints/main/global_step163750
+ #CHECKPOINT_PATH=/gpfsscratch/rech/six/commun/checkpoints/tr13f-6B3-ml-t0/checkpoints/loss/global_step3100
+ MEGATRON_DEEPSPEED_REPO=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/megdsbslmeval/Megatron-DeepSpeed
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasetseval
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+ export TOKENIZERS_PARALLELISM=false
+
+ cd $MEGATRON_DEEPSPEED_REPO
+
+ TOKENIZER_NAME_OR_PATH=bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
+
+ PP_SIZE=1
+ TP_SIZE=1
+ SEQ_LEN=2048
+
+ # different from the training MICRO_BATCH_SIZE - no optimizer memory, so a bigger BS fits
+ # make it as big as fits on the gpu w/o OOM, but not too close to 100%
+ EVAL_MICRO_BATCH_SIZE=1
+
+ # dummy arguments to make megatron happy
+ MEGATRON_REQUIRED_ARGS=" \
+     --num-layers -1 \
+     --hidden-size -1 \
+     --num-attention-heads -1 \
+     --seq-length -1 \
+     --max-position-embeddings -1 \
+ "
+
+
+ ZERO_STAGE=0
+
+ config_json="./ds_config.json"
+
+ # DeepSpeed figures out GAS dynamically from the dynamic GBS via set_train_batch_size()
+ cat <<EOT > $config_json
+ {
+   "train_micro_batch_size_per_gpu": 1,
+   "train_batch_size": 1,
+   "gradient_clipping": 1.0,
+   "zero_optimization": {
+     "stage": $ZERO_STAGE
+   },
+   "bf16": {
+     "enabled": false
+   },
+   "steps_per_print": 2000,
+   "wall_clock_breakdown": false
+ }
+ EOT
+
+
+ # Only in evalharness: hellaswag ; winogrande
+ TASKS=(
+ anli_r1
+ anli_r2
+ anli_r3
+ cb
+ rte
+ wsc.fixed
+ wic
+ copa
+ xcopa_id
+ xcopa_sw
+ xcopa_ta
+ xcopa_vi
+ xcopa_zh
+ )
+
+
+ CMD="./tasks/eval_harness/evaluate_bsevalharness_prefix.py \
+     --load $CHECKPOINT_PATH \
+     --results_path $VARIANT-results.json \
+     --tensor-model-parallel-size $TP_SIZE \
+     --pipeline-model-parallel-size $PP_SIZE \
+     --tokenizer-type PretrainedFromHF \
+     --tokenizer-name-or-path $TOKENIZER_NAME_OR_PATH \
+     --micro-batch-size $EVAL_MICRO_BATCH_SIZE \
+     --no-load-optim \
+     --no-load-rng \
+     --eval_fp32 \
+     --inference \
+     --seq-length $SEQ_LEN \
+     --task_list ${TASKS[$SLURM_ARRAY_TASK_ID]} \
+     --deepspeed \
+     --deepspeed_config ds_config.json \
+     --intermed_results \
+     --adaptive_seq_len \
+     --micro_bs_multiplier 8 \
+     $MEGATRON_REQUIRED_ARGS \
+ "
+
+ GPUS_PER_NODE=1
+ NNODES=$SLURM_NNODES
+ MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
+ MASTER_PORT=$((6000 + $SLURM_ARRAY_TASK_ID))
+ export LAUNCHER="python -u -m torch.distributed.run \
+     --nproc_per_node $GPUS_PER_NODE \
+     --nnodes $NNODES \
+     --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
+     --rdzv_backend c10d \
+     --max_restarts 0 \
+     --tee 3 \
+ "
+
+ export CUDA_LAUNCH_BLOCKING=1
+
+ echo $LAUNCHER $CMD
+
+ export PYTHONPATH=$MEGATRON_DEEPSPEED_REPO
+
+ $LAUNCHER $CMD 2>&1 | tee $VARIANT-eval-harness.log
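TASKS above holds 13 entries and the script again carries no #SBATCH --array line, so a full sweep would be submitted as (hypothetical invocation):

    sbatch --array=0-12 evaluation/results/tr13/lmeval/megdsbslmeval.slurm

The per-task offset in MASTER_PORT=$((6000 + $SLURM_ARRAY_TASK_ID)) then keeps concurrent c10d rendezvous servers from colliding when several array tasks land on the same node.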
evaluation/results/tr13/lmeval/run_generation.slurm ADDED
@@ -0,0 +1,90 @@
+ #!/bin/bash
+ #SBATCH --job-name=evaluate_t0
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=8 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --constraint=a100
+ #SBATCH --reservation=hug
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+ #SBATCH --array=0-9
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
+ conda activate muennighofflmevalgen
+
+ echo "START TIME: $(date)"
+
+ # defining the right environment variables
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+ export TOKENIZERS_PARALLELISM=false
+
+ # Converted transformers checkpoint (successive assignments: the last one wins)
+ MODEL_CKPT=/gpfsscratch/rech/six/commun/experiments/muennighoff/bloomckpt/6b3t0/tr13f-6b3-ml-t0-lmtoks341b-t0toks13b-xp3capmixlossseq
+ MODEL_CKPT=/gpfsscratch/rech/six/commun/experiments/muennighoff/bloomckpt/6b3t0/tr13f-6b3-ml-t0-lmtoks341b-t0toks13b-xp3capmix
+ MODEL_CKPT=/gpfsscratch/rech/six/commun/experiments/muennighoff/bloomckpt/6b3/bloom-6b3
+ MODEL_CKPT=/gpfsscratch/rech/six/commun/experiments/muennighoff/bloomckpt/6b3t0/tr13f-6b3-ml-t0-lmtoks341b-t0toks13b-p31
+
+ cd /gpfsscratch/rech/six/commun/experiments/muennighoff/bslmevalgeneration/lm-evaluation-harness
+
+ # WMT19 ZH-EN does not work
+ DATASETS_AND_CONFIGS=(
+ wmt19_zh_en,zh-en,"version-en-zh-target"
+ wmt19_zh_en,zh-en,"a_good_translation-en-zh-target"
+ wmt19_zh_en,zh-en,"a_good_translation-en-zh-source+target"
+ wmt19_zh_en,zh-en,"xglm-en-zh-target"
+ wmt19_zh_en,zh-en,"gpt3-en-zh"
+ wmt19_zh_en,zh-en,"version-zh-en-target"
+ wmt19_zh_en,zh-en,"a_good_translation-zh-en-target"
+ wmt19_zh_en,zh-en,"a_good_translation-zh-en-source+target"
+ wmt19_zh_en,zh-en,"xglm-zh-en-target"
+ wmt19_zh_en,zh-en,"gpt3-zh-en"
+ )
+
+ DATASETS_AND_CONFIGS=(
+ wmt14_fr_en,fr-en,"version-en-fr-target"
+ wmt14_fr_en,fr-en,"a_good_translation-en-fr-target"
+ wmt14_fr_en,fr-en,"a_good_translation-en-fr-source+target"
+ wmt14_fr_en,fr-en,"xglm-en-fr-target"
+ wmt14_fr_en,fr-en,"gpt3-en-fr"
+ wmt14_fr_en,fr-en,"version-fr-en-target"
+ wmt14_fr_en,fr-en,"a_good_translation-fr-en-target"
+ wmt14_fr_en,fr-en,"a_good_translation-fr-en-source+target"
+ wmt14_fr_en,fr-en,"xglm-fr-en-target"
+ wmt14_fr_en,fr-en,"gpt3-fr-en"
+ )
+
+ # Use --limit 3000
+ DATASETS_AND_CONFIGS=(
+ mlsum_es,"es","layman_summ_es"
+ mlsum_es,"es","palm_prompt"
+ mlsum_es,"es","summarise_this_in_es_few_sentences"
+ )
+
+ DATASET_AND_CONFIG=${DATASETS_AND_CONFIGS[$SLURM_ARRAY_TASK_ID]}
+ echo $DATASET_AND_CONFIG
+
+ IFS=',' read dataset_name lang template_name <<< "${DATASET_AND_CONFIG}"
+
+ # Use this fork of lm-eval: https://github.com/bigscience-workshop/lm-evaluation-harness/pull/109
+ python main.py \
+     --model_api_name 'hf-causal' \
+     --model_args pretrained=$MODEL_CKPT,use_accelerate=True,tokenizer=$MODEL_CKPT,dtype=float16 \
+     --device cuda \
+     --batch_size 16 \
+     --no_tracking \
+     --task_name $dataset_name \
+     --template_names $template_name \
+     --bootstrap_iters 10 \
+     --limit 3000
+
+ echo "END TIME: $(date)"
evaluation/results/tr13/lmeval/run_generation_7b1.slurm ADDED
@@ -0,0 +1,86 @@
+ #!/bin/bash
+ #SBATCH --job-name=evaluate_t0
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=8 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --constraint=a100
+ #SBATCH --reservation=hug
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+ #SBATCH --array=0-2
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
+ conda activate muennighofflmevalgen
+
+ echo "START TIME: $(date)"
+
+ # defining the right environment variables
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+ export TOKENIZERS_PARALLELISM=false
+
+ # Converted transformers checkpoint
+ MODEL_CKPT=/gpfsscratch/rech/six/commun/experiments/muennighoff/bloomckpt/6b3t0/tr13f-6b3-ml-t0-lmtoks341b-t0toks13b-xp3capmixnewcodelonglossseq
+
+ cd /gpfsscratch/rech/six/commun/experiments/muennighoff/lm-evaluation-harness
+
+
+ DATASETS_AND_CONFIGS=(
+ wmt14_fr_en,fr-en,"version-en-fr-target"
+ wmt14_fr_en,fr-en,"a_good_translation-en-fr-target"
+ wmt14_fr_en,fr-en,"a_good_translation-en-fr-source+target"
+ wmt14_fr_en,fr-en,"xglm-en-fr-target"
+ wmt14_fr_en,fr-en,"gpt3-en-fr"
+ wmt14_fr_en,fr-en,"version-fr-en-target"
+ wmt14_fr_en,fr-en,"a_good_translation-fr-en-target"
+ wmt14_fr_en,fr-en,"a_good_translation-fr-en-source+target"
+ wmt14_fr_en,fr-en,"xglm-fr-en-target"
+ wmt14_fr_en,fr-en,"gpt3-fr-en"
+ )
+
+ DATASETS_AND_CONFIGS=(
+ wmt14_hi_en,hi-en,"version-en-hi-target"
+ wmt14_hi_en,hi-en,"a_good_translation-en-hi-target"
+ wmt14_hi_en,hi-en,"a_good_translation-en-hi-source+target"
+ wmt14_hi_en,hi-en,"xglm-en-hi-target"
+ wmt14_hi_en,hi-en,"gpt3-en-hi-target"
+ wmt14_hi_en,hi-en,"version-hi-en-target"
+ wmt14_hi_en,hi-en,"a_good_translation-hi-en-target"
+ wmt14_hi_en,hi-en,"a_good_translation-hi-en-source+target"
+ wmt14_hi_en,hi-en,"xglm-hi-en-target"
+ wmt14_hi_en,hi-en,"gpt3-hi-en-target"
+ )
+
+ DATASETS_AND_CONFIGS=(
+ mlsum_es,"es","layman_summ_es"
+ mlsum_es,"es","palm_prompt"
+ mlsum_es,"es","summarise_this_in_es_few_sentences"
+ )
+
+ DATASET_AND_CONFIG=${DATASETS_AND_CONFIGS[$SLURM_ARRAY_TASK_ID]}
+ echo $DATASET_AND_CONFIG
+
+ IFS=',' read dataset_name lang template_name <<< "${DATASET_AND_CONFIG}"
+
+ # Use this fork of lm-eval: https://github.com/bigscience-workshop/lm-evaluation-harness/pull/109
+ python main.py \
+     --model_api_name 'hf-causal' \
+     --model_args pretrained=$MODEL_CKPT,use_accelerate=True,tokenizer=$MODEL_CKPT,dtype=float16 \
+     --device cuda \
+     --batch_size 16 \
+     --no_tracking \
+     --task_name $dataset_name \
+     --template_names $template_name \
+     --bootstrap_iters 10 \
+     --limit 3000
+
+ echo "END TIME: $(date)"
evaluation/results/tr13/lmeval/transformersbslmeval.slurm ADDED
@@ -0,0 +1,53 @@
+ #!/bin/bash
+ #SBATCH --job-name=bseval-tr13f-6B3
+ #SBATCH --partition=gpu_p5
+ #SBATCH --constraint=a100
+ #SBATCH --reservation=hug
+ #SBATCH --qos=qos_gpu-gc # up to 100h
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=8 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --time 100:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
+ conda activate muennighofflmeval
+
+ echo "START TIME: $(date)"
+
+ # defining the right environment variables
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+
+ # Converted transformers checkpoint
+ #MODEL_CKPT=/gpfsscratch/rech/six/commun/experiments/muennighoff/bloomckpt/6b3t0/tr13f-6B3-ml-t0-lmtoks168B-t0toks8b5
+ #MODEL_CKPT=/gpfsscratch/rech/six/commun/experiments/muennighoff/bloomckpt/6b3t0/tr13f-6B3-ml-t0-lmtoks168B-t0toks0
+ MODEL_CKPT=/gpfsscratch/rech/six/commun/experiments/muennighoff/bloomckpt/6b3t0/tr13f-6b3-ml-t0-lmtoks168b-t0toks13b
+
+ cd /gpfsscratch/rech/six/commun/experiments/muennighoff/bloomckpt/lm-evaluation-harness
+
+ # GEM/wiki_lingua_es has 5 prompts
+ NUM_TASKS=5
+
+
+ # Use this fork of lm-eval: https://github.com/bigscience-workshop/lm-evaluation-harness/pull/109
+ python3 main.py --model hf-causal \
+     --model_args pretrained=$MODEL_CKPT,use_accelerate=True,tokenizer=$MODEL_CKPT,dtype=float16 \
+     --tasks GEM/wiki_lingua_es \
+     --device cuda \
+     --batch_size 16 \
+     --no_cache \
+     --no_tracking \
+     --prompts $SLURM_ARRAY_TASK_ID \
+     --num_fewshot 0
+
+ echo "END TIME: $(date)"
evaluation/results/tr13/tzeroeval/convert_validation_176b.slurm ADDED
@@ -0,0 +1,373 @@
+ #!/bin/bash
+ #SBATCH --job-name=ckpts
+ #SBATCH --ntasks=1 # number of MP tasks
+ #SBATCH --nodes=1
+ #SBATCH --cpus-per-task=40 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@cpu
+ #SBATCH --partition=cpu_p1
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+ conda activate muennighoffmodelconv
+
+ #CKPT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13-176B-ml-t0/checkpoints/xp3zzlossseq
+ CKPT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13-176B-ml-t0/checkpoints/p31lossseq
+ #CKPT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13-176B-ml-t0/checkpoints/xp3capmixnewcodelonglossseq
+
+ CKPTS=(
+ global_step249
+ global_step498
+ global_step747
+ global_step996
+ global_step1245
+ global_step1494
+ global_step1743
+ global_step1992
+ global_step2241
+ )
+ EXAMPLE_CKPT=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bloomckpt/176bt0/tr13-176b-ml-t0-lmtoks341b-t0toks13b-xp3capmixnewcodelonglossseq
+ DUMP_PATH=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bloomckpt/176bt0
+
+ #OUT_PREFIX=xp3zzlossseq_
+ OUT_PREFIX=p31lossseq_
+ #OUT_PREFIX=xp3capmixnewcodelonglossseq_
+
+ TP=1
+
+ ### CONVERT ###
+
+
+ for i in {0..8}; do
+ CKPT=${CKPTS[$i]}
+ echo "$i"
+ echo "Running $CKPT"
+
+ OUTPUTCKPT=$DUMP_PATH/"$OUT_PREFIX$CKPT"
+ mkdir -p $OUTPUTCKPT
+ python $six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bloomckpt/transformers_clone/src/transformers/models/bloom/convert_bloom_original_checkpoint_to_pytorch.py --pytorch_dump_folder_path $OUTPUTCKPT --bloom_checkpoint_path $CKPT_PATH/$CKPT --pretraining_tp $TP --bloom_config_file $EXAMPLE_CKPT/config.json --shard_model
+
+ # Copy tokenizer.json etc
+ cp -r $EXAMPLE_CKPT/*.json $OUTPUTCKPT/
+
+ # Use model prior to finetuning
+ #OUTPUTCKPT=/gpfsscratch/rech/six/commun/uan68tv-model-conversion/bloom
+
+ eval_script="./eval_$i.slurm"
+ cat <<EOT > $eval_script
+ #!/bin/bash
+ #SBATCH --job-name=evaluate_t0
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=64 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:8 # number of gpus
+ #SBATCH --constraint=a100
+ #SBATCH --reservation=hug
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+ #SBATCH --array=0-155
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-py38-pt111
+ conda activate thomas_t_zero_evaluation
+
+ CHECKPOINT_PATH=$OUTPUTCKPT
+
+ WORKDIR=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0
+ pushd "\$WORKDIR"
+ OUTPUT_DIR="\$CHECKPOINT_PATH/evaluation"
+ mkdir -p "\$OUTPUT_DIR"
+
+ # Validation
+ DATASETS_AND_CONFIGS_VAL=(
+ head_qa,en,en,"multiple_choice_q_and_a_index_with_context_en",validation
+ head_qa,en,en,"multiple_choice_q_and_a_en",validation
+ head_qa,en,en,"multiple_choice_q_and_a_index_en",validation
+ head_qa,en,en,"multiple_choice_a_and_q_with_context_en",validation
+ head_qa,en,en,"multiple_choice_a_and_q_en",validation
+ head_qa,es,en,"multiple_choice_q_and_a_index_with_context_en",validation
+ head_qa,es,en,"multiple_choice_q_and_a_en",validation
+ head_qa,es,en,"multiple_choice_q_and_a_index_en",validation
+ head_qa,es,en,"multiple_choice_a_and_q_with_context_en",validation
+ head_qa,es,en,"multiple_choice_a_and_q_en",validation
+ climate_fever,None,None,"first_evidence_and_claim_itemization",test
+ climate_fever,None,None,"claim_and_all_supporting_evidences",test
+ climate_fever,None,None,"fifth_evidence_and_claim_itemization",test
+ climate_fever,None,None,"third_evidence_claim_pair",test
+ climate_fever,None,None,"second_evidence_and_claim_itemization",test
+ codah,codah,None,"interrogative_instruction_after_sentence_and_choices",train
+ codah,codah,None,"affirmative_instruction_before_sentence_and_choices",train
+ codah,codah,None,"affirmative_instruction_after_sentence_and_choices",train
+ aqua_rat,raw,None,"select_the_best_option",validation
+ aqua_rat,raw,None,"answer_quiz",validation
+ aqua_rat,raw,None,"Answer questions from options",validation
+ commonsense_qa,None,None,"answer_given_question_without_options",validation
+ commonsense_qa,None,None,"question_answering",validation
+ commonsense_qa,None,None,"most_suitable_answer",validation
+ amazon_reviews_multi,en,en,"prompt_title_to_star",validation
+ amazon_reviews_multi,en,en,"prompt_review_to_star",validation
+ amazon_reviews_multi,en,en,"prompt_body_title_to_star",validation
+ amazon_reviews_multi,zh,en,"prompt_title_to_star",validation
+ amazon_reviews_multi,zh,en,"prompt_review_to_star",validation
+ amazon_reviews_multi,zh,en,"prompt_body_title_to_star",validation
+ amazon_reviews_multi,fr,en,"prompt_title_to_star",validation
+ amazon_reviews_multi,fr,en,"prompt_review_to_star",validation
+ amazon_reviews_multi,fr,en,"prompt_body_title_to_star",validation
+ amazon_reviews_multi,es,en,"prompt_title_to_star",validation
+ amazon_reviews_multi,es,en,"prompt_review_to_star",validation
+ amazon_reviews_multi,es,en,"prompt_body_title_to_star",validation
+ art,None,None,"choose_hypothesis_options",validation
+ art,None,None,"choose_hypothesis_believable",validation
+ art,None,None,"choose_hypothesis",validation
+ art,None,None,"choose_hypothesis_desc",validation
+ art,None,None,"choose_hypothesis_likely",validation
+ banking77,None,None,"help_page_topic",test
+ banking77,None,None,"direct_to_which_department",test
+ banking77,None,None,"rephrase_as_banking_term",test
+ blbooksgenre,title_genre_classifiction,None,"multi-choice",train
+ blbooksgenre,title_genre_classifiction,None,"premise_context_first",train
+ blbooksgenre,title_genre_classifiction,None,"classify",train
+ blimp,adjunct_island,None,"grammatical_between_1_2",train
+ blimp,adjunct_island,None,"grammatical_between_A_B",train
+ blimp,adjunct_island,None,"grammatical_which_one_1_2",train
+ blimp,adjunct_island,None,"single_sentence_bad_yes_no",train
+ blimp,adjunct_island,None,"single_sentence_good_yes_no",train
+ conv_ai_3,None,None,"clarification_needed",validation
+ conv_ai_3,None,None,"score_give_number",validation
+ conv_ai_3,None,None,"ambiguous",validation
+ conv_ai_3,None,None,"directly_answer",validation
+ conv_ai_3,None,None,"score_how_much",validation
+ craigslist_bargains,None,None,"good deal for seller no list price implicit",validation
+ craigslist_bargains,None,None,"good deal for seller no list price",validation
+ craigslist_bargains,None,None,"good deal for seller",validation
+ craigslist_bargains,None,None,"best deal",validation
+ ecthr_cases,alleged-violation-prediction,None,"implicit_advice_number",validation
+ ecthr_cases,alleged-violation-prediction,None,"ecthr_alleged_articles_declaration_at_end",validation
+ ecthr_cases,alleged-violation-prediction,None,"ecthr_alleged_articles_question_at_start",validation
+ ecthr_cases,alleged-violation-prediction,None,"implicit_judgment_paragraph",validation
+ ecthr_cases,alleged-violation-prediction,None,"confirm number of violated articles",validation
+ emo,None,None,"persons_describe",validation
+ emo,None,None,"final_message",validation
+ emo,None,None,"what_emotion_do_you_think",validation
+ emo,None,None,"emotional_state",validation
+ emo,None,None,"dialogue_between",validation
+ emotion,None,None,"choose_the_best_emotion_label",test
+ emotion,None,None,"reply_with_emoation_label",test
+ emotion,None,None,"answer_with_class_label",test
+ emotion,None,None,"answer_question_with_emotion_label",test
+ financial_phrasebank,sentences_allagree,None,"share_price_option",train
+ financial_phrasebank,sentences_allagree,None,"sentiment",train
+ financial_phrasebank,sentences_allagree,None,"word_comes_to_mind",train
+ financial_phrasebank,sentences_allagree,None,"complementary_industries",train
+ financial_phrasebank,sentences_allagree,None,"bullish_neutral_bearish",train
+ glue,cola,None,"Make sense yes no",validation
+ glue,cola,None,"is_this_correct",validation
+ glue,cola,None,"editing",validation
+ glue,cola,None,"Following sentence acceptable",validation
+ glue,cola,None,"Previous sentence acceptable",validation
+ glue,sst2,None,"positive negative after",validation
+ glue,sst2,None,"review",validation
+ glue,sst2,None,"said",validation
+ glue,sst2,None,"following positive negative",validation
+ glue,sst2,None,"happy or mad",validation
+ health_fact,None,None,"claim_veracity_classification_after_reading_I_believe",validation
+ health_fact,None,None,"claim_explanation_classification",validation
+ health_fact,None,None,"claim_veracity_classification_tell_me",validation
+ hlgd,None,None,"is_same_event_with_time_interrogative_related",validation
+ hlgd,None,None,"is_same_event_interrogative_talk",validation
+ hlgd,None,None,"is_same_event_with_time_interrogative_talk",validation
+ hlgd,None,None,"is_same_event_refer",validation
+ hlgd,None,None,"is_same_event_editor_asks",validation
+ hyperpartisan_news_detection,byarticle,None,"consider_does_it_follow_a_hyperpartisan_argumentation",train
+ hyperpartisan_news_detection,byarticle,None,"follows_hyperpartisan_argumentation",train
+ hyperpartisan_news_detection,byarticle,None,"consume_with_caution",train
+ hyperpartisan_news_detection,byarticle,None,"extreme_left_wing_or_right_wing",train
+ hyperpartisan_news_detection,byarticle,None,"consider_it_exhibits_extreme_one_sidedness",train
+ liar,None,None,"Given statement guess category",validation
+ lince,sa_spaeng,None,"original poster expressed sentiment",validation
+ lince,sa_spaeng,None,"sentiment trying to express",validation
+ lince,sa_spaeng,None,"express sentiment",validation
+ lince,sa_spaeng,None,"negation template",validation
+ lince,sa_spaeng,None,"the author seem",validation
+ math_qa,None,None,"choose_correct_og",test
+ math_qa,None,None,"pick_the_correct",test
+ math_qa,None,None,"first_choice_then_problem",test
+ math_qa,None,None,"problem_set_type",test
+ math_qa,None,None,"gre_problem",test
+ movie_rationales,None,None,"Standard binary sentiment analysis",validation
+ movie_rationales,None,None,"Evidences sentiment classification",validation
+ movie_rationales,None,None,"Evidences + review",validation
+ movie_rationales,None,None,"Generate evidences and sentiment",validation
+ mwsc,None,None,"in-the-sentence-question-first",validation
+ mwsc,None,None,"what-think",validation
+ mwsc,None,None,"in-the-sentence",validation
+ mwsc,None,None,"options-or",validation
+ mwsc,None,None,"is-correct",validation
+ poem_sentiment,None,None,"positive_or_negative_sentiment_variation_2",validation
+ poem_sentiment,None,None,"question_answer_format",validation
+ poem_sentiment,None,None,"guess_sentiment_without_options_variation_1",validation
+ poem_sentiment,None,None,"positive_or_negative_sentiment_variation_1",validation
+ poem_sentiment,None,None,"most_appropriate_sentiment",validation
+ onestop_english,None,None,"esl_context",train
+ onestop_english,None,None,"ara_context",train
+ onestop_english,None,None,"determine_reading_level_from_the_first_three_sentences",train
+ onestop_english,None,None,"esl_variation",train
+ onestop_english,None,None,"assess",train
+ pubmed_qa,pqa_labeled,None,"Long Answer to Final Decision",train
+ pubmed_qa,pqa_labeled,None,"Question Answering (Short)",train
+ riddle_sense,None,None,"most_suitable_answer",validation
+ riddle_sense,None,None,"answer_given_question_without_options",validation
+ riddle_sense,None,None,"question_to_answer_index",validation
+ riddle_sense,None,None,"question_answering",validation
+ scicite,None,None,"Classify intent w/section (select choice)",validation
+ scicite,None,None,"Classify intent (choices first)",validation
+ scicite,None,None,"Classify intent (select choice)",validation
+ scicite,None,None,"Classify intent",validation
+ scicite,None,None,"can_describe",validation
+ selqa,answer_selection_analysis,None,"is-he-talking-about",validation
+ selqa,answer_selection_analysis,None,"would-make-sense-qu-rand",validation
+ selqa,answer_selection_analysis,None,"make-sense-rand",validation
+ selqa,answer_selection_analysis,None,"which-answer-1st-vs-random",validation
+ snips_built_in_intents,None,None,"voice_intent",train
+ snips_built_in_intents,None,None,"categorize_query",train
+ snips_built_in_intents,None,None,"intent_query",train
+ snips_built_in_intents,None,None,"categorize_query_brief",train
+ snips_built_in_intents,None,None,"query_intent",train
+ )
+
+ DATASETS_AND_CONFIGS_VAL=(
+ amazon_reviews_multi,en,en,"prompt_title_to_star",validation
+ amazon_reviews_multi,en,en,"prompt_review_to_star",validation
+ amazon_reviews_multi,en,en,"prompt_body_title_to_star",validation
+ amazon_reviews_multi,zh,en,"prompt_title_to_star",validation
+ amazon_reviews_multi,zh,en,"prompt_review_to_star",validation
+ amazon_reviews_multi,zh,en,"prompt_body_title_to_star",validation
+ amazon_reviews_multi,fr,en,"prompt_title_to_star",validation
+ amazon_reviews_multi,fr,en,"prompt_review_to_star",validation
+ amazon_reviews_multi,fr,en,"prompt_body_title_to_star",validation
+ amazon_reviews_multi,es,en,"prompt_title_to_star",validation
+ amazon_reviews_multi,es,en,"prompt_review_to_star",validation
+ amazon_reviews_multi,es,en,"prompt_body_title_to_star",validation
+ )
+
+
+ DATASET_AND_CONFIG="\${DATASETS_AND_CONFIGS_VAL[\$SLURM_ARRAY_TASK_ID]}"
+ echo "\$DATASET_AND_CONFIG"
+
+ # Run T0 evaluation
+ # For PrefixLM add --prefixlm
+ IFS=',' read dataset_name dataset_config_name template_config_name template_name split <<< "\${DATASET_AND_CONFIG}"
+ python t-zero/evaluation/run_eval.py \
+ --dataset_name "\$dataset_name" \
+ --dataset_config_name "\$dataset_config_name" \
+ --template_config_name "\$template_config_name" \
+ --template_name "\$template_name" \
+ --split "\$split" \
+ --model_name_or_path "\$CHECKPOINT_PATH" \
+ --output_dir "\$OUTPUT_DIR" \
+ --per_device_eval_batch_size 4 \
+ --max_length 2048 \
+ --dtype bfloat16
+ EOT
+
+ sbatch $eval_script
+
+
+ lm_eval_script="./lm_eval_$i.slurm"
+ cat <<EOT > $lm_eval_script
+ #!/bin/bash
+ #SBATCH --job-name=lmeval
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=64 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:8 # number of gpus
+ #SBATCH --constraint=a100
+ #SBATCH --reservation=hug
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+ #SBATCH --array=0-12
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
+ conda activate muennighofflmevalgen
+
+ echo "START TIME: \$(date)"
+
+ # defining the right environment variables
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+ export TOKENIZERS_PARALLELISM=false
+
+ # Converted transformers checkpoint
+ MODEL_CKPT=$OUTPUTCKPT
+
+ cd /gpfsscratch/rech/six/commun/experiments/muennighoff/lm-evaluation-harness
+
+
+ DATASETS_AND_CONFIGS=(
+ wmt14_fr_en,fr-en,"version-en-fr-target"
+ wmt14_fr_en,fr-en,"a_good_translation-en-fr-target"
+ wmt14_fr_en,fr-en,"a_good_translation-en-fr-source+target"
+ wmt14_fr_en,fr-en,"xglm-en-fr-target"
+ wmt14_fr_en,fr-en,"gpt3-en-fr"
+ wmt14_fr_en,fr-en,"version-fr-en-target"
+ wmt14_fr_en,fr-en,"a_good_translation-fr-en-target"
+ wmt14_fr_en,fr-en,"a_good_translation-fr-en-source+target"
+ wmt14_fr_en,fr-en,"xglm-fr-en-target"
+ wmt14_fr_en,fr-en,"gpt3-fr-en"
+ wmt14_hi_en,hi-en,"version-en-hi-target"
+ wmt14_hi_en,hi-en,"a_good_translation-en-hi-target"
+ wmt14_hi_en,hi-en,"a_good_translation-en-hi-source+target"
+ wmt14_hi_en,hi-en,"xglm-en-hi-target"
+ wmt14_hi_en,hi-en,"gpt-3-en-hi-target"
+ wmt14_hi_en,hi-en,"version-hi-en-target"
+ wmt14_hi_en,hi-en,"a_good_translation-hi-en-target"
+ wmt14_hi_en,hi-en,"a_good_translation-hi-en-source+target"
+ wmt14_hi_en,hi-en,"xglm-hi-en-target"
+ wmt14_hi_en,hi-en,"gpt-3-hi-en-target"
+ mlsum_es,"es","layman_summ_es"
+ mlsum_es,"es","palm_prompt"
+ mlsum_es,"es","summarise_this_in_es_few_sentences"
+ )
+
+ DATASET_AND_CONFIG="\${DATASETS_AND_CONFIGS[\$SLURM_ARRAY_TASK_ID]}"
+ echo "\$DATASET_AND_CONFIG"
+
+ IFS=',' read dataset_name lang template_name <<< "\${DATASET_AND_CONFIG}"
+
+ # Use this fork of lm-eval: https://github.com/bigscience-workshop/lm-evaluation-harness/pull/109
+ python main.py \
+ --model_api_name 'hf-causal' \
+ --model_args "pretrained=\$MODEL_CKPT,use_accelerate=True,tokenizer=\$MODEL_CKPT,dtype=bfloat16" \
+ --device cuda \
+ --batch_size 4 \
+ --no_tracking \
+ --task_name "\$dataset_name" \
+ --template_names "\$template_name" \
+ --bootstrap_iters 10 \
+ --limit 3000
+
+ mkdir -p "$OUTPUTCKPT/evaluation/\$dataset_name"
+ mv /gpfsscratch/rech/six/commun/experiments/muennighoff/lm-evaluation-harness/outputs/*$CKPT*\$dataset_name* "$OUTPUTCKPT/evaluation/\$dataset_name/"
+
+ echo "END TIME: \$(date)"
+ EOT
+
+ sbatch $lm_eval_script
+
+ done
evaluation/results/tr13/tzeroeval/convert_validation_1b3.slurm ADDED
@@ -0,0 +1,352 @@
+ #!/bin/bash
+ #SBATCH --job-name=ckpts
+ #SBATCH --ntasks=1 # number of MP tasks
+ #SBATCH --nodes=1
+ #SBATCH --cpus-per-task=40 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=ajs@cpu
+ #SBATCH --partition=cpu_p1
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+ conda activate muennighoffmodelconv
+
+ CKPT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13b-1B3-ml-t0/checkpoints/xp3capmixnewcodelonglossseq
+ #CKPT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0/checkpoints/p31lossseq
+
+ CKPTS=(
+ global_step250
+ global_step500
+ global_step750
+ global_step1000
+ global_step1250
+ global_step1500
+ global_step1750
+ global_step2000
+ global_step2250
+ global_step2500
+ global_step2750
+ global_step3000
+ )
+ EXAMPLE_CKPT=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bloomckpt/1b3/bloom-1b7
+ DUMP_PATH=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bloomckpt/1b3t0
+ OUT_PREFIX=xp3capmixlossseq_
+ #OUT_PREFIX=p31lossseq
+
+ TP=1
+
+ ### CONVERT ###
+
+
+ for i in {0..11}; do
+ CKPT=${CKPTS[$i]}
+ echo "$i"
+ echo "Running $CKPT"
+
+ OUTPUTCKPT=$DUMP_PATH/"$OUT_PREFIX$CKPT"
+ python $six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bloomckpt/transformers_clone/src/transformers/models/bloom/convert_bloom_original_checkpoint_to_pytorch.py --pytorch_dump_folder_path $OUTPUTCKPT --bloom_checkpoint_path $CKPT_PATH/$CKPT --pretraining_tp $TP --bloom_config_file $EXAMPLE_CKPT/config.json
+
+ # Copy tokenizer.json etc
+ cp -r $EXAMPLE_CKPT/*.json $OUTPUTCKPT/
+
+ eval_script="./eval_$i.slurm"
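+ # Note: in the heredocs below, unescaped variables like $OUTPUTCKPT are expanded
+ # now, while this generator script writes the job file; \$-escaped ones are left
+ # for the SLURM job itself to resolve at runtime.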
+ cat <<EOT > $eval_script
+ #!/bin/bash
+ #SBATCH --job-name=evaluate_t0
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=8 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --constraint=a100
+ #SBATCH --time 5:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+ #SBATCH --array=0-168
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-py38-pt111
+ conda activate muennighofflmevalgen
+
+ CHECKPOINT_PATH=$OUTPUTCKPT
+
+ WORKDIR=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0
+ pushd "\$WORKDIR"
+ OUTPUT_DIR="\$CHECKPOINT_PATH/evaluation"
+ mkdir -p "\$OUTPUT_DIR"
+
+ # Validation
+ DATASETS_AND_CONFIGS_VAL=(
+ head_qa,en,en,"multiple_choice_q_and_a_index_with_context_en",validation
+ head_qa,en,en,"multiple_choice_q_and_a_en",validation
+ head_qa,en,en,"multiple_choice_q_and_a_index_en",validation
+ head_qa,en,en,"multiple_choice_a_and_q_with_context_en",validation
+ head_qa,en,en,"multiple_choice_a_and_q_en",validation
+ head_qa,es,en,"multiple_choice_q_and_a_index_with_context_en",validation
+ head_qa,es,en,"multiple_choice_q_and_a_en",validation
+ head_qa,es,en,"multiple_choice_q_and_a_index_en",validation
+ head_qa,es,en,"multiple_choice_a_and_q_with_context_en",validation
+ head_qa,es,en,"multiple_choice_a_and_q_en",validation
+ climate_fever,None,None,"first_evidence_and_claim_itemization",test
+ climate_fever,None,None,"claim_and_all_supporting_evidences",test
+ climate_fever,None,None,"fifth_evidence_and_claim_itemization",test
+ climate_fever,None,None,"third_evidence_claim_pair",test
+ climate_fever,None,None,"second_evidence_and_claim_itemization",test
+ codah,codah,None,"interrogative_instruction_after_sentence_and_choices",train
+ codah,codah,None,"affirmative_instruction_before_sentence_and_choices",train
+ codah,codah,None,"affirmative_instruction_after_sentence_and_choices",train
+ aqua_rat,raw,None,"select_the_best_option",validation
+ aqua_rat,raw,None,"answer_quiz",validation
+ aqua_rat,raw,None,"Answer questions from options",validation
+ commonsense_qa,None,None,"answer_given_question_without_options",validation
+ commonsense_qa,None,None,"question_answering",validation
+ commonsense_qa,None,None,"most_suitable_answer",validation
+ amazon_reviews_multi,en,en,"prompt_title_to_star",validation
+ amazon_reviews_multi,en,en,"prompt_review_to_star",validation
+ amazon_reviews_multi,en,en,"prompt_body_title_to_star",validation
+ amazon_reviews_multi,zh,en,"prompt_title_to_star",validation
+ amazon_reviews_multi,zh,en,"prompt_review_to_star",validation
+ amazon_reviews_multi,zh,en,"prompt_body_title_to_star",validation
+ amazon_reviews_multi,fr,en,"prompt_title_to_star",validation
+ amazon_reviews_multi,fr,en,"prompt_review_to_star",validation
+ amazon_reviews_multi,fr,en,"prompt_body_title_to_star",validation
+ amazon_reviews_multi,es,en,"prompt_title_to_star",validation
+ amazon_reviews_multi,es,en,"prompt_review_to_star",validation
+ amazon_reviews_multi,es,en,"prompt_body_title_to_star",validation
+ art,None,None,"choose_hypothesis_options",validation
+ art,None,None,"choose_hypothesis_believable",validation
+ art,None,None,"choose_hypothesis",validation
+ art,None,None,"choose_hypothesis_desc",validation
+ art,None,None,"choose_hypothesis_likely",validation
+ banking77,None,None,"help_page_topic",test
+ banking77,None,None,"direct_to_which_department",test
+ banking77,None,None,"rephrase_as_banking_term",test
+ blbooksgenre,title_genre_classifiction,None,"multi-choice",train
+ blbooksgenre,title_genre_classifiction,None,"premise_context_first",train
+ blbooksgenre,title_genre_classifiction,None,"classify",train
+ blimp,adjunct_island,None,"grammatical_between_1_2",train
+ blimp,adjunct_island,None,"grammatical_between_A_B",train
+ blimp,adjunct_island,None,"grammatical_which_one_1_2",train
+ blimp,adjunct_island,None,"single_sentence_bad_yes_no",train
+ blimp,adjunct_island,None,"single_sentence_good_yes_no",train
+ conv_ai_3,None,None,"clarification_needed",validation
+ conv_ai_3,None,None,"score_give_number",validation
+ conv_ai_3,None,None,"ambiguous",validation
+ conv_ai_3,None,None,"directly_answer",validation
+ conv_ai_3,None,None,"score_how_much",validation
+ craigslist_bargains,None,None,"good deal for seller no list price implicit",validation
+ craigslist_bargains,None,None,"good deal for seller no list price",validation
+ craigslist_bargains,None,None,"good deal for seller",validation
+ craigslist_bargains,None,None,"best deal",validation
+ ecthr_cases,alleged-violation-prediction,None,"implicit_advice_number",validation
+ ecthr_cases,alleged-violation-prediction,None,"ecthr_alleged_articles_declaration_at_end",validation
+ ecthr_cases,alleged-violation-prediction,None,"ecthr_alleged_articles_question_at_start",validation
+ ecthr_cases,alleged-violation-prediction,None,"implicit_judgment_paragraph",validation
+ ecthr_cases,alleged-violation-prediction,None,"confirm number of violated articles",validation
+ emo,None,None,"persons_describe",validation
+ emo,None,None,"final_message",validation
+ emo,None,None,"what_emotion_do_you_think",validation
+ emo,None,None,"emotional_state",validation
+ emo,None,None,"dialogue_between",validation
+ emotion,None,None,"choose_the_best_emotion_label",test
+ emotion,None,None,"reply_with_emoation_label",test
+ emotion,None,None,"answer_with_class_label",test
+ emotion,None,None,"answer_question_with_emotion_label",test
+ financial_phrasebank,sentences_allagree,None,"share_price_option",train
+ financial_phrasebank,sentences_allagree,None,"sentiment",train
+ financial_phrasebank,sentences_allagree,None,"word_comes_to_mind",train
+ financial_phrasebank,sentences_allagree,None,"complementary_industries",train
+ financial_phrasebank,sentences_allagree,None,"bullish_neutral_bearish",train
+ glue,cola,None,"Make sense yes no",validation
+ glue,cola,None,"is_this_correct",validation
+ glue,cola,None,"editing",validation
+ glue,cola,None,"Following sentence acceptable",validation
+ glue,cola,None,"Previous sentence acceptable",validation
+ glue,sst2,None,"positive negative after",validation
+ glue,sst2,None,"review",validation
+ glue,sst2,None,"said",validation
+ glue,sst2,None,"following positive negative",validation
+ glue,sst2,None,"happy or mad",validation
+ health_fact,None,None,"claim_veracity_classification_after_reading_I_believe",validation
+ health_fact,None,None,"claim_explanation_classification",validation
+ health_fact,None,None,"claim_veracity_classification_tell_me",validation
+ hlgd,None,None,"is_same_event_with_time_interrogative_related",validation
+ hlgd,None,None,"is_same_event_interrogative_talk",validation
+ hlgd,None,None,"is_same_event_with_time_interrogative_talk",validation
+ hlgd,None,None,"is_same_event_refer",validation
+ hlgd,None,None,"is_same_event_editor_asks",validation
+ hyperpartisan_news_detection,byarticle,None,"consider_does_it_follow_a_hyperpartisan_argumentation",train
+ hyperpartisan_news_detection,byarticle,None,"follows_hyperpartisan_argumentation",train
+ hyperpartisan_news_detection,byarticle,None,"consume_with_caution",train
+ hyperpartisan_news_detection,byarticle,None,"extreme_left_wing_or_right_wing",train
+ hyperpartisan_news_detection,byarticle,None,"consider_it_exhibits_extreme_one_sidedness",train
+ liar,None,None,"Given statement guess category",validation
+ lince,sa_spaeng,None,"original poster expressed sentiment",validation
+ lince,sa_spaeng,None,"sentiment trying to express",validation
+ lince,sa_spaeng,None,"express sentiment",validation
+ lince,sa_spaeng,None,"negation template",validation
+ lince,sa_spaeng,None,"the author seem",validation
+ math_qa,None,None,"choose_correct_og",test
+ math_qa,None,None,"pick_the_correct",test
+ math_qa,None,None,"first_choice_then_problem",test
+ math_qa,None,None,"problem_set_type",test
+ math_qa,None,None,"gre_problem",test
+ movie_rationales,None,None,"Standard binary sentiment analysis",validation
+ movie_rationales,None,None,"Evidences sentiment classification",validation
+ movie_rationales,None,None,"Evidences + review",validation
+ movie_rationales,None,None,"Generate evidences and sentiment",validation
+ mwsc,None,None,"in-the-sentence-question-first",validation
+ mwsc,None,None,"what-think",validation
+ mwsc,None,None,"in-the-sentence",validation
+ mwsc,None,None,"options-or",validation
+ mwsc,None,None,"is-correct",validation
+ poem_sentiment,None,None,"positive_or_negative_sentiment_variation_2",validation
+ poem_sentiment,None,None,"question_answer_format",validation
+ poem_sentiment,None,None,"guess_sentiment_without_options_variation_1",validation
+ poem_sentiment,None,None,"positive_or_negative_sentiment_variation_1",validation
+ poem_sentiment,None,None,"most_appropriate_sentiment",validation
+ onestop_english,None,None,"esl_context",train
+ onestop_english,None,None,"ara_context",train
+ onestop_english,None,None,"determine_reading_level_from_the_first_three_sentences",train
+ onestop_english,None,None,"esl_variation",train
+ onestop_english,None,None,"assess",train
+ pubmed_qa,pqa_labeled,None,"Long Answer to Final Decision",train
+ pubmed_qa,pqa_labeled,None,"Question Answering (Short)",train
+ riddle_sense,None,None,"most_suitable_answer",validation
+ riddle_sense,None,None,"answer_given_question_without_options",validation
+ riddle_sense,None,None,"question_to_answer_index",validation
+ riddle_sense,None,None,"question_answering",validation
+ scicite,None,None,"Classify intent w/section (select choice)",validation
+ scicite,None,None,"Classify intent (choices first)",validation
+ scicite,None,None,"Classify intent (select choice)",validation
+ scicite,None,None,"Classify intent",validation
+ scicite,None,None,"can_describe",validation
+ selqa,answer_selection_analysis,None,"is-he-talking-about",validation
+ selqa,answer_selection_analysis,None,"would-make-sense-qu-rand",validation
+ selqa,answer_selection_analysis,None,"make-sense-rand",validation
+ selqa,answer_selection_analysis,None,"which-answer-1st-vs-random",validation
+ snips_built_in_intents,None,None,"voice_intent",train
+ snips_built_in_intents,None,None,"categorize_query",train
+ snips_built_in_intents,None,None,"intent_query",train
+ snips_built_in_intents,None,None,"categorize_query_brief",train
+ snips_built_in_intents,None,None,"query_intent",train
+ )
+
+ DATASET_AND_CONFIG="\${DATASETS_AND_CONFIGS_VAL[\$SLURM_ARRAY_TASK_ID]}"
+ echo "\$DATASET_AND_CONFIG"
+
+ # Run T0 evaluation
+ # For PrefixLM add --prefixlm
+ IFS=',' read dataset_name dataset_config_name template_config_name template_name split <<< "\${DATASET_AND_CONFIG}"
+ python t-zero/evaluation/run_eval.py \
+     --dataset_name "\$dataset_name" \
+     --dataset_config_name "\$dataset_config_name" \
+     --template_config_name "\$template_config_name" \
+     --template_name "\$template_name" \
+     --split "\$split" \
+     --model_name_or_path "\$CHECKPOINT_PATH" \
+     --output_dir "\$OUTPUT_DIR" \
+     --per_device_eval_batch_size 4 \
+     --max_length 2048 \
+     --dtype float16
+ EOT
+
+ sbatch $eval_script
+
+
+ lm_eval_script="./lm_eval_$i.slurm"
+ cat <<EOT > $lm_eval_script
+ #!/bin/bash
+ #SBATCH --job-name=lmeval
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=8 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --constraint=a100
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+ #SBATCH --array=0-22
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
+ conda activate muennighofflmevalgen
+
+ echo "START TIME: $(date)"
+
+ # defining the right environment variables
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+ export TOKENIZERS_PARALLELISM=false
+
+ # Converted transformer checkpoint
+ MODEL_CKPT=$OUTPUTCKPT
+
+ cd /gpfsscratch/rech/six/commun/experiments/muennighoff/lm-evaluation-harness
+
+
+ DATASETS_AND_CONFIGS=(
+ wmt14_fr_en,fr-en,"version-en-fr-target"
+ wmt14_fr_en,fr-en,"a_good_translation-en-fr-target"
+ wmt14_fr_en,fr-en,"a_good_translation-en-fr-source+target"
+ wmt14_fr_en,fr-en,"xglm-en-fr-target"
+ wmt14_fr_en,fr-en,"gpt3-en-fr"
+ wmt14_fr_en,fr-en,"version-fr-en-target"
+ wmt14_fr_en,fr-en,"a_good_translation-fr-en-target"
+ wmt14_fr_en,fr-en,"a_good_translation-fr-en-source+target"
+ wmt14_fr_en,fr-en,"xglm-fr-en-target"
+ wmt14_fr_en,fr-en,"gpt3-fr-en"
+ wmt14_hi_en,hi-en,"version-en-hi-target"
+ wmt14_hi_en,hi-en,"a_good_translation-en-hi-target"
+ wmt14_hi_en,hi-en,"a_good_translation-en-hi-source+target"
+ wmt14_hi_en,hi-en,"xglm-en-hi-target"
+ wmt14_hi_en,hi-en,"gpt-3-en-hi-target"
+ wmt14_hi_en,hi-en,"version-hi-en-target"
+ wmt14_hi_en,hi-en,"a_good_translation-hi-en-target"
+ wmt14_hi_en,hi-en,"a_good_translation-hi-en-source+target"
+ wmt14_hi_en,hi-en,"xglm-hi-en-target"
+ wmt14_hi_en,hi-en,"gpt-3-hi-en-target"
+ mlsum_es,"es","layman_summ_es"
+ mlsum_es,"es","palm_prompt"
+ mlsum_es,"es","summarise_this_in_es_few_sentences"
+ )
+
+ DATASET_AND_CONFIG="\${DATASETS_AND_CONFIGS[\$SLURM_ARRAY_TASK_ID]}"
+ echo "\$DATASET_AND_CONFIG"
+
+ IFS=',' read dataset_name lang template_name <<< "\${DATASET_AND_CONFIG}"
+
+ # Use this fork of lm-eval: https://github.com/bigscience-workshop/lm-evaluation-harness/pull/109
+ python main.py \
+     --model_api_name 'hf-causal' \
+     --model_args "pretrained=\$MODEL_CKPT,use_accelerate=True,tokenizer=\$MODEL_CKPT,dtype=float16" \
+     --device cuda \
+     --batch_size 16 \
+     --no_tracking \
+     --task_name "\$dataset_name" \
+     --template_names "\$template_name" \
+     --bootstrap_iters 10 \
+     --limit 3000
+
+ mkdir -p "$OUTPUTCKPT/evaluation/\$dataset_name"
+ mv outputs/*$CKPT*\$dataset_name* "$OUTPUTCKPT/evaluation/\$dataset_name/"
+
+ echo "END TIME: $(date)"
+ EOT
+
+ sbatch $lm_eval_script
+
+
+ done
evaluation/results/tr13/tzeroeval/convert_validation_350m.slurm ADDED
@@ -0,0 +1,350 @@
+ #!/bin/bash
+ #SBATCH --job-name=ckpts
+ #SBATCH --ntasks=1 # number of MP tasks
+ #SBATCH --nodes=1
+ #SBATCH --cpus-per-task=40 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=ajs@cpu
+ #SBATCH --partition=cpu_p1
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+ conda activate muennighoffmodelconv
+
+ CKPT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13e-350M-ml-t0/checkpoints/xp3capmixnewcodelonglossseq
+
+ CKPTS=(
+ global_step250
+ global_step500
+ global_step750
+ global_step1000
+ global_step1250
+ global_step1500
+ global_step1750
+ global_step2000
+ global_step2250
+ global_step2500
+ global_step2750
+ global_step3000
+ )
+ EXAMPLE_CKPT=/gpfsssd/scratch/rech/six/commun/experiments/muennighoff/bloomckpt/350m/bloom-560m
+ DUMP_PATH=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bloomckpt/350mt0
+ OUT_PREFIX=xp3capmixnewcodelonglossseq
+
+ TP=1
+
+ ### CONVERT ###
+
+
+ for i in {0..11}; do
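+ # Loop over the 12 checkpoints listed above (array indices 0-11)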
+ CKPT=${CKPTS[$i]}
+ echo "$i"
+ echo "Running $CKPT"
+
+ OUTPUTCKPT=$DUMP_PATH/"$OUT_PREFIX$CKPT"
+ python $six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bloomckpt/transformers_clone/src/transformers/models/bloom/convert_bloom_original_checkpoint_to_pytorch.py --pytorch_dump_folder_path $OUTPUTCKPT --bloom_checkpoint_path $CKPT_PATH/$CKPT --pretraining_tp $TP --bloom_config_file $EXAMPLE_CKPT/config.json
+
+ # Copy tokenizer.json etc
+ cp -r $EXAMPLE_CKPT/*.json $OUTPUTCKPT/
+
+ eval_script="./eval_$i.slurm"
+ cat <<EOT > $eval_script
+ #!/bin/bash
+ #SBATCH --job-name=evaluate_t0
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=8 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --constraint=a100
+ #SBATCH --time 5:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+ #SBATCH --array=0-168
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-py38-pt111
+ conda activate muennighofflmevalgen
+
+ CHECKPOINT_PATH=$OUTPUTCKPT
+
+ WORKDIR=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0
+ pushd "\$WORKDIR"
+ OUTPUT_DIR="\$CHECKPOINT_PATH/evaluation"
+ mkdir -p "\$OUTPUT_DIR"
+
+ # Validation
+ DATASETS_AND_CONFIGS_VAL=(
+ head_qa,en,en,"multiple_choice_q_and_a_index_with_context_en",validation
+ head_qa,en,en,"multiple_choice_q_and_a_en",validation
+ head_qa,en,en,"multiple_choice_q_and_a_index_en",validation
+ head_qa,en,en,"multiple_choice_a_and_q_with_context_en",validation
+ head_qa,en,en,"multiple_choice_a_and_q_en",validation
+ head_qa,es,en,"multiple_choice_q_and_a_index_with_context_en",validation
+ head_qa,es,en,"multiple_choice_q_and_a_en",validation
+ head_qa,es,en,"multiple_choice_q_and_a_index_en",validation
+ head_qa,es,en,"multiple_choice_a_and_q_with_context_en",validation
+ head_qa,es,en,"multiple_choice_a_and_q_en",validation
+ climate_fever,None,None,"first_evidence_and_claim_itemization",test
+ climate_fever,None,None,"claim_and_all_supporting_evidences",test
+ climate_fever,None,None,"fifth_evidence_and_claim_itemization",test
+ climate_fever,None,None,"third_evidence_claim_pair",test
+ climate_fever,None,None,"second_evidence_and_claim_itemization",test
+ codah,codah,None,"interrogative_instruction_after_sentence_and_choices",train
+ codah,codah,None,"affirmative_instruction_before_sentence_and_choices",train
+ codah,codah,None,"affirmative_instruction_after_sentence_and_choices",train
+ aqua_rat,raw,None,"select_the_best_option",validation
+ aqua_rat,raw,None,"answer_quiz",validation
+ aqua_rat,raw,None,"Answer questions from options",validation
+ commonsense_qa,None,None,"answer_given_question_without_options",validation
+ commonsense_qa,None,None,"question_answering",validation
+ commonsense_qa,None,None,"most_suitable_answer",validation
+ amazon_reviews_multi,en,en,"prompt_title_to_star",validation
+ amazon_reviews_multi,en,en,"prompt_review_to_star",validation
+ amazon_reviews_multi,en,en,"prompt_body_title_to_star",validation
+ amazon_reviews_multi,zh,en,"prompt_title_to_star",validation
+ amazon_reviews_multi,zh,en,"prompt_review_to_star",validation
+ amazon_reviews_multi,zh,en,"prompt_body_title_to_star",validation
+ amazon_reviews_multi,fr,en,"prompt_title_to_star",validation
+ amazon_reviews_multi,fr,en,"prompt_review_to_star",validation
+ amazon_reviews_multi,fr,en,"prompt_body_title_to_star",validation
+ amazon_reviews_multi,es,en,"prompt_title_to_star",validation
+ amazon_reviews_multi,es,en,"prompt_review_to_star",validation
+ amazon_reviews_multi,es,en,"prompt_body_title_to_star",validation
+ art,None,None,"choose_hypothesis_options",validation
+ art,None,None,"choose_hypothesis_believable",validation
+ art,None,None,"choose_hypothesis",validation
+ art,None,None,"choose_hypothesis_desc",validation
+ art,None,None,"choose_hypothesis_likely",validation
+ banking77,None,None,"help_page_topic",test
+ banking77,None,None,"direct_to_which_department",test
+ banking77,None,None,"rephrase_as_banking_term",test
+ blbooksgenre,title_genre_classifiction,None,"multi-choice",train
+ blbooksgenre,title_genre_classifiction,None,"premise_context_first",train
+ blbooksgenre,title_genre_classifiction,None,"classify",train
+ blimp,adjunct_island,None,"grammatical_between_1_2",train
+ blimp,adjunct_island,None,"grammatical_between_A_B",train
+ blimp,adjunct_island,None,"grammatical_which_one_1_2",train
+ blimp,adjunct_island,None,"single_sentence_bad_yes_no",train
+ blimp,adjunct_island,None,"single_sentence_good_yes_no",train
+ conv_ai_3,None,None,"clarification_needed",validation
+ conv_ai_3,None,None,"score_give_number",validation
+ conv_ai_3,None,None,"ambiguous",validation
+ conv_ai_3,None,None,"directly_answer",validation
+ conv_ai_3,None,None,"score_how_much",validation
+ craigslist_bargains,None,None,"good deal for seller no list price implicit",validation
+ craigslist_bargains,None,None,"good deal for seller no list price",validation
+ craigslist_bargains,None,None,"good deal for seller",validation
+ craigslist_bargains,None,None,"best deal",validation
+ ecthr_cases,alleged-violation-prediction,None,"implicit_advice_number",validation
+ ecthr_cases,alleged-violation-prediction,None,"ecthr_alleged_articles_declaration_at_end",validation
+ ecthr_cases,alleged-violation-prediction,None,"ecthr_alleged_articles_question_at_start",validation
+ ecthr_cases,alleged-violation-prediction,None,"implicit_judgment_paragraph",validation
+ ecthr_cases,alleged-violation-prediction,None,"confirm number of violated articles",validation
+ emo,None,None,"persons_describe",validation
+ emo,None,None,"final_message",validation
+ emo,None,None,"what_emotion_do_you_think",validation
+ emo,None,None,"emotional_state",validation
+ emo,None,None,"dialogue_between",validation
+ emotion,None,None,"choose_the_best_emotion_label",test
+ emotion,None,None,"reply_with_emoation_label",test
+ emotion,None,None,"answer_with_class_label",test
+ emotion,None,None,"answer_question_with_emotion_label",test
+ financial_phrasebank,sentences_allagree,None,"share_price_option",train
+ financial_phrasebank,sentences_allagree,None,"sentiment",train
+ financial_phrasebank,sentences_allagree,None,"word_comes_to_mind",train
+ financial_phrasebank,sentences_allagree,None,"complementary_industries",train
+ financial_phrasebank,sentences_allagree,None,"bullish_neutral_bearish",train
+ glue,cola,None,"Make sense yes no",validation
+ glue,cola,None,"is_this_correct",validation
+ glue,cola,None,"editing",validation
+ glue,cola,None,"Following sentence acceptable",validation
+ glue,cola,None,"Previous sentence acceptable",validation
+ glue,sst2,None,"positive negative after",validation
+ glue,sst2,None,"review",validation
+ glue,sst2,None,"said",validation
+ glue,sst2,None,"following positive negative",validation
+ glue,sst2,None,"happy or mad",validation
+ health_fact,None,None,"claim_veracity_classification_after_reading_I_believe",validation
+ health_fact,None,None,"claim_explanation_classification",validation
+ health_fact,None,None,"claim_veracity_classification_tell_me",validation
+ hlgd,None,None,"is_same_event_with_time_interrogative_related",validation
+ hlgd,None,None,"is_same_event_interrogative_talk",validation
+ hlgd,None,None,"is_same_event_with_time_interrogative_talk",validation
+ hlgd,None,None,"is_same_event_refer",validation
+ hlgd,None,None,"is_same_event_editor_asks",validation
+ hyperpartisan_news_detection,byarticle,None,"consider_does_it_follow_a_hyperpartisan_argumentation",train
+ hyperpartisan_news_detection,byarticle,None,"follows_hyperpartisan_argumentation",train
+ hyperpartisan_news_detection,byarticle,None,"consume_with_caution",train
+ hyperpartisan_news_detection,byarticle,None,"extreme_left_wing_or_right_wing",train
+ hyperpartisan_news_detection,byarticle,None,"consider_it_exhibits_extreme_one_sidedness",train
+ liar,None,None,"Given statement guess category",validation
+ lince,sa_spaeng,None,"original poster expressed sentiment",validation
+ lince,sa_spaeng,None,"sentiment trying to express",validation
+ lince,sa_spaeng,None,"express sentiment",validation
+ lince,sa_spaeng,None,"negation template",validation
+ lince,sa_spaeng,None,"the author seem",validation
+ math_qa,None,None,"choose_correct_og",test
+ math_qa,None,None,"pick_the_correct",test
+ math_qa,None,None,"first_choice_then_problem",test
+ math_qa,None,None,"problem_set_type",test
+ math_qa,None,None,"gre_problem",test
+ movie_rationales,None,None,"Standard binary sentiment analysis",validation
+ movie_rationales,None,None,"Evidences sentiment classification",validation
+ movie_rationales,None,None,"Evidences + review",validation
+ movie_rationales,None,None,"Generate evidences and sentiment",validation
+ mwsc,None,None,"in-the-sentence-question-first",validation
+ mwsc,None,None,"what-think",validation
+ mwsc,None,None,"in-the-sentence",validation
+ mwsc,None,None,"options-or",validation
+ mwsc,None,None,"is-correct",validation
+ poem_sentiment,None,None,"positive_or_negative_sentiment_variation_2",validation
+ poem_sentiment,None,None,"question_answer_format",validation
+ poem_sentiment,None,None,"guess_sentiment_without_options_variation_1",validation
+ poem_sentiment,None,None,"positive_or_negative_sentiment_variation_1",validation
+ poem_sentiment,None,None,"most_appropriate_sentiment",validation
+ onestop_english,None,None,"esl_context",train
+ onestop_english,None,None,"ara_context",train
+ onestop_english,None,None,"determine_reading_level_from_the_first_three_sentences",train
+ onestop_english,None,None,"esl_variation",train
+ onestop_english,None,None,"assess",train
+ pubmed_qa,pqa_labeled,None,"Long Answer to Final Decision",train
+ pubmed_qa,pqa_labeled,None,"Question Answering (Short)",train
+ riddle_sense,None,None,"most_suitable_answer",validation
+ riddle_sense,None,None,"answer_given_question_without_options",validation
+ riddle_sense,None,None,"question_to_answer_index",validation
+ riddle_sense,None,None,"question_answering",validation
+ scicite,None,None,"Classify intent w/section (select choice)",validation
+ scicite,None,None,"Classify intent (choices first)",validation
+ scicite,None,None,"Classify intent (select choice)",validation
+ scicite,None,None,"Classify intent",validation
+ scicite,None,None,"can_describe",validation
+ selqa,answer_selection_analysis,None,"is-he-talking-about",validation
+ selqa,answer_selection_analysis,None,"would-make-sense-qu-rand",validation
+ selqa,answer_selection_analysis,None,"make-sense-rand",validation
+ selqa,answer_selection_analysis,None,"which-answer-1st-vs-random",validation
+ snips_built_in_intents,None,None,"voice_intent",train
+ snips_built_in_intents,None,None,"categorize_query",train
+ snips_built_in_intents,None,None,"intent_query",train
+ snips_built_in_intents,None,None,"categorize_query_brief",train
+ snips_built_in_intents,None,None,"query_intent",train
+ )
+
+ DATASET_AND_CONFIG="\${DATASETS_AND_CONFIGS_VAL[\$SLURM_ARRAY_TASK_ID]}"
+ echo "\$DATASET_AND_CONFIG"
+
+ # Run T0 evaluation
+ # For PrefixLM add --prefixlm
+ IFS=',' read dataset_name dataset_config_name template_config_name template_name split <<< "\${DATASET_AND_CONFIG}"
+ python t-zero/evaluation/run_eval.py \
+     --dataset_name "\$dataset_name" \
+     --dataset_config_name "\$dataset_config_name" \
+     --template_config_name "\$template_config_name" \
+     --template_name "\$template_name" \
+     --split "\$split" \
+     --model_name_or_path "\$CHECKPOINT_PATH" \
+     --output_dir "\$OUTPUT_DIR" \
+     --per_device_eval_batch_size 4 \
+     --max_length 2048 \
+     --dtype float16
+ EOT
+
+ sbatch $eval_script
+
+
+ lm_eval_script="./lm_eval_$i.slurm"
+ cat <<EOT > $lm_eval_script
+ #!/bin/bash
+ #SBATCH --job-name=lmeval
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=8 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --constraint=a100
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+ #SBATCH --array=0-22
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
+ conda activate muennighofflmevalgen
+
+ echo "START TIME: $(date)"
+
+ # defining the right environment variables
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+ export TOKENIZERS_PARALLELISM=false
+
+ # Converted transformer checkpoint
+ MODEL_CKPT=$OUTPUTCKPT
+
+ cd /gpfsscratch/rech/six/commun/experiments/muennighoff/lm-evaluation-harness
+
+
+ DATASETS_AND_CONFIGS=(
+ wmt14_fr_en,fr-en,"version-en-fr-target"
+ wmt14_fr_en,fr-en,"a_good_translation-en-fr-target"
+ wmt14_fr_en,fr-en,"a_good_translation-en-fr-source+target"
+ wmt14_fr_en,fr-en,"xglm-en-fr-target"
+ wmt14_fr_en,fr-en,"gpt3-en-fr"
+ wmt14_fr_en,fr-en,"version-fr-en-target"
+ wmt14_fr_en,fr-en,"a_good_translation-fr-en-target"
+ wmt14_fr_en,fr-en,"a_good_translation-fr-en-source+target"
+ wmt14_fr_en,fr-en,"xglm-fr-en-target"
+ wmt14_fr_en,fr-en,"gpt3-fr-en"
+ wmt14_hi_en,hi-en,"version-en-hi-target"
+ wmt14_hi_en,hi-en,"a_good_translation-en-hi-target"
+ wmt14_hi_en,hi-en,"a_good_translation-en-hi-source+target"
+ wmt14_hi_en,hi-en,"xglm-en-hi-target"
+ wmt14_hi_en,hi-en,"gpt-3-en-hi-target"
+ wmt14_hi_en,hi-en,"version-hi-en-target"
+ wmt14_hi_en,hi-en,"a_good_translation-hi-en-target"
+ wmt14_hi_en,hi-en,"a_good_translation-hi-en-source+target"
+ wmt14_hi_en,hi-en,"xglm-hi-en-target"
+ wmt14_hi_en,hi-en,"gpt-3-hi-en-target"
+ mlsum_es,"es","layman_summ_es"
+ mlsum_es,"es","palm_prompt"
+ mlsum_es,"es","summarise_this_in_es_few_sentences"
+ )
+
+ DATASET_AND_CONFIG="\${DATASETS_AND_CONFIGS[\$SLURM_ARRAY_TASK_ID]}"
+ echo "\$DATASET_AND_CONFIG"
+
+ IFS=',' read dataset_name lang template_name <<< "\${DATASET_AND_CONFIG}"
+
+ # Use this fork of lm-eval: https://github.com/bigscience-workshop/lm-evaluation-harness/pull/109
+ python main.py \
+     --model_api_name 'hf-causal' \
+     --model_args "pretrained=\$MODEL_CKPT,use_accelerate=True,tokenizer=\$MODEL_CKPT,dtype=float16" \
+     --device cuda \
+     --batch_size 16 \
+     --no_tracking \
+     --task_name "\$dataset_name" \
+     --template_names "\$template_name" \
+     --bootstrap_iters 10 \
+     --limit 3000
+
+ mkdir -p "$OUTPUTCKPT/evaluation/\$dataset_name"
+ mv outputs/*$CKPT*\$dataset_name* "$OUTPUTCKPT/evaluation/\$dataset_name/"
+
+ echo "END TIME: $(date)"
+ EOT
+
+ sbatch $lm_eval_script
+
+
+ done
evaluation/results/tr13/tzeroeval/convert_validation_760m.slurm ADDED
@@ -0,0 +1,352 @@
+ #!/bin/bash
+ #SBATCH --job-name=ckpts
+ #SBATCH --ntasks=1 # number of MP tasks
+ #SBATCH --nodes=1
+ #SBATCH --cpus-per-task=40 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --time=20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=ajs@cpu
+ #SBATCH --partition=cpu_p1
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+ conda activate muennighoffmodelconv
+
+ CKPT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13d-760M-ml-t0/checkpoints/xp3capmixnewcodelonglossseq
+ #CKPT_PATH=$six_ALL_CCFRSCRATCH/checkpoints/tr13f-6B3-ml-t0/checkpoints/p31lossseq
+
+ CKPTS=(
+ global_step250
+ global_step500
+ global_step750
+ global_step1000
+ global_step1250
+ global_step1500
+ global_step1750
+ global_step2000
+ global_step2250
+ global_step2500
+ global_step2750
+ global_step3000
+ )
+ EXAMPLE_CKPT=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bloomckpt/760m/bloom-760m
+ DUMP_PATH=$six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bloomckpt/760mt0
+ OUT_PREFIX=xp3capmixlossseq_
+ #OUT_PREFIX=p31lossseq
+
+ TP=1
+
+ ### CONVERT ###
+
+
+ for i in {0..11}; do
+ CKPT=${CKPTS[$i]}
+ echo "$i"
+ echo "Running $CKPT"
+
+ OUTPUTCKPT=$DUMP_PATH/"$OUT_PREFIX$CKPT"
+ python $six_ALL_CCFRSCRATCH/commun/experiments/muennighoff/bloomckpt/transformers_clone/src/transformers/models/bloom/convert_bloom_original_checkpoint_to_pytorch.py --pytorch_dump_folder_path $OUTPUTCKPT --bloom_checkpoint_path $CKPT_PATH/$CKPT --pretraining_tp $TP --bloom_config_file $EXAMPLE_CKPT/config.json
+
+ # Copy tokenizer.json etc
+ cp -r $EXAMPLE_CKPT/*.json $OUTPUTCKPT/
+
+ eval_script="./eval_$i.slurm"
+ cat <<EOT > $eval_script
+ #!/bin/bash
+ #SBATCH --job-name=evaluate_t0
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=8 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --constraint=a100
+ #SBATCH --time 5:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+ #SBATCH --array=0-168
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-py38-pt111
+ conda activate thomas_t_zero_evaluation
+
+ CHECKPOINT_PATH=$OUTPUTCKPT
+
+ WORKDIR=/gpfswork/rech/six/commun/code/tr13f-6B3-ml-t0
+ pushd "\$WORKDIR"
+ OUTPUT_DIR="\$CHECKPOINT_PATH/evaluation"
+ mkdir -p "\$OUTPUT_DIR"
+
+ # Validation
+ DATASETS_AND_CONFIGS_VAL=(
+ head_qa,en,en,"multiple_choice_q_and_a_index_with_context_en",validation
+ head_qa,en,en,"multiple_choice_q_and_a_en",validation
+ head_qa,en,en,"multiple_choice_q_and_a_index_en",validation
+ head_qa,en,en,"multiple_choice_a_and_q_with_context_en",validation
+ head_qa,en,en,"multiple_choice_a_and_q_en",validation
+ head_qa,es,en,"multiple_choice_q_and_a_index_with_context_en",validation
+ head_qa,es,en,"multiple_choice_q_and_a_en",validation
+ head_qa,es,en,"multiple_choice_q_and_a_index_en",validation
+ head_qa,es,en,"multiple_choice_a_and_q_with_context_en",validation
+ head_qa,es,en,"multiple_choice_a_and_q_en",validation
+ climate_fever,None,None,"first_evidence_and_claim_itemization",test
+ climate_fever,None,None,"claim_and_all_supporting_evidences",test
+ climate_fever,None,None,"fifth_evidence_and_claim_itemization",test
+ climate_fever,None,None,"third_evidence_claim_pair",test
+ climate_fever,None,None,"second_evidence_and_claim_itemization",test
+ codah,codah,None,"interrogative_instruction_after_sentence_and_choices",train
+ codah,codah,None,"affirmative_instruction_before_sentence_and_choices",train
+ codah,codah,None,"affirmative_instruction_after_sentence_and_choices",train
+ aqua_rat,raw,None,"select_the_best_option",validation
+ aqua_rat,raw,None,"answer_quiz",validation
+ aqua_rat,raw,None,"Answer questions from options",validation
+ commonsense_qa,None,None,"answer_given_question_without_options",validation
+ commonsense_qa,None,None,"question_answering",validation
+ commonsense_qa,None,None,"most_suitable_answer",validation
+ amazon_reviews_multi,en,en,"prompt_title_to_star",validation
+ amazon_reviews_multi,en,en,"prompt_review_to_star",validation
+ amazon_reviews_multi,en,en,"prompt_body_title_to_star",validation
+ amazon_reviews_multi,zh,en,"prompt_title_to_star",validation
+ amazon_reviews_multi,zh,en,"prompt_review_to_star",validation
+ amazon_reviews_multi,zh,en,"prompt_body_title_to_star",validation
+ amazon_reviews_multi,fr,en,"prompt_title_to_star",validation
+ amazon_reviews_multi,fr,en,"prompt_review_to_star",validation
+ amazon_reviews_multi,fr,en,"prompt_body_title_to_star",validation
+ amazon_reviews_multi,es,en,"prompt_title_to_star",validation
+ amazon_reviews_multi,es,en,"prompt_review_to_star",validation
+ amazon_reviews_multi,es,en,"prompt_body_title_to_star",validation
+ art,None,None,"choose_hypothesis_options",validation
+ art,None,None,"choose_hypothesis_believable",validation
+ art,None,None,"choose_hypothesis",validation
+ art,None,None,"choose_hypothesis_desc",validation
+ art,None,None,"choose_hypothesis_likely",validation
+ banking77,None,None,"help_page_topic",test
+ banking77,None,None,"direct_to_which_department",test
+ banking77,None,None,"rephrase_as_banking_term",test
+ blbooksgenre,title_genre_classifiction,None,"multi-choice",train
+ blbooksgenre,title_genre_classifiction,None,"premise_context_first",train
+ blbooksgenre,title_genre_classifiction,None,"classify",train
+ blimp,adjunct_island,None,"grammatical_between_1_2",train
+ blimp,adjunct_island,None,"grammatical_between_A_B",train
+ blimp,adjunct_island,None,"grammatical_which_one_1_2",train
+ blimp,adjunct_island,None,"single_sentence_bad_yes_no",train
+ blimp,adjunct_island,None,"single_sentence_good_yes_no",train
+ conv_ai_3,None,None,"clarification_needed",validation
+ conv_ai_3,None,None,"score_give_number",validation
+ conv_ai_3,None,None,"ambiguous",validation
+ conv_ai_3,None,None,"directly_answer",validation
+ conv_ai_3,None,None,"score_how_much",validation
+ craigslist_bargains,None,None,"good deal for seller no list price implicit",validation
+ craigslist_bargains,None,None,"good deal for seller no list price",validation
+ craigslist_bargains,None,None,"good deal for seller",validation
+ craigslist_bargains,None,None,"best deal",validation
+ ecthr_cases,alleged-violation-prediction,None,"implicit_advice_number",validation
+ ecthr_cases,alleged-violation-prediction,None,"ecthr_alleged_articles_declaration_at_end",validation
+ ecthr_cases,alleged-violation-prediction,None,"ecthr_alleged_articles_question_at_start",validation
+ ecthr_cases,alleged-violation-prediction,None,"implicit_judgment_paragraph",validation
+ ecthr_cases,alleged-violation-prediction,None,"confirm number of violated articles",validation
+ emo,None,None,"persons_describe",validation
+ emo,None,None,"final_message",validation
+ emo,None,None,"what_emotion_do_you_think",validation
+ emo,None,None,"emotional_state",validation
+ emo,None,None,"dialogue_between",validation
+ emotion,None,None,"choose_the_best_emotion_label",test
+ emotion,None,None,"reply_with_emoation_label",test
+ emotion,None,None,"answer_with_class_label",test
+ emotion,None,None,"answer_question_with_emotion_label",test
+ financial_phrasebank,sentences_allagree,None,"share_price_option",train
+ financial_phrasebank,sentences_allagree,None,"sentiment",train
+ financial_phrasebank,sentences_allagree,None,"word_comes_to_mind",train
+ financial_phrasebank,sentences_allagree,None,"complementary_industries",train
+ financial_phrasebank,sentences_allagree,None,"bullish_neutral_bearish",train
+ glue,cola,None,"Make sense yes no",validation
+ glue,cola,None,"is_this_correct",validation
+ glue,cola,None,"editing",validation
+ glue,cola,None,"Following sentence acceptable",validation
+ glue,cola,None,"Previous sentence acceptable",validation
+ glue,sst2,None,"positive negative after",validation
+ glue,sst2,None,"review",validation
+ glue,sst2,None,"said",validation
+ glue,sst2,None,"following positive negative",validation
+ glue,sst2,None,"happy or mad",validation
+ health_fact,None,None,"claim_veracity_classification_after_reading_I_believe",validation
+ health_fact,None,None,"claim_explanation_classification",validation
+ health_fact,None,None,"claim_veracity_classification_tell_me",validation
+ hlgd,None,None,"is_same_event_with_time_interrogative_related",validation
+ hlgd,None,None,"is_same_event_interrogative_talk",validation
+ hlgd,None,None,"is_same_event_with_time_interrogative_talk",validation
+ hlgd,None,None,"is_same_event_refer",validation
+ hlgd,None,None,"is_same_event_editor_asks",validation
+ hyperpartisan_news_detection,byarticle,None,"consider_does_it_follow_a_hyperpartisan_argumentation",train
+ hyperpartisan_news_detection,byarticle,None,"follows_hyperpartisan_argumentation",train
+ hyperpartisan_news_detection,byarticle,None,"consume_with_caution",train
+ hyperpartisan_news_detection,byarticle,None,"extreme_left_wing_or_right_wing",train
+ hyperpartisan_news_detection,byarticle,None,"consider_it_exhibits_extreme_one_sidedness",train
+ liar,None,None,"Given statement guess category",validation
+ lince,sa_spaeng,None,"original poster expressed sentiment",validation
+ lince,sa_spaeng,None,"sentiment trying to express",validation
+ lince,sa_spaeng,None,"express sentiment",validation
+ lince,sa_spaeng,None,"negation template",validation
+ lince,sa_spaeng,None,"the author seem",validation
+ math_qa,None,None,"choose_correct_og",test
+ math_qa,None,None,"pick_the_correct",test
+ math_qa,None,None,"first_choice_then_problem",test
+ math_qa,None,None,"problem_set_type",test
+ math_qa,None,None,"gre_problem",test
+ movie_rationales,None,None,"Standard binary sentiment analysis",validation
+ movie_rationales,None,None,"Evidences sentiment classification",validation
+ movie_rationales,None,None,"Evidences + review",validation
+ movie_rationales,None,None,"Generate evidences and sentiment",validation
+ mwsc,None,None,"in-the-sentence-question-first",validation
+ mwsc,None,None,"what-think",validation
+ mwsc,None,None,"in-the-sentence",validation
+ mwsc,None,None,"options-or",validation
+ mwsc,None,None,"is-correct",validation
+ poem_sentiment,None,None,"positive_or_negative_sentiment_variation_2",validation
+ poem_sentiment,None,None,"question_answer_format",validation
+ poem_sentiment,None,None,"guess_sentiment_without_options_variation_1",validation
+ poem_sentiment,None,None,"positive_or_negative_sentiment_variation_1",validation
+ poem_sentiment,None,None,"most_appropriate_sentiment",validation
+ onestop_english,None,None,"esl_context",train
+ onestop_english,None,None,"ara_context",train
+ onestop_english,None,None,"determine_reading_level_from_the_first_three_sentences",train
+ onestop_english,None,None,"esl_variation",train
+ onestop_english,None,None,"assess",train
+ pubmed_qa,pqa_labeled,None,"Long Answer to Final Decision",train
+ pubmed_qa,pqa_labeled,None,"Question Answering (Short)",train
+ riddle_sense,None,None,"most_suitable_answer",validation
+ riddle_sense,None,None,"answer_given_question_without_options",validation
+ riddle_sense,None,None,"question_to_answer_index",validation
+ riddle_sense,None,None,"question_answering",validation
+ scicite,None,None,"Classify intent w/section (select choice)",validation
+ scicite,None,None,"Classify intent (choices first)",validation
+ scicite,None,None,"Classify intent (select choice)",validation
+ scicite,None,None,"Classify intent",validation
+ scicite,None,None,"can_describe",validation
+ selqa,answer_selection_analysis,None,"is-he-talking-about",validation
+ selqa,answer_selection_analysis,None,"would-make-sense-qu-rand",validation
+ selqa,answer_selection_analysis,None,"make-sense-rand",validation
+ selqa,answer_selection_analysis,None,"which-answer-1st-vs-random",validation
+ snips_built_in_intents,None,None,"voice_intent",train
+ snips_built_in_intents,None,None,"categorize_query",train
+ snips_built_in_intents,None,None,"intent_query",train
+ snips_built_in_intents,None,None,"categorize_query_brief",train
+ snips_built_in_intents,None,None,"query_intent",train
+ )
+
+ DATASET_AND_CONFIG="\${DATASETS_AND_CONFIGS_VAL[\$SLURM_ARRAY_TASK_ID]}"
+ echo "\$DATASET_AND_CONFIG"
+
+ # Run T0 evaluation
+ # For PrefixLM add --prefixlm
+ IFS=',' read dataset_name dataset_config_name template_config_name template_name split <<< "\${DATASET_AND_CONFIG}"
+ python t-zero/evaluation/run_eval.py \
+     --dataset_name "\$dataset_name" \
+     --dataset_config_name "\$dataset_config_name" \
+     --template_config_name "\$template_config_name" \
+     --template_name "\$template_name" \
+     --split "\$split" \
+     --model_name_or_path "\$CHECKPOINT_PATH" \
+     --output_dir "\$OUTPUT_DIR" \
+     --per_device_eval_batch_size 4 \
+     --max_length 2048 \
+     --dtype float16
+ EOT
+
+ sbatch $eval_script
+
+
+ lm_eval_script="./lm_eval_$i.slurm"
+ cat <<EOT > $lm_eval_script
+ #!/bin/bash
+ #SBATCH --job-name=lmeval
+ #SBATCH --nodes=1
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=8 # number of cores per task
+ #SBATCH --hint=nomultithread # we get physical cores not logical
+ #SBATCH --gres=gpu:1 # number of gpus
+ #SBATCH --constraint=a100
+ #SBATCH --time 20:00:00 # maximum execution time (HH:MM:SS)
+ #SBATCH --output=%x-%j.out # output file name
+ #SBATCH --account=six@a100
+ #SBATCH --array=0-22
+
+ set -x -e
+
+ source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
+ conda activate muennighofflmevalgen
+
+ echo "START TIME: $(date)"
+
+ # defining the right environment variables
+ export TRANSFORMERS_CACHE=$six_ALL_CCFRWORK/models
+ export HF_DATASETS_CACHE=$six_ALL_CCFRWORK/datasets
+ export HF_MODULES_CACHE=$six_ALL_CCFRWORK/modules
+ export HF_METRICS_CACHE=$six_ALL_CCFRWORK/metrics
+ export HF_DATASETS_OFFLINE=1
+ export TRANSFORMERS_OFFLINE=1
+ export TOKENIZERS_PARALLELISM=false
+
+ # Converted transformer checkpoint
+ MODEL_CKPT=$OUTPUTCKPT
+
+ cd /gpfsscratch/rech/six/commun/experiments/muennighoff/lm-evaluation-harness
+
+
+ DATASETS_AND_CONFIGS=(
+ wmt14_fr_en,fr-en,"version-en-fr-target"
+ wmt14_fr_en,fr-en,"a_good_translation-en-fr-target"
+ wmt14_fr_en,fr-en,"a_good_translation-en-fr-source+target"
+ wmt14_fr_en,fr-en,"xglm-en-fr-target"
+ wmt14_fr_en,fr-en,"gpt3-en-fr"
+ wmt14_fr_en,fr-en,"version-fr-en-target"
+ wmt14_fr_en,fr-en,"a_good_translation-fr-en-target"
+ wmt14_fr_en,fr-en,"a_good_translation-fr-en-source+target"
+ wmt14_fr_en,fr-en,"xglm-fr-en-target"
+ wmt14_fr_en,fr-en,"gpt3-fr-en"
+ wmt14_hi_en,hi-en,"version-en-hi-target"
+ wmt14_hi_en,hi-en,"a_good_translation-en-hi-target"
+ wmt14_hi_en,hi-en,"a_good_translation-en-hi-source+target"
+ wmt14_hi_en,hi-en,"xglm-en-hi-target"
+ wmt14_hi_en,hi-en,"gpt-3-en-hi-target"
+ wmt14_hi_en,hi-en,"version-hi-en-target"
+ wmt14_hi_en,hi-en,"a_good_translation-hi-en-target"
+ wmt14_hi_en,hi-en,"a_good_translation-hi-en-source+target"
+ wmt14_hi_en,hi-en,"xglm-hi-en-target"
+ wmt14_hi_en,hi-en,"gpt-3-hi-en-target"
+ mlsum_es,"es","layman_summ_es"
+ mlsum_es,"es","palm_prompt"
+ mlsum_es,"es","summarise_this_in_es_few_sentences"
+ )
+
+ DATASET_AND_CONFIG="\${DATASETS_AND_CONFIGS[\$SLURM_ARRAY_TASK_ID]}"
+ echo "\$DATASET_AND_CONFIG"
+
+ IFS=',' read dataset_name lang template_name <<< "\${DATASET_AND_CONFIG}"
+
+ # Use this fork of lm-eval: https://github.com/bigscience-workshop/lm-evaluation-harness/pull/109
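+ # --limit caps each task at 3000 examples; --bootstrap_iters 10 keeps the bootstrap stderr estimate cheap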
+ python main.py \
+     --model_api_name 'hf-causal' \
+     --model_args "pretrained=\$MODEL_CKPT,use_accelerate=True,tokenizer=\$MODEL_CKPT,dtype=float16" \
+     --device cuda \
+     --batch_size 16 \
+     --no_tracking \
+     --task_name "\$dataset_name" \
+     --template_names "\$template_name" \
+     --bootstrap_iters 10 \
+     --limit 3000
+
+ mkdir -p "$OUTPUTCKPT/evaluation/\$dataset_name"
+ mv outputs/*$CKPT*\$dataset_name* "$OUTPUTCKPT/evaluation/\$dataset_name/"
+
+ echo "END TIME: $(date)"
+ EOT
+
+ sbatch $lm_eval_script
+
+
+ done