applied-ai-018 committed
Commit 8c68fe4 · verified · 1 Parent(s): d75ad39

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitattributes +1 -0
  2. ckpts/universal/global_step40/zero/21.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step40/zero/26.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step40/zero/8.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  5. lm-evaluation-harness/.flake8 +5 -0
  6. lm-evaluation-harness/.github/workflows/new_tasks.yml +72 -0
  7. lm-evaluation-harness/.github/workflows/publish.yml +78 -0
  8. lm-evaluation-harness/.github/workflows/unit_tests.yml +70 -0
  9. lm-evaluation-harness/.gitignore +23 -0
  10. lm-evaluation-harness/13 +0 -0
  11. lm-evaluation-harness/14 +0 -0
  12. lm-evaluation-harness/69 +0 -0
  13. lm-evaluation-harness/70 +0 -0
  14. lm-evaluation-harness/78 +0 -0
  15. lm-evaluation-harness/9 +0 -0
  16. lm-evaluation-harness/CITATION.bib +10 -0
  17. lm-evaluation-harness/LICENSE.md +21 -0
  18. lm-evaluation-harness/default_config.yaml +17 -0
  19. lm-evaluation-harness/requirements.txt +1 -0
  20. lm-evaluation-harness/tests/__init__.py +0 -0
  21. lm-evaluation-harness/tests/test_cli.py +43 -0
  22. lm-evaluation-harness/tests/test_evaluator.py +65 -0
  23. lm-evaluation-harness/tests/test_janitor.py +507 -0
  24. lm-evaluation-harness/tests/test_misc.py +14 -0
  25. lm-evaluation-harness/tests/test_requests_caching.py +123 -0
  26. lm-evaluation-harness/tests/test_tasks.py +122 -0
  27. lm-evaluation-harness/tests/test_utils.py +400 -0
  28. lm-evaluation-harness/tests/testdata/blimp_left_branch_island_echo_question-v0-loglikelihood +1 -0
  29. lm-evaluation-harness/tests/testdata/ethics_virtue-v0-loglikelihood +1 -0
  30. lm-evaluation-harness/tests/testdata/hendrycksTest-electrical_engineering-v0-res.json +1 -0
  31. lm-evaluation-harness/tests/testdata/hendrycksTest-human_sexuality-v0-loglikelihood +1 -0
  32. lm-evaluation-harness/tests/testdata/pile_pubmed-central-v0-res.json +1 -0
  33. lm-evaluation-harness/tests/testdata/qqp-v0-res.json +1 -0
  34. lm-evaluation-harness/tests/testdata/wmt20-ja-en-v0-res.json +1 -0
  35. lm-evaluation-harness/tests/testdata/wmt20-ru-en-v0-greedy_until +1 -0
  36. lm-evaluation-harness/tests/utils.py +49 -0
  37. venv/lib/python3.10/site-packages/sympy/plotting/tests/test_region_or.png +3 -0
  38. venv/lib/python3.10/site-packages/sympy/plotting/tests/test_region_xor.png +3 -0
  39. venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__init__.py +13 -0
  40. venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/__init__.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/checkpoint_reshaping_and_interoperability.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/convert_megatron_gpt2_checkpoint.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py +904 -0
  44. venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py +358 -0
  45. venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__init__.py +108 -0
  46. venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/__init__.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/configuration_speech_to_text.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/convert_s2t_fairseq_to_tfms.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/feature_extraction_speech_to_text.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/modeling_speech_to_text.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -147,3 +147,4 @@ venv/lib/python3.10/site-packages/torch/lib/libc10.so filter=lfs diff=lfs merge=
  venv/lib/python3.10/site-packages/triton/third_party/cuda/bin/ptxas filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/torch/bin/protoc-3.13.0.0 filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/triton/third_party/cuda/bin/nvdisasm filter=lfs diff=lfs merge=lfs -text
ckpts/universal/global_step40/zero/21.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5afb46af646b04955d57ec05d6e264aaa3500236054033e6fa62bef95ae0e5f0
+ size 33555612
ckpts/universal/global_step40/zero/26.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19b335940233d87ea08cc7bf12a8231f8a889669140c3b98a348158a34a75c49
+ size 33555533
ckpts/universal/global_step40/zero/8.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06883c027934439a2ce6c54efd16daa9af70ddb18efe15f61598d998092f6582
+ size 33555533
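The three checkpoint weights above are committed as Git LFS pointer files: each records only the pointer spec version, a sha256 object id, and the payload size, while the tensor itself lives in LFS storage. As a rough, hypothetical illustration (not code from this commit; the helper name and example path are assumptions), such a pointer could be read back like this:

# Sketch only: parse a Git LFS pointer file of the three-line form shown above
# (version / oid sha256:<hex> / size <bytes>).
from pathlib import Path

def read_lfs_pointer(path: str) -> dict:
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return {
        "version": fields.get("version"),
        "sha256": fields.get("oid", "").removeprefix("sha256:"),
        "size_bytes": int(fields.get("size", 0)),
    }

# e.g. read_lfs_pointer("ckpts/universal/global_step40/zero/8.mlp.dense_4h_to_h.weight/fp32.pt")
# -> {'version': 'https://git-lfs.github.com/spec/v1', 'sha256': '06883c02...', 'size_bytes': 33555533}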
lm-evaluation-harness/.flake8 ADDED
@@ -0,0 +1,5 @@
+ [flake8]
+ ignore = E203, E266, E501, W503, F403, F401, C901
+ max-line-length = 127
+ max-complexity = 10
+ select = B,C,E,F,W,T4,B9
lm-evaluation-harness/.github/workflows/new_tasks.yml ADDED
@@ -0,0 +1,72 @@
+ name: Tasks Modified
+
+ on:
+   push:
+     branches:
+       - 'main'
+   pull_request:
+     branches:
+       - 'main'
+   workflow_dispatch:
+ # comment/edit out the above to stop/change the triggers
+ jobs:
+   changed_files:
+     runs-on: ubuntu-latest  # windows-latest || macos-latest
+     timeout-minutes: 120
+     name: Scan for changed tasks
+     steps:
+       - name: checkout
+         uses: actions/checkout@v3
+         with:
+           fetch-depth: 2  # OR "2" -> To retrieve the preceding commit.
+
+       # Uses the tj-actions/changed-files@v37 action to check for changes.
+       # Outputs provided here: https://github.com/tj-actions/changed-files#outputs
+       # The `files_yaml` input optionally takes a yaml string to specify filters,
+       # and prepends the filter name to the standard output names.
+       - name: Check task folders
+         id: changed-tasks
+         uses: tj-actions/[email protected]
+         with:
+           # tasks checks the tasks folder and api checks the api folder for changes
+           files_yaml: |
+             tasks:
+               - lm_eval/tasks/**
+             api:
+               - lm_eval/api/**
+           write_output_files: true
+
+       # The next step is optional; the files are written to the workspace by default (above).
+       # so it's just for debugging
+       - name: Run Tests
+         if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true'
+         run: |
+           echo .github/outputs/tasks_all_changed_and_modified_files.txt >> 'GITHUB_ENV'
+           echo "One or more test file(s) has changed."
+           echo "List of all the files that have changed: ${{ steps.changed-tasks.outputs.tasks_all_modified_files }}"
+
+       - name: Set up Python 3.9
+         if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true'
+         uses: actions/setup-python@v4
+         with:
+           python-version: 3.9
+           cache: 'pip'
+           cache-dependency-path: setup.py
+       - name: Install dependencies
+         if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true'
+         run: |
+           python -m pip install --upgrade pip
+           pip install -e '.[dev]' --extra-index-url https://download.pytorch.org/whl/cpu
+           # Install optional git dependencies
+           # pip install bleurt@https://github.com/google-research/bleurt/archive/b610120347ef22b494b6d69b4316e303f5932516.zip#egg=bleurt
+           # if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
+       - name: Test with pytest
+         # if new tasks are added, run tests on them
+         if: steps.changed-tasks.outputs.tasks_any_modified == 'true'
+         run: python -m pytest tests/test_tasks.py -s -vv
+       # if api is modified, run tests on it
+       - name: Test more tasks with pytest
+         env:
+           API: true
+         if: steps.changed-tasks.outputs.api_any_modified == 'true'
+         run: python -m pytest tests/test_tasks.py -s -vv
lm-evaluation-harness/.github/workflows/publish.yml ADDED
@@ -0,0 +1,78 @@
+ name: Publish Python distribution to PyPI
+
+ on:
+   push:
+     tags:
+       - '*'
+
+ jobs:
+   build:
+     name: Build distribution
+     runs-on: ubuntu-latest
+
+     steps:
+       - uses: actions/checkout@v4
+       - name: Set up Python
+         uses: actions/setup-python@v4
+         with:
+           python-version: "3.x"
+
+       - name: Install pypa/build
+         run: >-
+           python3 -m
+           pip install
+           build
+           --user
+       - name: Build a binary wheel and a source tarball
+         run: python3 -m build
+       - name: Store the distribution packages
+         uses: actions/upload-artifact@v3
+         with:
+           name: python-package-distributions
+           path: dist/
+
+   publish-to-pypi:
+     name: >-
+       Publish Python distribution to PyPI
+     if: startsWith(github.ref, 'refs/tags/')  # only publish to PyPI on tag pushes
+     needs:
+       - build
+     runs-on: ubuntu-latest
+     environment:
+       name: pypi
+       url: https://pypi.org/p/lm_eval
+     permissions:
+       id-token: write  # IMPORTANT: mandatory for trusted publishing
+
+     steps:
+       - name: Download all the dists
+         uses: actions/download-artifact@v3
+         with:
+           name: python-package-distributions
+           path: dist/
+       - name: Publish distribution to PyPI
+         uses: pypa/gh-action-pypi-publish@release/v1
+
+   publish-to-testpypi:
+     name: Publish Python distribution to TestPyPI
+     needs:
+       - build
+     runs-on: ubuntu-latest
+
+     environment:
+       name: testpypi
+       url: https://test.pypi.org/p/lm_eval
+
+     permissions:
+       id-token: write  # IMPORTANT: mandatory for trusted publishing
+
+     steps:
+       - name: Download all the dists
+         uses: actions/download-artifact@v3
+         with:
+           name: python-package-distributions
+           path: dist/
+       - name: Publish distribution to TestPyPI
+         uses: pypa/gh-action-pypi-publish@release/v1
+         with:
+           repository-url: https://test.pypi.org/legacy/
lm-evaluation-harness/.github/workflows/unit_tests.yml ADDED
@@ -0,0 +1,70 @@
+ # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+ # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
+ # just comment out unwanted steps to turn off the test.
+ name: Unit Tests
+
+ on:
+   push:
+     branches:
+       - 'main'
+   pull_request:
+     branches:
+       - 'main'
+   workflow_dispatch:
+ # Jobs run concurrently and steps run sequentially within a job.
+ # jobs: linter and cpu_tests. Add more jobs/steps as required.
+ jobs:
+   linter:
+     name: Linters
+     runs-on: ubuntu-latest
+     timeout-minutes: 5
+
+     steps:
+       - name: Checkout Code
+         uses: actions/checkout@v4
+       - name: Set up Python 3.8
+         uses: actions/setup-python@v5
+         with:
+           python-version: 3.8
+           cache: pip
+           cache-dependency-path: pyproject.toml
+       - name: Pre-Commit
+         env:
+           SKIP: "no-commit-to-branch,mypy"
+
+         uses: pre-commit/[email protected]
+       # # mypy turned off for now
+       # - name: Lint with mypy
+       #   run: mypy . --ignore-missing-imports --check-untyped-defs --explicit-package-bases --warn-unreachable
+   # Job 2
+   testcpu:
+     name: CPU Tests
+     runs-on: ubuntu-latest
+     strategy:
+       matrix:
+         python-version: [ "3.8", "3.9", "3.10", "3.11" ]
+     timeout-minutes: 30
+     steps:
+       - name: Checkout Code
+         uses: actions/checkout@v4
+       - name: Set up Python ${{ matrix.python-version }}
+         uses: actions/setup-python@v5
+         with:
+           python-version: ${{ matrix.python-version }}
+           cache: pip
+           cache-dependency-path: pyproject.toml
+       - name: Install dependencies
+         run: |
+           python -m pip install --upgrade pip
+           pip install -e '.[dev,anthropic,sentencepiece,optimum,deepsparse,sparseml]' --extra-index-url https://download.pytorch.org/whl/cpu
+           # Install optional git dependencies
+           # pip install bleurt@https://github.com/google-research/bleurt/archive/b610120347ef22b494b6d69b4316e303f5932516.zip#egg=bleurt
+           # if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
+       - name: Test with pytest
+         run: python -m pytest --showlocals -s -vv -n=auto
+       - name: Archive artifacts
+         uses: actions/upload-artifact@v3
+         with:
+           name: output_results
+           path: |
+             test_logs/*
lm-evaluation-harness/.gitignore ADDED
@@ -0,0 +1,23 @@
+ env
+ *.pyc
+ output/
+ data/
+ lm_cache
+ .idea
+ build
+ dist
+ *.egg-info
+ venv
+ .vscode/
+ temp
+ __pycache__
+ .ipynb_checkpoints
+ temp
+ # IPython
+ profile_default/
+ ipython_config.py
+ # don't track (the default location of) the cached requests
+ lm_eval/caching/.cache
+ # don't track files created by wandb
+ wandb
+ examples/wandb
lm-evaluation-harness/13 ADDED
File without changes
lm-evaluation-harness/14 ADDED
File without changes
lm-evaluation-harness/69 ADDED
File without changes
lm-evaluation-harness/70 ADDED
File without changes
lm-evaluation-harness/78 ADDED
File without changes
lm-evaluation-harness/9 ADDED
File without changes
lm-evaluation-harness/CITATION.bib ADDED
@@ -0,0 +1,10 @@
+ @misc{eval-harness,
+   author = {Gao, Leo and Tow, Jonathan and Abbasi, Baber and Biderman, Stella and Black, Sid and DiPofi, Anthony and Foster, Charles and Golding, Laurence and Hsu, Jeffrey and Le Noac'h, Alain and Li, Haonan and McDonell, Kyle and Muennighoff, Niklas and Ociepa, Chris and Phang, Jason and Reynolds, Laria and Schoelkopf, Hailey and Skowron, Aviya and Sutawika, Lintang and Tang, Eric and Thite, Anish and Wang, Ben and Wang, Kevin and Zou, Andy},
+   title = {A framework for few-shot language model evaluation},
+   month = 12,
+   year = 2023,
+   publisher = {Zenodo},
+   version = {v0.4.0},
+   doi = {10.5281/zenodo.10256836},
+   url = {https://zenodo.org/records/10256836}
+ }
lm-evaluation-harness/LICENSE.md ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2020 EleutherAI
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
lm-evaluation-harness/default_config.yaml ADDED
@@ -0,0 +1,17 @@
+ compute_environment: LOCAL_MACHINE
+ debug: false
+ distributed_type: MULTI_GPU
+ downcast_bf16: 'no'
+ enable_cpu_affinity: false
+ gpu_ids: all
+ machine_rank: 0
+ main_training_function: main
+ mixed_precision: 'no'
+ num_machines: 1
+ num_processes: 8
+ rdzv_backend: static
+ same_network: true
+ tpu_env: []
+ tpu_use_cluster: false
+ tpu_use_sudo: false
+ use_cpu: false
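The keys above follow the layout of a Hugging Face Accelerate launch config: one machine, eight processes, MULTI_GPU, no mixed precision. As a small sanity-check sketch only (assuming PyYAML is available and the path is taken relative to the repository root; not part of this commit):

# Illustration only: read the launch config above and print the parallel setup.
import yaml

with open("lm-evaluation-harness/default_config.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["distributed_type"])                     # MULTI_GPU
print(cfg["num_machines"], cfg["num_processes"])   # 1 machine, 8 processes in total
print(cfg["mixed_precision"])                      # 'no'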
lm-evaluation-harness/requirements.txt ADDED
@@ -0,0 +1 @@
+ -e .
lm-evaluation-harness/tests/__init__.py ADDED
File without changes
lm-evaluation-harness/tests/test_cli.py ADDED
@@ -0,0 +1,43 @@
+ import argparse
+
+ import pytest
+
+ import lm_eval.__main__
+
+
+ def test_cli_parse_error():
+     """
+     Assert error raised if cli args argument doesn't have type
+     """
+     with pytest.raises(ValueError):
+         parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
+         parser.add_argument(
+             "--model", "-m", type=str, default="hf", help="Name of model e.g. `hf`"
+         )
+         parser.add_argument(
+             "--tasks",
+             "-t",
+             default=None,
+             metavar="task1,task2",
+             help="To get full list of tasks, use the command lm-eval --tasks list",
+         )
+         lm_eval.__main__.check_argument_types(parser)
+
+
+ def test_cli_parse_no_error():
+     """
+     Assert typed arguments are parsed correctly
+     """
+     parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
+     parser.add_argument(
+         "--model", "-m", type=str, default="hf", help="Name of model e.g. `hf`"
+     )
+     parser.add_argument(
+         "--tasks",
+         "-t",
+         type=str,
+         default=None,
+         metavar="task1,task2",
+         help="To get full list of tasks, use the command lm-eval --tasks list",
+     )
+     lm_eval.__main__.check_argument_types(parser)
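Both tests above build an argparse parser and hand it to lm_eval.__main__.check_argument_types, expecting a ValueError only when an argument was registered without an explicit type. The harness's actual implementation is not shown in this diff; a minimal stand-in with the same observable behaviour might look like the following (the loop over parser._actions and the skip of the built-in help action are assumptions):

# Hypothetical stand-in for check_argument_types, written only to mirror what the
# tests above require: every user-defined argument must declare an explicit type.
import argparse

def check_argument_types(parser: argparse.ArgumentParser) -> None:
    for action in parser._actions:
        if isinstance(action, argparse._HelpAction):
            continue  # the auto-added --help flag has no type by design
        if action.type is None:
            raise ValueError(f"Argument '{action.dest}' doesn't have a type specified.")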
lm-evaluation-harness/tests/test_evaluator.py ADDED
@@ -0,0 +1,65 @@
+ # import lm_eval.base as base
+ from typing import List
+
+ import pytest
+
+ # import lm_eval.models as models
+ import lm_eval.api as api
+ import lm_eval.evaluator as evaluator
+ from lm_eval import tasks
+
+
+ # TODO: more fine grained unit tests rather than this big honking integration
+ # test once we break evaluator into smaller, more manageable pieces
+
+
+ @pytest.mark.parametrize(
+     "task_name,limit,model,model_args",
+     [
+         (
+             ["arc_easy"],
+             10,
+             "hf",
+             "pretrained=EleutherAI/pythia-160m,dtype=float32,device=cpu",
+         )
+     ],
+ )
+ def test_evaluator(task_name: List[str], limit: int, model: str, model_args: str):
+     task_name = task_name
+     limit = 10
+
+     e1 = evaluator.simple_evaluate(
+         model=model,
+         tasks=task_name,
+         limit=limit,
+         model_args=model_args,
+     )
+     assert e1 is not None
+
+     lm = api.registry.get_model(model).create_from_arg_string(
+         model_args,
+         {
+             "batch_size": None,
+             "max_batch_size": None,
+             "device": None,
+         },
+     )
+     task_manager = tasks.TaskManager()
+     task_dict = tasks.get_task_dict(task_name, task_manager)
+
+     e2 = evaluator.evaluate(
+         lm=lm,
+         task_dict=task_dict,
+         limit=limit,
+     )
+
+     assert e2 is not None
+     # check that caching is working
+
+     def r(x):
+         return x["results"]["arc_easy"]
+
+     assert all(
+         x == y
+         for x, y in zip([y for _, y in r(e1).items()], [y for _, y in r(e2).items()])
+     )
lm-evaluation-harness/tests/test_janitor.py ADDED
@@ -0,0 +1,507 @@
1
+ from collections import defaultdict
2
+
3
+ from lm_eval.decontamination.janitor import (
4
+ Janitor,
5
+ form_ngrams,
6
+ split_indices,
7
+ word_ngrams,
8
+ word_ngrams_indices,
9
+ )
10
+
11
+
12
+ def simple_ngram(sequence, n):
13
+ ngrams = list()
14
+ ngram = []
15
+ for x in sequence:
16
+ ngram.append(x)
17
+ if len(ngram) == n:
18
+ ngrams.append(tuple(ngram))
19
+ ngram = ngram[1:]
20
+
21
+ return ngrams
22
+
23
+
24
+ def test_form_ngrams():
25
+ sequence = (
26
+ "Hello my name is Bob, I like eating pizza, chicken, chips and ice cream. Maybe I should eat some"
27
+ " more salad but it's so booooring. I just... like eating pizza, chicken, chips and ice cream so much."
28
+ )
29
+
30
+ n_values = [1, 2, 3, 5, 13]
31
+ for n in n_values:
32
+ comparison = simple_ngram(sequence, n)
33
+ result_to_test = list(form_ngrams(iter(sequence), n))
34
+ assert len(comparison) == len(result_to_test)
35
+ assert comparison == result_to_test
36
+
37
+
38
+ def test_word_ngrams():
39
+ sequence = (
40
+ "Hello my name is Bob, I like eating pizza, chicken, chips and ice cream. Maybe I should eat some"
41
+ " more salad but it's so booooring. I just... like eating pizza, chicken, chips and ice cream so much."
42
+ )
43
+
44
+ words = sequence.split()
45
+
46
+ n_values = [1, 2, 3, 5, 13]
47
+ for n in n_values:
48
+ comparison = simple_ngram(words, n)
49
+ comparison = [" ".join(ngram) for ngram in comparison]
50
+ result_to_test = list(word_ngrams(sequence, n))
51
+ assert len(comparison) == len(result_to_test)
52
+ assert result_to_test == comparison
53
+
54
+
55
+ def test_split_indices():
56
+ sequence = (
57
+ "Hello my name is Bob, I like eating pizza, chicken, chips and ice cream. Maybe I should eat some"
58
+ " more salad but it's so booooring. I just... like eating pizza, chicken, chips and ice cream so much."
59
+ )
60
+
61
+ comparison = []
62
+ current_word = ""
63
+ for i, c in enumerate(sequence):
64
+ if c != " ":
65
+ current_word += c
66
+ else:
67
+ if current_word:
68
+ comparison.append((current_word, (i - len(current_word), i - 1)))
69
+ current_word = ""
70
+
71
+ if current_word:
72
+ comparison.append(
73
+ (current_word, (len(sequence) - len(current_word), len(sequence) - 1))
74
+ )
75
+ current_word = ""
76
+
77
+ result_to_test = list(split_indices(sequence))
78
+ assert len(comparison) == len(result_to_test)
79
+ assert comparison == result_to_test
80
+
81
+
82
+ def test_word_ngrams_indices():
83
+ sequence = (
84
+ "Hello my name is Bob, I like eating pizza, chicken, chips and ice cream. Maybe I should eat some"
85
+ " more salad but it's so booooring. I just... like eating pizza, chicken, chips and ice cream so much."
86
+ )
87
+
88
+ n_values = [1, 2, 3, 5, 13]
89
+
90
+ for n in n_values:
91
+ ngrams = [" ".join(ngram) for ngram in simple_ngram(sequence.split(), n)]
92
+ tracker = defaultdict(int)
93
+ comparison = []
94
+ for ngram in ngrams:
95
+ while True:
96
+ start = sequence.find(ngram, tracker[ngram])
97
+ assert start != -1 # testing the test
98
+
99
+ end = start + len(ngram) - 1
100
+ tracker[ngram] = end + 1
101
+
102
+ # ignore partial word matches
103
+ if (start != 0 and sequence[start - 1] != " ") or (
104
+ end != len(sequence) - 1 and sequence[end + 1] != " "
105
+ ):
106
+ pass
107
+ else:
108
+ break
109
+
110
+ comparison.append((ngram, (start, end)))
111
+
112
+ result_to_test = list(word_ngrams_indices(sequence, n))
113
+ assert len(result_to_test) == len(comparison)
114
+ assert result_to_test == comparison
115
+
116
+
117
+ # Assumptions from GPT3 Paper:
118
+ # the 200 characters to remove include punctuation and is actually a half-window
119
+
120
+
121
+ # All tests below initially test without any registered contaminants, expecting the same sequence back.
122
+ def test_janitor1():
123
+ # First test using a 1gram and expected the first block before the filth to have some remaining
124
+ # characters, but the second block should be completely removed.
125
+
126
+ sequence = (
127
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
128
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
129
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
130
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
131
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
132
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
133
+ "FILTH. "
134
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
135
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
136
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
137
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
138
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
139
+ )
140
+
141
+ filth = "filth"
142
+
143
+ expected_result = (
144
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
145
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
146
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
147
+ "This is a @line #containing "
148
+ )
149
+
150
+ janitor = Janitor(
151
+ ngram_n=1, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
152
+ )
153
+ result = janitor.clean_python(sequence)
154
+ result = "".join(result)
155
+ assert result == sequence
156
+
157
+ janitor.register_contaminant(filth)
158
+ assert janitor.dirt_ngrams == {filth}
159
+
160
+ result = janitor.clean_python(sequence)
161
+ result = "".join(result)
162
+ assert result == expected_result
163
+
164
+
165
+ def test_janitor2():
166
+ # Second test using a 1gram and expected the first block before the filth to have some remaining
167
+ # characters, and the second block is longer then 200 characters so should also have some remaining.
168
+
169
+ sequence = (
170
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
171
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
172
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
173
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
174
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
175
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
176
+ "FILTH. "
177
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
178
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
179
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
180
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
181
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
182
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
183
+ )
184
+
185
+ filth = "filth"
186
+
187
+ expected_result = (
188
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
189
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
190
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
191
+ "This is a @line #containing "
192
+ " characters, 76 to be exact. "
193
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
194
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
195
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
196
+ )
197
+
198
+ janitor = Janitor(
199
+ ngram_n=1, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
200
+ )
201
+ result = janitor.clean_python(sequence)
202
+ result = "".join(result)
203
+ assert result == sequence
204
+
205
+ janitor.register_contaminant(filth)
206
+ assert janitor.dirt_ngrams == {filth}
207
+
208
+ result = janitor.clean_python(sequence)
209
+ result = "".join(result)
210
+ assert result == expected_result
211
+
212
+
213
+ def test_janitor3():
214
+ # Same test as above but with a 6gram.
215
+
216
+ sequence = (
217
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
218
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
219
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
220
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
221
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
222
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
223
+ "FILTH. lots of dirty filtHy FIlTh "
224
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
225
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
226
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
227
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
228
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
229
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
230
+ )
231
+
232
+ filth = "filth lots of dirty filthy filth"
233
+
234
+ expected_result = (
235
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
236
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
237
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
238
+ "This is a @line #containing "
239
+ " characters, 76 to be exact. "
240
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
241
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
242
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
243
+ )
244
+
245
+ janitor = Janitor(
246
+ ngram_n=6, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
247
+ )
248
+ result = janitor.clean_python(sequence)
249
+ result = "".join(result)
250
+ assert result == sequence
251
+
252
+ janitor.register_contaminant(filth)
253
+ assert janitor.dirt_ngrams == {filth}
254
+
255
+ result = janitor.clean_python(sequence)
256
+ result = "".join(result)
257
+ assert result == expected_result
258
+
259
+
260
+ def test_janitor4():
261
+ # This test adds another block to that from the previous. The middle block should be entirely
262
+ # removed as the 200 characters are removed from each side.
263
+
264
+ sequence = (
265
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
266
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
267
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
268
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
269
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
270
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
271
+ "FILTH. lots of dirty filtHy FIlTh "
272
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
273
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
274
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
275
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
276
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
277
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
278
+ "FILTH. lots of dirty filtHy FIlTh "
279
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
280
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
281
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
282
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
283
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
284
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
285
+ )
286
+
287
+ filth = "filth lots of dirty filthy filth"
288
+
289
+ expected_result = (
290
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
291
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
292
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
293
+ "This is a @line #containing "
294
+ " characters, 76 to be exact. "
295
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
296
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
297
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
298
+ )
299
+
300
+ janitor = Janitor(
301
+ ngram_n=6, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
302
+ )
303
+ result = janitor.clean_python(sequence)
304
+ result = "".join(result)
305
+ assert result == sequence
306
+
307
+ janitor.register_contaminant(filth)
308
+ assert janitor.dirt_ngrams == {filth}
309
+
310
+ result = janitor.clean_python(sequence)
311
+ result = "".join(result)
312
+ assert result == expected_result
313
+
314
+
315
+ def test_janitor5():
316
+ # Same as above but using multiple different filth 6grams.
317
+
318
+ sequence = (
319
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
320
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
321
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
322
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
323
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
324
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
325
+ "FILTH. lots of dirty filtHy FIlTh "
326
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
327
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
328
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
329
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
330
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
331
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
332
+ "FILTH. lots of filtHy dirty FIlTh "
333
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
334
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
335
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
336
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
337
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
338
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
339
+ )
340
+
341
+ filths = ["filth lots of dirty filthy filth", "filth lots of filthy dirty filth"]
342
+
343
+ expected_result = (
344
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
345
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
346
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
347
+ "This is a @line #containing "
348
+ " characters, 76 to be exact. "
349
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
350
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
351
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
352
+ )
353
+
354
+ janitor = Janitor(
355
+ ngram_n=6, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
356
+ )
357
+ result = janitor.clean_python(sequence)
358
+ result = "".join(result)
359
+ assert result == sequence
360
+
361
+ for filth in filths:
362
+ janitor.register_contaminant(filth)
363
+ assert janitor.dirt_ngrams == set(filths)
364
+
365
+ result = janitor.clean_python(sequence)
366
+ result = "".join(result)
367
+ assert result == expected_result
368
+
369
+
370
+ def test_janitor6():
371
+ # Same as above but now we add 10 filths and expect the same result, the following test does 11.
372
+
373
+ sequence = (
374
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
375
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
376
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
377
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
378
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
379
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
380
+ "FILTH. lots of dirty filtHy FIlTh "
381
+ "FILTH. lots of dirty filtHy FIlTh "
382
+ "FILTH. lots of dirty filtHy FIlTh "
383
+ "FILTH. lots of dirty filtHy FIlTh "
384
+ "FILTH. lots of dirty filtHy FIlTh "
385
+ "FILTH. lots of dirty filtHy FIlTh "
386
+ "FILTH. lots of dirty filtHy FIlTh "
387
+ "FILTH. lots of dirty filtHy FIlTh "
388
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
389
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
390
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
391
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
392
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
393
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
394
+ "FILTH. lots of filtHy dirty FIlTh "
395
+ "FILTH. lots of filtHy dirty FIlTh "
396
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
397
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
398
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
399
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
400
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
401
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
402
+ )
403
+
404
+ filths = ["filth lots of dirty filthy filth", "filth lots of filthy dirty filth"]
405
+
406
+ expected_result = (
407
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
408
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
409
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
410
+ "This is a @line #containing "
411
+ " characters, 76 to be exact. "
412
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
413
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
414
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
415
+ )
416
+
417
+ janitor = Janitor(
418
+ ngram_n=6, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
419
+ )
420
+ result = janitor.clean_python(sequence)
421
+ result = "".join(result)
422
+ assert result == sequence
423
+
424
+ for filth in filths:
425
+ janitor.register_contaminant(filth)
426
+ assert janitor.dirt_ngrams == set(filths)
427
+
428
+ result = janitor.clean_python(sequence)
429
+ result = "".join(result)
430
+ assert result == expected_result
431
+
432
+
433
+ def test_janitor7():
434
+ # Same as above but now we add 9 filths and expect the same result, the following test does 10.
435
+
436
+ sequence = (
437
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
438
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
439
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
440
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
441
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
442
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
443
+ "FILTH. lots of dirty filtHy FIlTh "
444
+ "FILTH. lots of dirty filtHy FIlTh "
445
+ "FILTH. lots of dirty filtHy FIlTh "
446
+ "FILTH. lots of dirty filtHy FIlTh "
447
+ "FILTH. lots of dirty filtHy FIlTh "
448
+ "FILTH. lots of dirty filtHy FIlTh "
449
+ "FILTH. lots of dirty filtHy FIlTh "
450
+ "FILTH. lots of dirty filtHy FIlTh "
451
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
452
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
453
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
454
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
455
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
456
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
457
+ "FILTH. lots of filtHy dirty FIlTh "
458
+ "FILTH. lots of filtHy dirty FIlTh "
459
+ "FILTH. lots of filtHy dirty FIlTh "
460
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
461
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
462
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
463
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
464
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
465
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
466
+ )
467
+
468
+ filths = ["filth lots of dirty filthy filth", "filth lots of filthy dirty filth"]
469
+
470
+ expected_result = ""
471
+
472
+ janitor = Janitor(
473
+ ngram_n=6, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
474
+ )
475
+ result = janitor.clean_python(sequence)
476
+ result = "".join(result)
477
+ assert result == sequence
478
+
479
+ for filth in filths:
480
+ janitor.register_contaminant(filth)
481
+ assert janitor.dirt_ngrams == set(filths)
482
+
483
+ result = janitor.clean_python(sequence)
484
+ result = "".join(result)
485
+ assert result == expected_result
486
+
487
+
488
+ def test_janitor8():
489
+ # This will test the save and load contams
490
+ pass
491
+ # source = """ ,, I'm a very !dirty,, ,, dirty boy. Clean me daddy. \n\nhe he he hehe heh. lastword """ * 2
492
+ # contaminant = "dirty boy. Clean he he"
493
+
494
+ # jan = Janitor(ngram_n=3)
495
+ # jan.register_contaminant(contaminant)
496
+ # cleaned = " ".join(jan.clean(source))
497
+ # for contam in jan.dirt_ngrams:
498
+ # assert contam not in cleaned, contam
499
+
500
+ # filename = "data/saved_contam"
501
+ # jan.save_contamination_ngrams(filename)
502
+
503
+ # jan = Janitor(ngram_n=3)
504
+ # jan.load_contamination_ngrams(filename)
505
+ # cleaned = " ".join(jan.clean(source))
506
+ # for contam in jan.dirt_ngrams:
507
+ # assert contam not in cleaned, contam
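The janitor tests above encode the GPT-3-style decontamination rule: register contaminating n-grams, cut a 200-character half-window on each side of every match, and drop any leftover slice shorter than the minimum length (a document with too many dirty matches is removed entirely). The real Janitor lives in lm_eval.decontamination.janitor and is not reproduced here; the sketch below only illustrates the windowing idea, with the function name and the simplified substring matching being assumptions.

# Independent illustration of the half-window removal idea exercised above.
# This is NOT the harness's Janitor: it matches contaminants as plain substrings,
# ignores n-gram normalisation, and skips the too_dirty_cutoff bookkeeping.
def strip_contaminated_windows(text, contaminants, window=200, min_slice=200):
    lowered = text.lower()
    kept = []
    cursor = 0
    while True:
        hit, hit_len = -1, 0
        for c in contaminants:
            pos = lowered.find(c.lower(), cursor)
            if pos != -1 and (hit == -1 or pos < hit):
                hit, hit_len = pos, len(c)
        if hit == -1:
            kept.append(text[cursor:])
            break
        kept.append(text[cursor:max(cursor, hit - window)])  # drop up to 200 chars before the match
        cursor = hit + hit_len + window                       # and 200 chars after it
    return "".join(s for s in kept if len(s) >= min_slice)    # drop slices that are too short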
lm-evaluation-harness/tests/test_misc.py ADDED
@@ -0,0 +1,14 @@
+ import random
+
+ import pytest
+
+ import lm_eval.api.metrics as metrics
+
+
+ def test_bootstrapping():
+     random.seed(42)
+     arr = [random.random() for _ in range(1000)]
+     expected = metrics.mean_stderr(arr)
+     bootstrapped = metrics.bootstrap_stderr(metrics.mean, arr, iters=100000)
+
+     assert bootstrapped == pytest.approx(expected, abs=1e-4)
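test_bootstrapping asserts that the harness's bootstrap_stderr agrees with the analytic mean_stderr to within 1e-4. For intuition only, and independent of lm_eval's actual implementation, a bootstrap standard error can be rebuilt from first principles: resample the data with replacement, recompute the mean on each resample, and take the standard deviation of those resampled means.

# Toy re-derivation of a bootstrap standard error for the sample mean.
# Not lm_eval code; shown only to explain what the test above is checking.
import random
import statistics

def bootstrap_stderr_of_mean(data, iters=10_000, seed=0):
    rng = random.Random(seed)
    n = len(data)
    resampled_means = [statistics.fmean(rng.choices(data, k=n)) for _ in range(iters)]
    return statistics.stdev(resampled_means)

rng = random.Random(42)
data = [rng.random() for _ in range(1000)]
# For a sample this size the estimate is close to the analytic stderr, stdev(data) / sqrt(n).
print(bootstrap_stderr_of_mean(data))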
lm-evaluation-harness/tests/test_requests_caching.py ADDED
@@ -0,0 +1,123 @@
1
+ # import lm_eval.base as base
2
+ import importlib
3
+ import os
4
+ import sys
5
+ from datetime import datetime
6
+ from typing import List, Tuple
7
+
8
+ import pytest
9
+ import torch
10
+
11
+ # import lm_eval.models as models
12
+ from lm_eval.caching.cache import PATH
13
+
14
+
15
+ MODULE_DIR = os.path.dirname(os.path.realpath(__file__))
16
+
17
+ # NOTE the script this loads uses simple evaluate
18
+ # TODO potentially test both the helper script and the normal script
19
+ sys.path.append(f"{MODULE_DIR}/../scripts")
20
+ model_loader = importlib.import_module("requests_caching")
21
+ run_model_for_task_caching = model_loader.run_model_for_task_caching
22
+
23
+
24
+ DEFAULT_TASKS = ["lambada_openai", "hellaswag"]
25
+
26
+
27
+ @pytest.fixture(autouse=True)
28
+ def setup_and_teardown():
29
+ # Setup
30
+ torch.use_deterministic_algorithms(False)
31
+ clear_cache()
32
+ # Yields control back to the test function
33
+ yield
34
+ # Cleanup here
35
+
36
+
37
+ def clear_cache():
38
+ if os.path.exists(PATH):
39
+ cache_files = os.listdir(PATH)
40
+ for file in cache_files:
41
+ file_path = f"{PATH}/{file}"
42
+ os.unlink(file_path)
43
+
44
+
45
+ # leaving tasks here to allow for the option to select specific task files
46
+ def get_cache_files(tasks: List[str] = None) -> Tuple[List[str], List[str]]:
47
+ cache_files = os.listdir(PATH)
48
+
49
+ file_task_names = []
50
+
51
+ for file in cache_files:
52
+ file_without_prefix = file.split("-")[1]
53
+ file_without_prefix_and_suffix = file_without_prefix.split(".")[0]
54
+ file_task_names.append(file_without_prefix_and_suffix)
55
+
56
+ return cache_files, file_task_names
57
+
58
+
59
+ def assert_created(tasks: List[str], file_task_names: List[str]):
60
+ tasks.sort()
61
+ file_task_names.sort()
62
+
63
+ assert tasks == file_task_names
64
+
65
+
66
+ @pytest.mark.parametrize("tasks", [DEFAULT_TASKS])
67
+ def test_requests_caching_true(tasks: List[str]):
68
+ run_model_for_task_caching(tasks=tasks, cache_requests="true")
69
+
70
+ cache_files, file_task_names = get_cache_files()
71
+
72
+ assert_created(tasks=tasks, file_task_names=file_task_names)
73
+
74
+
75
+ @pytest.mark.parametrize("tasks", [DEFAULT_TASKS])
76
+ def test_requests_caching_refresh(tasks: List[str]):
77
+ run_model_for_task_caching(tasks=tasks, cache_requests="true")
78
+
79
+ timestamp_before_test = datetime.now().timestamp()
80
+
81
+ run_model_for_task_caching(tasks=tasks, cache_requests="refresh")
82
+
83
+ cache_files, file_task_names = get_cache_files()
84
+
85
+ for file in cache_files:
86
+ modification_time = os.path.getmtime(f"{PATH}/{file}")
87
+ assert modification_time > timestamp_before_test
88
+
89
+ tasks.sort()
90
+ file_task_names.sort()
91
+
92
+ assert tasks == file_task_names
93
+
94
+
95
+ @pytest.mark.parametrize("tasks", [DEFAULT_TASKS])
96
+ def test_requests_caching_delete(tasks: List[str]):
97
+ # populate the data first, rerun this test within this test for additional confidence
98
+ test_requests_caching_true(tasks=tasks)
99
+
100
+ run_model_for_task_caching(tasks=tasks, cache_requests="delete")
101
+
102
+ cache_files, file_task_names = get_cache_files()
103
+
104
+ assert len(cache_files) == 0
105
+
106
+
107
+ # useful for locally running tests through the debugger
108
+ if __name__ == "__main__":
109
+
110
+ def run_tests():
111
+ tests = [
112
+ test_requests_caching_true,
113
+ test_requests_caching_refresh,
114
+ test_requests_caching_delete,
115
+ ]
116
+
117
+ for test_func in tests:
118
+ clear_cache()
119
+ test_func(tasks=DEFAULT_TASKS)
120
+
121
+ print("Tests pass")
122
+
123
+ run_tests()
lm-evaluation-harness/tests/test_tasks.py ADDED
@@ -0,0 +1,122 @@
1
+ from itertools import islice
2
+
3
+ import pytest
4
+
5
+ import lm_eval.tasks as tasks
6
+ from lm_eval.api.task import ConfigurableTask
7
+
8
+ from .utils import new_tasks
9
+
10
+
11
+ task_manager = tasks.TaskManager()
12
+ # Default Task
13
+ TASKS = ["arc_easy"]
14
+
15
+
16
+ def task_class():
17
+ global TASKS
18
+ # CI: new_tasks checks if any modifications have been made
19
+ task_classes = new_tasks()
20
+ # Check if task_classes is empty
21
+ if task_classes:
22
+ return list(task_manager.load_task_or_group(task_classes).values())
23
+ else:
24
+ return list(task_manager.load_task_or_group(TASKS).values())
25
+
26
+
27
+ @pytest.fixture()
28
+ def limit() -> int:
29
+ return 10
30
+
31
+
32
+ # Tests
33
+ @pytest.mark.parametrize("task_class", task_class(), ids=lambda x: f"{x.config.task}")
34
+ class TestNewTasks:
35
+ def test_download(self, task_class: ConfigurableTask):
36
+ task_class.download()
37
+ assert task_class.dataset is not None
38
+
39
+ def test_has_training_docs(self, task_class: ConfigurableTask):
40
+ assert task_class.has_training_docs() in [True, False]
41
+
42
+ def test_check_training_docs(self, task_class: ConfigurableTask):
43
+ if task_class.has_training_docs():
44
+ assert task_class._config["training_split"] is not None
45
+
46
+ def test_has_validation_docs(self, task_class):
47
+ assert task_class.has_validation_docs() in [True, False]
48
+
49
+ def test_check_validation_docs(self, task_class):
50
+ if task_class.has_validation_docs():
51
+ assert task_class._config["validation_split"] is not None
52
+
53
+ def test_has_test_docs(self, task_class):
54
+ assert task_class.has_test_docs() in [True, False]
55
+
56
+ def test_check_test_docs(self, task_class):
57
+ task = task_class
58
+ if task.has_test_docs():
59
+ assert task._config["test_split"] is not None
60
+
61
+ def test_should_decontaminate(self, task_class):
62
+ task = task_class
63
+ assert task.should_decontaminate() in [True, False]
64
+ if task.should_decontaminate():
65
+ assert task._config["doc_to_decontamination_query"] is not None
66
+
67
+ def test_doc_to_text(self, task_class, limit):
68
+ task = task_class
69
+ arr = (
70
+ list(islice(task.test_docs(), limit))
71
+ if task.has_test_docs()
72
+ else list(islice(task.validation_docs(), limit))
73
+ )
74
+ _array = [task.doc_to_text(doc) for doc in arr]
75
+ # space convention; allow txt to have length 0 for perplexity-like tasks since the model tacks an <|endoftext|> on
76
+ assert all(
77
+ isinstance(x, str) and (x[-1] != " " if len(x) != 0 else True)
78
+ for x in _array
79
+ )
80
+
81
+ def test_create_choices(self, task_class, limit):
82
+ task = task_class
83
+ arr = (
84
+ list(islice(task.test_docs(), limit))
85
+ if task.has_test_docs()
86
+ else list(islice(task.validation_docs(), limit))
87
+ )
88
+ if "multiple_choice" in task._config.output_type:
89
+ _array = [task.doc_to_choice(doc) for doc in arr]
90
+ # assert all(len(x) == 4 for x in _array)
91
+ assert all(isinstance(x, list) for x in _array)
92
+ assert all(isinstance(x[0], str) for x in _array)
93
+
94
+ def test_doc_to_target(self, task_class, limit):
95
+ task = task_class
96
+ arr = (
97
+ list(islice(task.test_docs(), limit))
98
+ if task.has_test_docs()
99
+ else list(islice(task.validation_docs(), limit))
100
+ )
101
+ _array_target = [task.doc_to_target(doc) for doc in arr]
102
+ if task._config.output_type == "multiple_choice":
103
+ assert all(isinstance(label, int) for label in _array_target)
104
+ # _array_text = [task.doc_to_text(doc) for doc in arr]
105
+ # Not working
106
+ # assert all(tgt[0] == " " or txt[-1] == "\n" if len(txt) != 0 else True for txt, tgt in zip(_array_text, _array_target))
107
+
108
+ def test_build_all_requests(self, task_class, limit):
109
+ task_class.build_all_requests(rank=1, limit=limit, world_size=1)
110
+ assert task_class.instances is not None
111
+
112
+ # ToDO: Add proper testing
113
+ def test_construct_requests(self, task_class, limit):
114
+ task = task_class
115
+ arr = (
116
+ list(islice(task.test_docs(), limit))
117
+ if task.has_test_docs()
118
+ else list(islice(task.validation_docs(), limit))
119
+ )
120
+ requests = [task.construct_requests(doc, task.doc_to_text(doc)) for doc in arr]
121
+ # assert all(isinstance(doc, list) for doc in requests)
122
+ assert len(requests) == limit if limit else True
lm-evaluation-harness/tests/test_utils.py ADDED
@@ -0,0 +1,400 @@
1
+ import itertools
2
+
3
+ import numpy as np
4
+ import pytest
5
+ import torch
6
+
7
+ from lm_eval.api.metrics import (
8
+ aggregate_subtask_metrics,
9
+ mean,
10
+ pooled_sample_stderr,
11
+ stderr_for_metric,
12
+ )
13
+ from lm_eval.models.utils import Collator
14
+ from lm_eval.utils import (
15
+ get_rolling_token_windows,
16
+ make_disjoint_window,
17
+ )
18
+
19
+
20
+ # noinspection DuplicatedCode
21
+ def test_get_rolling_token_windows_v1():
22
+ gold = [
23
+ ([-100, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
24
+ (
25
+ [9, 10, 11, 12, 13, 14, 15, 16, 17, 18],
26
+ [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
27
+ ),
28
+ (
29
+ [19, 20, 21, 22, 23, 24, 25, 26, 27, 28],
30
+ [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
31
+ ),
32
+ ([23, 24, 25, 26, 27, 28, 29, 30, 31, 32], [30, 31, 32, 33]),
33
+ ]
34
+ x = list(range(34))
35
+ generator = get_rolling_token_windows(
36
+ token_list=x,
37
+ prefix_token=-100,
38
+ max_seq_len=10,
39
+ context_len=1,
40
+ )
41
+ pred_length = 0
42
+ output = []
43
+ for input_tokens, pred_tokens in generator:
44
+ output.append((input_tokens, pred_tokens))
45
+ pred_length += len(pred_tokens)
46
+ assert pred_length == len(x)
47
+ assert gold == output
48
+
49
+
50
+ # noinspection DuplicatedCode
51
+ def test_get_rolling_token_windows_v2():
52
+ gold = [
53
+ ([-100, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
54
+ ([2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [10, 11, 12]),
55
+ ([5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [13, 14, 15]),
56
+ ([8, 9, 10, 11, 12, 13, 14, 15, 16, 17], [16, 17, 18]),
57
+ ([11, 12, 13, 14, 15, 16, 17, 18, 19, 20], [19, 20, 21]),
58
+ ([14, 15, 16, 17, 18, 19, 20, 21, 22, 23], [22, 23, 24]),
59
+ ([17, 18, 19, 20, 21, 22, 23, 24, 25, 26], [25, 26, 27]),
60
+ ([20, 21, 22, 23, 24, 25, 26, 27, 28, 29], [28, 29, 30]),
61
+ ([23, 24, 25, 26, 27, 28, 29, 30, 31, 32], [31, 32, 33]),
62
+ ]
63
+ x = list(range(34))
64
+ generator = get_rolling_token_windows(
65
+ token_list=x,
66
+ prefix_token=-100,
67
+ max_seq_len=10,
68
+ context_len=8,
69
+ )
70
+ pred_length = 0
71
+ output = []
72
+ for input_tokens, pred_tokens in generator:
73
+ output.append((input_tokens, pred_tokens))
74
+ pred_length += len(pred_tokens)
75
+ assert pred_length == len(x)
76
+ assert gold == output
77
+
78
+
79
+ # noinspection DuplicatedCode
80
+ def test_get_rolling_token_windows_v3():
81
+ gold = [
82
+ ([-100, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
83
+ ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10]),
84
+ ([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [11]),
85
+ ([2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [12]),
86
+ ([3, 4, 5, 6, 7, 8, 9, 10, 11, 12], [13]),
87
+ ([4, 5, 6, 7, 8, 9, 10, 11, 12, 13], [14]),
88
+ ([5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [15]),
89
+ ([6, 7, 8, 9, 10, 11, 12, 13, 14, 15], [16]),
90
+ ([7, 8, 9, 10, 11, 12, 13, 14, 15, 16], [17]),
91
+ ([8, 9, 10, 11, 12, 13, 14, 15, 16, 17], [18]),
92
+ ([9, 10, 11, 12, 13, 14, 15, 16, 17, 18], [19]),
93
+ ([10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [20]),
94
+ ([11, 12, 13, 14, 15, 16, 17, 18, 19, 20], [21]),
95
+ ([12, 13, 14, 15, 16, 17, 18, 19, 20, 21], [22]),
96
+ ([13, 14, 15, 16, 17, 18, 19, 20, 21, 22], [23]),
97
+ ([14, 15, 16, 17, 18, 19, 20, 21, 22, 23], [24]),
98
+ ([15, 16, 17, 18, 19, 20, 21, 22, 23, 24], [25]),
99
+ ([16, 17, 18, 19, 20, 21, 22, 23, 24, 25], [26]),
100
+ ([17, 18, 19, 20, 21, 22, 23, 24, 25, 26], [27]),
101
+ ([18, 19, 20, 21, 22, 23, 24, 25, 26, 27], [28]),
102
+ ([19, 20, 21, 22, 23, 24, 25, 26, 27, 28], [29]),
103
+ ([20, 21, 22, 23, 24, 25, 26, 27, 28, 29], [30]),
104
+ ([21, 22, 23, 24, 25, 26, 27, 28, 29, 30], [31]),
105
+ ([22, 23, 24, 25, 26, 27, 28, 29, 30, 31], [32]),
106
+ ([23, 24, 25, 26, 27, 28, 29, 30, 31, 32], [33]),
107
+ ]
108
+ x = list(range(34))
109
+ generator = get_rolling_token_windows(
110
+ token_list=x,
111
+ prefix_token=-100,
112
+ max_seq_len=10,
113
+ context_len=10,
114
+ )
115
+ pred_length = 0
116
+ output = []
117
+ for input_tokens, pred_tokens in generator:
118
+ output.append((input_tokens, pred_tokens))
119
+ pred_length += len(pred_tokens)
120
+ assert pred_length == len(x)
121
+ assert gold == output
122
+
123
+
124
+ # noinspection DuplicatedCode
125
+ def test_get_rolling_token_windows_v4():
126
+ gold = [
127
+ ([-100, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
128
+ ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10]),
129
+ ([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [11]),
130
+ ([2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [12]),
131
+ ([3, 4, 5, 6, 7, 8, 9, 10, 11, 12], [13]),
132
+ ([4, 5, 6, 7, 8, 9, 10, 11, 12, 13], [14]),
133
+ ([5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [15]),
134
+ ([6, 7, 8, 9, 10, 11, 12, 13, 14, 15], [16]),
135
+ ([7, 8, 9, 10, 11, 12, 13, 14, 15, 16], [17]),
136
+ ([8, 9, 10, 11, 12, 13, 14, 15, 16, 17], [18]),
137
+ ([9, 10, 11, 12, 13, 14, 15, 16, 17, 18], [19]),
138
+ ([10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [20]),
139
+ ([11, 12, 13, 14, 15, 16, 17, 18, 19, 20], [21]),
140
+ ([12, 13, 14, 15, 16, 17, 18, 19, 20, 21], [22]),
141
+ ([13, 14, 15, 16, 17, 18, 19, 20, 21, 22], [23]),
142
+ ([14, 15, 16, 17, 18, 19, 20, 21, 22, 23], [24]),
143
+ ([15, 16, 17, 18, 19, 20, 21, 22, 23, 24], [25]),
144
+ ([16, 17, 18, 19, 20, 21, 22, 23, 24, 25], [26]),
145
+ ([17, 18, 19, 20, 21, 22, 23, 24, 25, 26], [27]),
146
+ ([18, 19, 20, 21, 22, 23, 24, 25, 26, 27], [28]),
147
+ ([19, 20, 21, 22, 23, 24, 25, 26, 27, 28], [29]),
148
+ ]
149
+ x = list(range(30))
150
+ generator = get_rolling_token_windows(
151
+ token_list=x,
152
+ prefix_token=-100,
153
+ max_seq_len=10,
154
+ context_len=10,
155
+ )
156
+ pred_length = 0
157
+ output = []
158
+ for input_tokens, pred_tokens in generator:
159
+ output.append((input_tokens, pred_tokens))
160
+ pred_length += len(pred_tokens)
161
+ assert pred_length == len(x)
162
+ assert gold == output
163
+
164
+
165
+ # noinspection DuplicatedCode
166
+ def test_get_rolling_token_windows_v5():
167
+ gold = [
168
+ ([-100, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
169
+ (
170
+ [9, 10, 11, 12, 13, 14, 15, 16, 17, 18],
171
+ [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
172
+ ),
173
+ (
174
+ [19, 20, 21, 22, 23, 24, 25, 26, 27, 28],
175
+ [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
176
+ ),
177
+ ]
178
+ x = list(range(30))
179
+ generator = get_rolling_token_windows(
180
+ token_list=x,
181
+ prefix_token=-100,
182
+ max_seq_len=10,
183
+ context_len=1,
184
+ )
185
+ pred_length = 0
186
+ output = []
187
+ for input_tokens, pred_tokens in generator:
188
+ output.append((input_tokens, pred_tokens))
189
+ pred_length += len(pred_tokens)
190
+ assert pred_length == len(x)
191
+ assert gold == output
192
+
193
+
194
+ # noinspection DuplicatedCode
195
+ def test_get_rolling_token_windows_v6():
196
+ gold = [
197
+ ([-100, 0], [0, 1]),
198
+ ([1, 2], [2, 3]),
199
+ ([3, 4], [4, 5]),
200
+ ([5, 6], [6, 7]),
201
+ ([6, 7], [8]),
202
+ ]
203
+ x = list(range(9))
204
+ generator = get_rolling_token_windows(
205
+ token_list=x,
206
+ prefix_token=-100,
207
+ max_seq_len=2,
208
+ context_len=1,
209
+ )
210
+ pred_length = 0
211
+ output = []
212
+ for input_tokens, pred_tokens in generator:
213
+ output.append((input_tokens, pred_tokens))
214
+ pred_length += len(pred_tokens)
215
+ assert pred_length == len(x)
216
+ assert gold == output
217
+
218
+
219
+ def test_get_rolling_token_windows_empty():
220
+ generator = get_rolling_token_windows(
221
+ token_list=[],
222
+ prefix_token=-100,
223
+ max_seq_len=2,
224
+ context_len=1,
225
+ )
226
+ n = 0
227
+ for _ in generator:
228
+ n += 1
229
+ assert n == 0
230
+
231
+
232
+ def test_make_disjoint_window():
233
+ assert make_disjoint_window(([1, 2, 3, 4, 5], [2, 3, 4, 5, 6])) == (
234
+ [1],
235
+ [2, 3, 4, 5, 6],
236
+ )
237
+ assert make_disjoint_window(([1, 2, 3, 4, 5], [4, 5, 6])) == ([1, 2, 3], [4, 5, 6])
238
+ assert make_disjoint_window(([1, 2, 3, 4, 5], [6])) == ([1, 2, 3, 4, 5], [6])
239
+
240
+
241
+ class TestCollator:
242
+ def make_generate_sample(self, end=10):
243
+ strings = ["x" * i for i in range(1, end + 1)]
244
+ gen_kwargs1, gen_kwargs2 = (
245
+ {"temperature": 0},
246
+ {"temperature": 0, "until": ["nn", "\n\n"]},
247
+ )
248
+ args = [
249
+ (string, gen_kwargs1 if i < len(strings) // 2 else gen_kwargs2)
250
+ for i, string in enumerate(strings)
251
+ ]
252
+
253
+ return args
254
+
255
+ def make_loglikelihood_sample(self, end=11):
256
+ samples = [
257
+ (("x", "x"), list(range(1, total_length + 1)))
258
+ for total_length in range(1, end + 1)
259
+ ]
260
+ return samples
261
+
262
+ def make_loglikelihood_sample_group(self, end=11):
263
+ a = [(("x", "x"), [1, 2, 3, 4, 5, 6, 7, 8], [x]) for x in range(9)]
264
+ b = [
265
+ (("x", "x"), [1, 2, 3, 4, 5, 6, 7, 8], [x, y, z])
266
+ for x, y, z in zip(range(9), range(9, 18), range(18, 27))
267
+ ]
268
+ return a + b
269
+
270
+ @pytest.mark.parametrize("batch_size, end", [(17, 30), (8, 61), (12, 48), (0, 9)])
271
+ def test_generations(self, batch_size, end):
272
+ _collate_gen = lambda x: (-len(x[0]), x[0]) # noqa: E731
273
+
274
+ generation_samples = self.make_generate_sample(int(end))
275
+ gens = Collator(generation_samples, _collate_gen, group_by="gen_kwargs")
276
+ chunks = gens.get_batched(n=int(batch_size), batch_fn=None)
277
+ output = []
278
+ for chunks in chunks:
279
+ # check batching
280
+ group_one = end // 2
281
+ group_two = end - end // 2
282
+ assert (
283
+ len(chunks) <= batch_size
284
+ if batch_size != 0
285
+ else len(chunks) in [group_one, group_two]
286
+ )
287
+ # check if reorder-er is working correctly
288
+ assert all(
289
+ len(chunks[i][0]) <= len(chunks[i - 1][0])
290
+ for i in range(1, len(chunks))
291
+ )
292
+ # check if grouping correctly
293
+ assert all(x[1] == chunks[0][1] for x in chunks)
294
+ for x in chunks:
295
+ output.append(x)
296
+ reordered_output = gens.get_original(output)
297
+ # check get original
298
+ assert reordered_output == generation_samples
299
+
300
+ @pytest.mark.parametrize("batch_size, end", [(17, 30), (8, 61), (12, 48), (0, 3)])
301
+ def test_loglikelihood(self, batch_size, end):
302
+ _collate_log = lambda x: (-len(x[1]), tuple(x[1])) # noqa: E731
303
+ loglikelihood_samples = self.make_loglikelihood_sample(int(end))
304
+ loglikelihoods = Collator(
305
+ loglikelihood_samples,
306
+ _collate_log,
307
+ )
308
+ chunks = loglikelihoods.get_batched(n=int(batch_size), batch_fn=None)
309
+ output = []
310
+ for chunks in chunks:
311
+ # check batching
312
+ assert len(chunks) <= batch_size if batch_size != 0 else len(chunks) == end
313
+ # check reorder
314
+ assert all(
315
+ len(chunks[i][1]) <= len(chunks[i - 1][1])
316
+ for i in range(1, len(chunks))
317
+ )
318
+ for x in chunks:
319
+ output.append(x[1])
320
+ # check indices
321
+ reordered_output = loglikelihoods.get_original(output)
322
+ assert reordered_output == [x[1] for x in loglikelihood_samples]
323
+
324
+ @pytest.mark.parametrize("batch_size", [17, 8, 12, 0])
325
+ def test_context_grouping(self, batch_size):
326
+ def _collate(x):
327
+ toks = x[1] + x[2]
328
+ return -len(toks), tuple(toks)
329
+
330
+ _collate_log = _collate # noqa: E731
331
+ loglikelihood_samples = self.make_loglikelihood_sample_group()
332
+ loglikelihoods = Collator(
333
+ loglikelihood_samples,
334
+ _collate_log,
335
+ group_fn=lambda a: a[-2] + a[-1][:-1],
336
+ group_by="contexts",
337
+ )
338
+ chunks = loglikelihoods.get_batched(n=int(batch_size), batch_fn=None)
339
+ output = []
340
+ outputs_ = []
341
+ for chunks in chunks:
342
+ # check batching
343
+ if batch_size != 0:
344
+ assert len(chunks) <= batch_size
345
+ # check reorder
346
+ assert all(
347
+ len(chunks[i][1]) <= len(chunks[i - 1][1])
348
+ for i in range(1, len(chunks))
349
+ )
350
+ for x in chunks:
351
+ for request_str, cont_toks, logits in loglikelihoods.get_cache(
352
+ req_str="".join(x[0]),
353
+ cxt_toks=x[1],
354
+ cont_toks=x[2],
355
+ logits=torch.tensor([1, 2, 3, 4, 5, 6, 7, 8])
356
+ .unsqueeze(0)
357
+ .unsqueeze(0),
358
+ ):
359
+ output.append(x[1])
360
+ outputs_.append(cont_toks)
361
+ assert len(output) == len(outputs_)
362
+ # check indices
363
+ reordered_output = loglikelihoods.get_original(output)
364
+ assert reordered_output == [x[1] for x in loglikelihood_samples]
365
+
366
+
367
+ def test_aggregate_mean():
368
+ # test weight_by_size is respected
369
+ assert (
370
+ aggregate_subtask_metrics([0.3, 0.2, 0.4], [20, 40, 100], weight_by_size=False)
371
+ == 0.3
372
+ )
373
+ assert (
374
+ aggregate_subtask_metrics([0.3, 0.2, 0.4], [20, 40, 100], weight_by_size=True)
375
+ == 0.3375
376
+ )
377
+
378
+
379
+ @pytest.mark.parametrize(
380
+ "samples",
381
+ [
382
+ [40 * [1.0] + 60 * [0.0], 30 * [1.0] + 30 * [0.0], 20 * [1.0] + 60 * [0.0]],
383
+ [35 * [1.0] + 65 * [0.0], 20 * [1.0] + 20 * [0.0]],
384
+ ],
385
+ )
386
+ def test_aggregate_stderrs(samples):
387
+ # check that aggregating subtasks' bootstrap stderrs with our formula
388
+ # (using weight_by_size) is ~equiv.
389
+ # to just getting bootstrap stderr of the whole set of samples
390
+ mean_stderr = stderr_for_metric(metric=mean, bootstrap_iters=100000)
391
+
392
+ stderrs = [mean_stderr(subtask) for subtask in samples]
393
+
394
+ sizes = [len(subtask) for subtask in samples]
395
+
396
+ assert np.allclose(
397
+ pooled_sample_stderr(stderrs, sizes),
398
+ mean_stderr(list(itertools.chain.from_iterable(samples))),
399
+ atol=1.0e-3,
400
+ )
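The rolling-window tests above all enforce the same invariant: after the windows are made disjoint, every token of the document is predicted exactly once (pred_length == len(x)). A minimal sketch of how the two helpers compose, using the same lm_eval.utils functions imported at the top of this file; the -100 prefix token and window sizes simply mirror the fixtures above.

from lm_eval.utils import get_rolling_token_windows, make_disjoint_window

tokens = list(range(34))  # stand-in for a tokenized document
windows = [
    make_disjoint_window(pair)
    for pair in get_rolling_token_windows(
        token_list=tokens,
        prefix_token=-100,  # placeholder prefix/BOS id, as in the tests
        max_seq_len=10,
        context_len=1,
    )
]
# Each token appears in exactly one prediction span, so the lengths sum to len(tokens).
assert sum(len(pred) for _, pred in windows) == len(tokens)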
lm-evaluation-harness/tests/testdata/blimp_left_branch_island_echo_question-v0-loglikelihood ADDED
@@ -0,0 +1 @@
1
+ 9852b38612db8c6adf938a5d8a7a9e5ce9e655259d6cc806b142506fcaff0ed4
lm-evaluation-harness/tests/testdata/ethics_virtue-v0-loglikelihood ADDED
@@ -0,0 +1 @@
1
+ 8021db8de46850090ddae6e6ec2d382029c3027b7c69884607503f916d09b709
lm-evaluation-harness/tests/testdata/hendrycksTest-electrical_engineering-v0-res.json ADDED
@@ -0,0 +1 @@
1
+ {"results": {"hendrycksTest-electrical_engineering": {"acc": 0.2689655172413793, "acc_norm": 0.2827586206896552, "acc_norm_stderr": 0.037528339580033376, "acc_stderr": 0.036951833116502325}}, "versions": {"hendrycksTest-electrical_engineering": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-human_sexuality-v0-loglikelihood ADDED
@@ -0,0 +1 @@
1
+ 4b07922fa1d549b655c21440b13d869263ce7dd9771d8147c450f11c91d26c10
lm-evaluation-harness/tests/testdata/pile_pubmed-central-v0-res.json ADDED
@@ -0,0 +1 @@
1
+ {"results": {"pile_pubmed-central": {"bits_per_byte": 1.5812411832795375e-05, "byte_perplexity": 1.0000158125368497, "word_perplexity": 1.000123107107861}}, "versions": {"pile_pubmed-central": 0}}
lm-evaluation-harness/tests/testdata/qqp-v0-res.json ADDED
@@ -0,0 +1 @@
1
+ {"results": {"qqp": {"acc": 0.49782339846648527, "acc_stderr": 0.0024866770696239894, "f1": 0.42322661288031593, "f1_stderr": 0.002695903831328166}}, "versions": {"qqp": 0}}
lm-evaluation-harness/tests/testdata/wmt20-ja-en-v0-res.json ADDED
@@ -0,0 +1 @@
1
+ {"results": {"wmt20-ja-en": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.010703148854351403, "chrf_stderr": 0.00022242113108130186, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-ja-en": 0}}
lm-evaluation-harness/tests/testdata/wmt20-ru-en-v0-greedy_until ADDED
@@ -0,0 +1 @@
1
+ 1477ab6542c26bd0222cc1aded174f33bf8d04d1cf6a1c0959aeca4ff3779adc
lm-evaluation-harness/tests/utils.py ADDED
@@ -0,0 +1,49 @@
1
+ import os
2
+ from typing import List, Union
3
+
4
+ from lm_eval.utils import load_yaml_config
5
+
6
+
7
+ # {{{CI}}}
8
+ # This is the path where the output listing the changed files for the tasks folder is stored
9
+ # FILE_PATH = file_path = ".github/outputs/tasks_all_changed_and_modified_files.txt"
10
+
11
+
12
+ # reads a text file and returns a list of words
13
+ # used to read the output of the changed txt from tj-actions/changed-files
14
+ def load_changed_files(file_path: str) -> List[str]:
15
+ with open(file_path, "r") as f:
16
+ content = f.read()
17
+ words_list = [x for x in content.split()]
18
+ return words_list
19
+
20
+
21
+ # checks the txt file for the list of changed files.
22
+ # if a file ends with .yaml, load the yaml config.
23
+ # if the config task is a string, it's a task config.
24
+ # if the config task is a list, it's a group config.
25
+ def parser(full_path: List[str]) -> List[str]:
26
+ _output = set()
27
+ for x in full_path:
28
+ if os.path.exists(x) and x.endswith(".yaml"):
29
+ config = load_yaml_config(x, mode="simple")
30
+ if isinstance(config["task"], str):
31
+ _output.add(config["task"])
32
+ elif isinstance(config["task"], list):
33
+ _output.add(config["group"])
34
+ return list(_output)
35
+
36
+
37
+ def new_tasks() -> Union[List[str], None]:
38
+ FILENAME = ".github/outputs/tasks_all_changed_and_modified_files.txt"
39
+ if os.path.exists(FILENAME):
40
+ # If tasks folder has changed then we get the list of files from FILENAME
41
+ # and parse the yaml files to get the task names.
42
+ return parser(load_changed_files(FILENAME))
43
+ elif os.getenv("API") is not None:
44
+ # Or if API has changed then we set the ENV variable API to True
45
+ # and run given tasks.
46
+ return ["arc_easy", "hellaswag", "piqa", "wikitext"]
47
+ # if neither is true, return None (the caller falls back to arc_easy)
48
+ else:
49
+ return
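A hypothetical CI driver (not part of the repository) showing how these helpers are meant to be combined: new_tasks() resolves task names from the changed-files output or the API flag, and the caller supplies a small default when it returns None. The import path below assumes the snippet runs from the repository root.

from tests.utils import new_tasks

selected = new_tasks() or ["arc_easy"]  # fall back when nothing relevant changed
print(f"Tasks selected for this CI run: {selected}")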
venv/lib/python3.10/site-packages/sympy/plotting/tests/test_region_or.png ADDED

Git LFS Details

  • SHA256: e41ba0d3dbf2a20f82bb79a4cbba5bb458dec396ccbdba5ed195d6b200ca7f2e
  • Pointer size: 129 Bytes
  • Size of remote file: 8.81 kB
venv/lib/python3.10/site-packages/sympy/plotting/tests/test_region_xor.png ADDED

Git LFS Details

  • SHA256: 92e71558103d03df0ea5c47876277968b5d4ca8ab8cf43b80b73cce9d962052c
  • Pointer size: 130 Bytes
  • Size of remote file: 10 kB
venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__init__.py ADDED
@@ -0,0 +1,13 @@
1
+ # Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (201 Bytes).
 
venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/checkpoint_reshaping_and_interoperability.cpython-310.pyc ADDED
Binary file (21.8 kB).
 
venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/convert_megatron_gpt2_checkpoint.cpython-310.pyc ADDED
Binary file (6.11 kB).
 
venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py ADDED
@@ -0,0 +1,904 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import argparse
16
+ import json
17
+ import os
18
+ import re
19
+ import sys
20
+ import types
21
+
22
+ import torch
23
+
24
+ from transformers import AutoTokenizer, GPT2Config
25
+ from transformers.modeling_utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME, shard_checkpoint
26
+
27
+
28
+ def add_checkpointing_args(parser):
29
+ parser.add_argument("--megatron-path", type=str, default=None, help="Base directory of Megatron repository")
30
+ parser.add_argument(
31
+ "--convert_checkpoint_from_megatron_to_transformers",
32
+ action="store_true",
33
+ help=(
34
+ "If True, convert a Megatron checkpoint to a Transformers checkpoint. "
35
+ "If False, convert a Transformers checkpoint to a Megatron checkpoint."
36
+ ),
37
+ )
38
+ parser.add_argument(
39
+ "--load_path",
40
+ type=str,
41
+ required=True,
42
+ help="Path to the checkpoint to convert.",
43
+ )
44
+ parser.add_argument(
45
+ "--save_path",
46
+ type=str,
47
+ required=True,
48
+ help="Path to the converted checkpoint.",
49
+ )
50
+ parser.add_argument("--print-checkpoint-structure", action="store_true")
51
+ return parser
52
+
53
+
54
+ def add_megatron_checkpoint_args(parser):
55
+ parser.add_argument(
56
+ "--target_tensor_model_parallel_size",
57
+ type=int,
58
+ default=1,
59
+ help=(
60
+ "The tensor model parallel size of the converted checkpoint. "
61
+ "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
62
+ ),
63
+ )
64
+ parser.add_argument(
65
+ "--target_pipeline_model_parallel_size",
66
+ type=int,
67
+ default=1,
68
+ help=(
69
+ "The pipeline model parallel size of the converted checkpoint. "
70
+ "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
71
+ ),
72
+ )
73
+ parser.add_argument(
74
+ "--target_data_parallel_size",
75
+ type=int,
76
+ default=1,
77
+ help=(
78
+ "The data parallel size of the converted checkpoint. "
79
+ "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
80
+ ),
81
+ )
82
+ parser.add_argument(
83
+ "--target_params_dtype",
84
+ type=str,
85
+ default="fp32",
86
+ help=(
87
+ "The dtype of the converted checkpoint. "
88
+ "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
89
+ ),
90
+ )
91
+ parser.add_argument(
92
+ "--make_vocab_size_divisible_by",
93
+ type=int,
94
+ default=128,
95
+ help=(
96
+ "Pad the vocab size to be divisible by this value. "
97
+ "This is added for computational efficieny reasons. "
98
+ "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
99
+ ),
100
+ )
101
+ parser.add_argument(
102
+ "--use_distributed_optimizer",
103
+ action="store_true",
104
+ help=(
105
+ "If True, use the distributed optimizer. "
106
+ "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
107
+ ),
108
+ )
109
+ return parser
110
+
111
+
112
+ def add_transformers_checkpoint_args(parser):
113
+ parser.add_argument(
114
+ "--tokenizer_name",
115
+ type=str,
116
+ default=None,
117
+ help=(
118
+ "The name of the pre-trained tokenizer to save. "
119
+ "If not None, the tokenizer will be saved. "
120
+ "Only used when converting a Megatron checkpoint to a Transformers checkpoint."
121
+ ),
122
+ )
123
+ parser.add_argument(
124
+ "--max_shard_size",
125
+ type=str,
126
+ default="10GB",
127
+ help=(
128
+ "The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size "
129
+ "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`). "
130
+ "Only used when converting a Megatron checkpoint to a Transformers checkpoint."
131
+ ),
132
+ )
133
+
134
+ return parser
135
+
136
+
137
+ # The simple map of names for "automated" rules.
138
+ megatron_to_transformers = {
139
+ "attention.dense": ".attn.c_proj.",
140
+ "self_attention.dense": ".attn.c_proj.",
141
+ "mlp.dense_h_to_4h": ".mlp.c_fc.",
142
+ "mlp.dense_4h_to_h": ".mlp.c_proj.",
143
+ }
144
+ transformers_to_megatron = {v[1:-1]: k for k, v in megatron_to_transformers.items()}
145
+
146
+ tensor_parallel_params = [
147
+ # megatron-lm layers to merge across tp ranks
148
+ "self_attention.query_key_value.weight",
149
+ "self_attention.query_key_value.bias",
150
+ "self_attention.dense.weight",
151
+ "mlp.dense_h_to_4h.weight",
152
+ "mlp.dense_h_to_4h.bias",
153
+ "mlp.dense_4h_to_h.weight",
154
+ # deprecated
155
+ "attention.query_key_value.weight",
156
+ "attention.query_key_value.bias",
157
+ "attention.dense.weight",
158
+ # transformers layers to split across tp ranks
159
+ "attn.c_attn.weight",
160
+ "attn.c_attn.bias",
161
+ "attn.c_proj.weight",
162
+ "mlp.c_fc.weight",
163
+ "mlp.c_fc.bias",
164
+ "mlp.c_proj.weight",
165
+ ]
166
+
167
+
168
+ def recursive_print(name, val, spaces=0):
169
+ """
170
+ Recursively print the structure of a checkpoint. This function is taken from `convert_megatron_gpt2_checkpoint.py`
171
+
172
+ Args:
173
+ name (str): the name of the current tensor parameter
174
+ val (Tuple(int)): the shape of the current tensor parameter
175
+ spaces (int): the number of spaces to print before the output for a nested structure
176
+ """
177
+ # Format the message.
178
+ if name is None:
179
+ msg = None
180
+ else:
181
+ fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
182
+ msg = fmt.format(name)
183
+
184
+ # Print and recurse (if needed).
185
+ if isinstance(val, dict):
186
+ if msg is not None:
187
+ print(msg)
188
+ for k in val.keys():
189
+ recursive_print(k, val[k], spaces + 2)
190
+ elif isinstance(val, torch.Tensor):
191
+ print(msg, ":", val.size())
192
+ else:
193
+ print(msg, ":", val)
194
+
195
+
196
+ def megatron_to_transformers_fix_query_key_value_ordering(
197
+ param, checkpoint_version, num_splits, num_heads, hidden_size
198
+ ):
199
+ """
200
+ Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :] for compatibility with later versions
201
+ of NVIDIA Megatron-LM. The inverse operation is performed inside Megatron-LM to read checkpoints:
202
+ https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209 If param is the weight tensor of the
203
+ self-attention block, the returned tensor will have to be transposed one more time to be read by HuggingFace GPT2.
204
+ This function is taken from `convert_megatron_gpt2_checkpoint.py`
205
+
206
+ Args:
207
+ param (torch.Tensor): the tensor to permute
208
+ checkpoint_version (int): the version of the checkpoint.
209
+ num_splits (int): the number of projections, usually 3 for (Query, Key, Value)
210
+ num_heads (int): the number of attention heads
211
+ hidden_size (int): the hidden size per head
212
+ """
213
+
214
+ input_shape = param.size()
215
+ if checkpoint_version == 1.0:
216
+ # version 1.0 stores [num_heads * hidden_size * num_splits, :]
217
+ saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
218
+ param = param.view(*saved_shape)
219
+ param = param.transpose(0, 2)
220
+ param = param.transpose(1, 2).contiguous()
221
+ elif checkpoint_version >= 2.0:
222
+ # other versions store [num_heads * num_splits * hidden_size, :]
223
+ saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
224
+ param = param.view(*saved_shape)
225
+ param = param.transpose(0, 1).contiguous()
226
+ param = param.view(*input_shape)
227
+ return param
228
+
229
+
230
+ def transformers_to_megatron_fix_query_key_value_ordering(
231
+ param, checkpoint_version, num_splits, num_heads, hidden_size
232
+ ):
233
+ """
234
+ Permutes layout of param tensor to the one compatible with the respective NVIDIA Megatron-LM checkpoint versions. Input
235
+ is [num_splits * num_heads * hidden_size, :] and output is [num_heads * hidden_size * num_splits, :] for version
236
+ 1.0 and [num_heads * num_splits * hidden_size, :] for version 2.0 and later. If param is the weight tensor of the
237
+ self-attention block, the param needs to be already transposed before calling this function.
238
+
239
+ Args:
240
+ param (torch.Tensor): the tensor to permute
241
+ checkpoint_version (int): the version of the checkpoint.
242
+ num_splits (int): the number of projections, usually 3 for (Query, Key, Value)
243
+ num_heads (int): the number of attention heads
244
+ hidden_size (int): the hidden size per head
245
+ """
246
+
247
+ # Input is [num_splits * num_heads * hidden_size, :]
248
+ input_shape = param.size()
249
+ if checkpoint_version == 1.0:
250
+ # version 1.0 stores [num_heads * hidden_size * num_splits, :]
251
+ current_shape = (num_splits, num_heads, hidden_size) + input_shape[1:]
252
+ param = param.view(*current_shape)
253
+ param = param.transpose(0, 2)
254
+ param = param.transpose(1, 2).contiguous()
255
+ elif checkpoint_version >= 2.0:
256
+ # other versions store [num_heads * num_splits * hidden_size, :]
257
+ current_shape = (num_splits, num_heads, hidden_size) + input_shape[1:]
258
+ param = param.view(*current_shape)
259
+ param = param.transpose(0, 1).contiguous()
260
+ param = param.view(*input_shape)
261
+ return param
262
+
263
+
264
+ def merge_transformers_sharded_states(path, num_checkpoints):
265
+ """
266
+ Merge sharded checkpoints from transformers into a single checkpoint.
267
+
268
+ Args:
269
+ path (str): the path to the sharded checkpoints
270
+ num_checkpoints (int): the number of checkpoints to merge
271
+ """
272
+ state_dict = {}
273
+ for i in range(1, num_checkpoints + 1):
274
+ checkpoint_path = os.path.join(path, f"pytorch_model-{i:05d}-of-{num_checkpoints:05d}.bin")
275
+ current_chunk = torch.load(checkpoint_path, map_location="cpu")
276
+ state_dict.update(current_chunk)
277
+ return state_dict
278
+
279
+
280
+ def get_megatron_sharded_states(args, tp_size, pp_size, pp_rank):
281
+ """
282
+ Get sharded checkpoints from NVIDIA Megatron-LM checkpoint based on the provided tensor parallel size, pipeline
283
+ parallel size and pipeline parallel rank.
284
+
285
+ Args:
286
+ args (argparse.Namespace): the arguments to the script
287
+ tp_size (int): the tensor parallel size
288
+ pp_size (int): the pipeline parallel size
289
+ pp_rank (int): the pipeline parallel rank
290
+ """
291
+ tp_state_dicts = []
292
+ for i in range(tp_size):
293
+ sub_dir_name = f"mp_rank_{i:02d}" if pp_size == 1 else f"mp_rank_{i:02d}_{pp_rank:03d}"
294
+ for checkpoint_name in ["model_optim_rng.pt", "model_rng.pt"]:
295
+ checkpoint_path = os.path.join(args.load_path, sub_dir_name, checkpoint_name)
296
+ if os.path.isfile(checkpoint_path):
297
+ break
298
+ state_dict = torch.load(checkpoint_path, map_location="cpu")
299
+ tp_state_dicts.append(state_dict)
300
+ return tp_state_dicts
301
+
302
+
303
+ def get_element_from_dict_by_path(d, path):
304
+ """
305
+ Get element from dictionary by path. If element is not present, recursively add empty dictionaries.
306
+
307
+ Args:
308
+ d (dict): the dictionary to get the element from
309
+ path (list): the path to the element which is delimited by "."
310
+ """
311
+ path = path.split(".")
312
+ for k in path:
313
+ if k not in d:
314
+ d[k] = {}
315
+ d = d[k]
316
+ return d
317
+
318
+
319
+ def convert_checkpoint_from_megatron_to_transformers(args):
320
+ """
321
+ Convert NVIDIA Megatron-LM checkpoint to HuggingFace Transformers checkpoint. This handles Megatron checkpoints
322
+ with different tensor parallelism and pipeline parallelism sizes. It saves the converted checkpoint into shards
323
+ using HuggingFace Transformers checkpoint sharding functionality. This greatly extends the functionality of
324
+ `convert_megatron_gpt2_checkpoint.py`
325
+
326
+ Args:
327
+ args (argparse.Namespace): the arguments to the script
328
+ """
329
+ # Load Megatron-LM checkpoint arguments from the state dict
330
+ sub_dirs = os.listdir(args.load_path)
331
+ possible_sub_dirs = ["mp_rank_00", "mp_rank_00_000"]
332
+ for sub_dir in possible_sub_dirs:
333
+ if sub_dir in sub_dirs:
334
+ rank0_checkpoint_name = os.listdir(os.path.join(args.load_path, sub_dir))[0]
335
+ rank0_checkpoint_path = os.path.join(args.load_path, sub_dir, rank0_checkpoint_name)
336
+ break
337
+ print(f"Loading Megatron-LM checkpoint arguments from: {rank0_checkpoint_path}")
338
+ state_dict = torch.load(rank0_checkpoint_path, map_location="cpu")
339
+ megatron_args = state_dict.get("args", None)
340
+ if megatron_args is None:
341
+ raise ValueError(
342
+ "Megatron-LM checkpoint does not contain arguments. This utility only supports Megatron-LM checkpoints"
343
+ " containing all the megatron arguments. This is because it loads all config related to model"
344
+ " architecture, the tensor and pipeline model parallel size from the checkpoint insead of user having to"
345
+ " manually specify all the details. Please save Megatron-LM checkpoint along with all the megatron"
346
+ " arguments to use this utility."
347
+ )
348
+
349
+ # Create Transformers GPT2 config from Megatron-LM arguments
350
+ if megatron_args is not None:
351
+ if megatron_args.bias_gelu_fusion:
352
+ activation_function = "gelu_fast"
353
+ elif megatron_args.openai_gelu:
354
+ activation_function = "gelu_new"
355
+ else:
356
+ activation_function = "gelu"
357
+ else:
358
+ # in the very early days this used to be "gelu_new"
359
+ activation_function = "gelu_new"
360
+ vocab_size = (
361
+ megatron_args.padded_vocab_size
362
+ if getattr(megatron_args, "orig_vocab_size", None) is None
363
+ else megatron_args.orig_vocab_size
364
+ )
365
+ print(vocab_size)
366
+
367
+ config = GPT2Config(
368
+ vocab_size=vocab_size,
369
+ n_positions=megatron_args.max_position_embeddings,
370
+ n_embd=megatron_args.hidden_size,
371
+ n_layer=megatron_args.num_layers,
372
+ n_head=megatron_args.num_attention_heads,
373
+ n_inner=megatron_args.ffn_hidden_size,
374
+ activation_function=activation_function,
375
+ resid_pdrop=0.1,
376
+ embd_pdrop=0.1,
377
+ attn_pdrop=0.1,
378
+ layer_norm_epsilon=1e-5,
379
+ initializer_range=0.02,
380
+ summary_type="cls_index",
381
+ summary_use_proj=True,
382
+ summary_activation=None,
383
+ summary_proj_to_labels=True,
384
+ summary_first_dropout=0.1,
385
+ scale_attn_weights=True,
386
+ use_cache=True,
387
+ bos_token_id=vocab_size - 1,
388
+ eos_token_id=vocab_size - 1,
389
+ architectures=["GPT2LMHeadModel"],
390
+ )
391
+
392
+ output_state_dict = {}
393
+
394
+ checkpoint_version = state_dict.get("checkpoint_version", 0.0)
395
+ tp_size = megatron_args.tensor_model_parallel_size
396
+ pp_size = megatron_args.pipeline_model_parallel_size
397
+ dtype = torch.float32
398
+ # The regex to extract layer names.
399
+ layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
400
+
401
+ # Convert.
402
+ print("Converting")
403
+
404
+ # Embeddings
405
+ print("Converting embeddings")
406
+ tp_state_dicts = get_megatron_sharded_states(args, tp_size, pp_size, 0)
407
+
408
+ # Convert and store the position embeddings.
409
+ position_embeddings = get_element_from_dict_by_path(
410
+ tp_state_dicts[0], "model.language_model.embedding.position_embeddings.weight"
411
+ )
412
+ output_state_dict["transformer.wpe.weight"] = position_embeddings.to(dtype)
413
+
414
+ # Convert and store the word embeddings.
415
+ word_embeddings = torch.cat(
416
+ [
417
+ get_element_from_dict_by_path(
418
+ tp_state_dicts[tp_rank], "model.language_model.embedding.word_embeddings.weight"
419
+ )
420
+ for tp_rank in range(tp_size)
421
+ ],
422
+ dim=0,
423
+ )
424
+ word_embeddings = word_embeddings[:vocab_size].to(dtype)
425
+ output_state_dict["transformer.wte.weight"] = word_embeddings
426
+
427
+ # Transformer Layers
428
+ print("Converting transformer layers")
429
+ # The number of heads.
430
+ heads = config.n_head
431
+ # The hidden_size per head.
432
+ hidden_size_per_head = config.n_embd // config.n_head
433
+ n_positions = config.n_positions
434
+ num_layers = config.num_hidden_layers // pp_size
435
+
436
+ for pp_rank in range(pp_size):
437
+ if pp_size > 0:
438
+ print(f"Converting pipeline parallel rank {pp_rank}")
439
+ tp_state_dicts = get_megatron_sharded_states(args, tp_size, pp_size, pp_rank)
440
+
441
+ # The transformer.
442
+ path = (
443
+ "model.language_model.transformer"
444
+ if "transformer" in get_element_from_dict_by_path(tp_state_dicts[0], "model.language_model").keys()
445
+ else "model.language_model.encoder"
446
+ )
447
+ # Extract the layers.
448
+ for key, val in get_element_from_dict_by_path(tp_state_dicts[0], path).items():
449
+ # Match the name.
450
+ m = layer_re.match(key)
451
+ # Stop if that's not a layer
452
+ if m is None:
453
+ break
454
+
455
+ # The index of the layer.
456
+ layer_idx = int(m.group(1)) + pp_rank * num_layers
457
+ # The name of the operation.
458
+ op_name = m.group(2)
459
+ # Is it a weight or a bias?
460
+ weight_or_bias = m.group(3)
461
+
462
+ # The name of the layer.
463
+ layer_name = f"transformer.h.{layer_idx}"
464
+
465
+ if op_name + "." + weight_or_bias not in tensor_parallel_params:
466
+ params = val.to(dtype)
467
+ else:
468
+ dim = 1 if op_name in ["self_attention.dense", "mlp.dense_4h_to_h", "attention.dense"] else 0
469
+ params = torch.cat(
470
+ [val]
471
+ + [
472
+ get_element_from_dict_by_path(tp_state_dicts[tp_rank], f"{path}")[key]
473
+ for tp_rank in range(1, tp_size)
474
+ ],
475
+ dim=dim,
476
+ ).to(dtype)
477
+
478
+ # For layernorm(s), simply store the layer norm.
479
+ if op_name.endswith("layernorm"):
480
+ ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
481
+ output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = params
482
+
483
+ # Transpose the QKV matrix.
484
+ elif (
485
+ op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
486
+ ) and weight_or_bias == "weight":
487
+ # Insert a tensor of 1x1xDxD bias.
488
+ causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=dtype)).view(
489
+ 1, 1, n_positions, n_positions
490
+ )
491
+ output_state_dict[layer_name + ".attn.bias"] = causal_mask
492
+
493
+ # Insert a "dummy" tensor for masked_bias.
494
+ masked_bias = torch.tensor(-1e4, dtype=dtype)
495
+ output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
496
+
497
+ out_val = megatron_to_transformers_fix_query_key_value_ordering(
498
+ params,
499
+ checkpoint_version,
500
+ 3,
501
+ heads,
502
+ hidden_size_per_head,
503
+ )
504
+ # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
505
+ out_val = out_val.transpose(0, 1).contiguous()
506
+ # Store.
507
+ output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
508
+
509
+ # Transpose the bias.
510
+ elif (
511
+ op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
512
+ ) and weight_or_bias == "bias":
513
+ out_val = megatron_to_transformers_fix_query_key_value_ordering(
514
+ params, checkpoint_version, 3, heads, hidden_size_per_head
515
+ )
516
+ # Store. No change of shape.
517
+ output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
518
+
519
+ # Transpose the weights.
520
+ elif weight_or_bias == "weight":
521
+ out_name = megatron_to_transformers[op_name]
522
+ output_state_dict[layer_name + out_name + "weight"] = params.transpose(0, 1)
523
+
524
+ # Copy the bias.
525
+ elif weight_or_bias == "bias":
526
+ out_name = megatron_to_transformers[op_name]
527
+ output_state_dict[layer_name + out_name + "bias"] = params
528
+
529
+ if config.n_layer != (layer_idx + 1):
530
+ raise ValueError(f"Expected {config.n_layer} layers but found {layer_idx + 1}")
531
+
532
+ # The final layernorm.
533
+ print("Converting final layernorm")
534
+ params = get_element_from_dict_by_path(tp_state_dicts[0], str(path))
535
+ output_state_dict["transformer.ln_f.weight"] = params["final_layernorm.weight"].to(dtype)
536
+ output_state_dict["transformer.ln_f.bias"] = params["final_layernorm.bias"].to(dtype)
537
+
538
+ # For the LM head, transformers expects the weight matrix to be tied to the word embeddings.
539
+ print("Converting LM head")
540
+ output_state_dict["lm_head.weight"] = word_embeddings.to(dtype)
541
+
542
+ # It should be done!
543
+ print("Conversion from Megatron-LM to Transformers is done!")
544
+
545
+ # Print the structure of converted state dict.
546
+ if args.print_checkpoint_structure:
547
+ recursive_print(None, output_state_dict)
548
+
549
+ # Add tokenizer class info to config
550
+ # see https://github.com/huggingface/transformers/issues/13906)
551
+
552
+ if args.tokenizer_name is None:
553
+ tokenizer_name = "openai-community/gpt2"
554
+ else:
555
+ tokenizer_name = args.tokenizer_name
556
+
557
+ tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
558
+ tokenizer_class = type(tokenizer).__name__
559
+ config.tokenizer_class = tokenizer_class
560
+
561
+ # Store the config to file.
562
+ print("Saving config")
563
+ config.save_pretrained(args.save_path)
564
+
565
+ # Save tokenizer based on args
566
+ if args.tokenizer_name is not None:
567
+ print(f"Adding {tokenizer_class} tokenizer files")
568
+ tokenizer.save_pretrained(args.save_path)
569
+
570
+ # Store the state_dict to file.
571
+ max_shard_size = int(args.max_shard_size) if args.max_shard_size.isdigit() else args.max_shard_size
572
+ shards, index = shard_checkpoint(output_state_dict, max_shard_size=max_shard_size)
573
+
574
+ # Save the model
575
+ for shard_file, shard in shards.items():
576
+ torch.save(shard, os.path.join(args.save_path, shard_file))
577
+
578
+ if index is None:
579
+ print(f"Model weights saved in {os.path.join(args.save_path, WEIGHTS_NAME)}")
580
+ else:
581
+ save_index_file = os.path.join(args.save_path, WEIGHTS_INDEX_NAME)
582
+ # Save the index as well
583
+ with open(save_index_file, "w", encoding="utf-8") as f:
584
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
585
+ f.write(content)
586
+ print(
587
+ f"The model is bigger than the maximum size per checkpoint ({args.max_shard_size}) and is going to be "
588
+ f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the "
589
+ f"index located at {save_index_file}."
590
+ )
591
+
592
+
593
+ def convert_checkpoint_from_transformers_to_megatron(args):
594
+ """
595
+ Convert a checkpoint from HuggingFace Transformers to Megatron-LM. This supports converting to checkpoints with arbitrary
596
+ tensor parallelism and pipeline parallelism sizes. It takes as input a checkpoint from HuggingFace Transformers
597
+ which can have multiple shards.
598
+
599
+ Args:
600
+ args (argparse.Namespace): the arguments to the script
601
+
602
+ """
603
+ os.makedirs(args.save_path, exist_ok=True)
604
+ # Search in directory above this
605
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
606
+ if args.megatron_path is not None:
607
+ sys.path.insert(0, args.megatron_path)
608
+
609
+ try:
610
+ from megatron.tokenizer.tokenizer import _vocab_size_with_padding
611
+ except ModuleNotFoundError:
612
+ print("Unable to import Megatron, please specify the path to Megatron using --megatron-path. Exiting.")
613
+ exit(1)
614
+
615
+ # load the transformers model state dict and config
616
+ sub_dirs = [x for x in os.listdir(args.load_path) if x.startswith("pytorch_model")]
617
+ if len(sub_dirs) == 1:
618
+ checkpoint_name = "pytorch_model.bin"
619
+ state_dict = torch.load(os.path.join(args.load_path, checkpoint_name), map_location="cpu")
620
+ else:
621
+ num_checkpoints = len(sub_dirs) - 1
622
+ state_dict = merge_transformers_sharded_states(args.load_path, num_checkpoints)
623
+
624
+ config = GPT2Config.from_pretrained(args.load_path)
625
+
626
+ # Saving the tracker file
627
+ tracker_filepath = os.path.join(args.save_path, "latest_checkpointed_iteration.txt")
628
+ with open(tracker_filepath, "w") as f:
629
+ f.write("release")
630
+
631
+ # create `release` dir in args.load_path
632
+ release_dir = os.path.join(args.save_path, "release")
633
+ os.makedirs(release_dir, exist_ok=True)
634
+
635
+ # megatron args
636
+ megatron_args = {
637
+ "orig_vocab_size": config.vocab_size,
638
+ "max_position_embeddings": config.n_positions,
639
+ "hidden_size": config.n_embd,
640
+ "num_layers": config.n_layer,
641
+ "num_attention_heads": config.n_head,
642
+ "ffn_hidden_size": config.n_inner,
643
+ "tensor_model_parallel_size": args.target_tensor_model_parallel_size,
644
+ "pipeline_model_parallel_size": args.target_pipeline_model_parallel_size,
645
+ "data_parallel_size": args.target_data_parallel_size,
646
+ "make_vocab_size_divisible_by": args.make_vocab_size_divisible_by,
647
+ "rank": 0,
648
+ "tokenizer_type": "GPT2BPETokenizer",
649
+ }
650
+
651
+ if config.activation_function == "gelu":
652
+ megatron_args["bias_gelu_fusion"] = False
653
+ megatron_args["openai_gelu"] = False
654
+ elif config.activation_function == "gelu_fast":
655
+ megatron_args["bias_gelu_fusion"] = True
656
+ megatron_args["openai_gelu"] = False
657
+ elif config.activation_function == "gelu_new":
658
+ megatron_args["bias_gelu_fusion"] = False
659
+ megatron_args["openai_gelu"] = True
660
+
661
+ margs = types.SimpleNamespace()
662
+ for k, v in megatron_args.items():
663
+ setattr(margs, k, v)
664
+
665
+ # params dtype
666
+ if args.target_params_dtype == "fp16":
667
+ dtype = torch.float16
668
+ elif args.target_params_dtype == "bf16":
669
+ dtype = torch.bfloat16
670
+ else:
671
+ dtype = torch.float32
672
+ setattr(margs, "params_dtype", dtype)
673
+
674
+ # save dummy optim state dict
675
+ dummy_optim_state_dict = {}
676
+ dummy_optim_state_dict["optimizer"] = {
677
+ "step": 0,
678
+ "param_groups": [
679
+ {
680
+ "lr": 0.0,
681
+ "beta1": 0.0,
682
+ "beta2": 0.0,
683
+ "eps": 0.0,
684
+ "weight_decay": 0.0,
685
+ "correct_bias": False,
686
+ "params": [],
687
+ }
688
+ ],
689
+ }
690
+ if args.use_distributed_optimizer:
691
+ for i in range(args.target_pipeline_model_parallel_size):
692
+ for j in range(args.target_tensor_model_parallel_size):
693
+ for k in range(args.target_data_parallel_size):
694
+ if args.target_pipeline_model_parallel_size == 1:
695
+ checkpoint_dir = f"mp_rank_{j:02d}_{k:03d}"
696
+ else:
697
+ checkpoint_dir = f"mp_rank_{j:02d}_{i:03d}_{k:03d}"
698
+ checkpoint_dir = os.path.join(release_dir, checkpoint_dir)
699
+ os.makedirs(checkpoint_dir, exist_ok=True)
700
+ torch.save(
701
+ dummy_optim_state_dict,
702
+ os.path.join(checkpoint_dir, "optim.pt"),
703
+ )
704
+
705
+ # Convert.
706
+ print("Converting")
707
+ output_state_dict = []
708
+ for i in range(args.target_tensor_model_parallel_size):
709
+ output_state_dict.append({})
710
+
711
+ # Embedding layer
712
+ print("converting embedding layer")
713
+ pos_embedding = state_dict["transformer.wpe.weight"].to(dtype)
714
+ word_embedding = state_dict["transformer.wte.weight"].to(dtype)
715
+ orig_vocab_size = config.vocab_size
716
+ padded_vocab_size = _vocab_size_with_padding(orig_vocab_size, margs)
717
+ setattr(margs, "padded_vocab_size", padded_vocab_size)
718
+ # Cut out extra padding we don't need
719
+ if orig_vocab_size > padded_vocab_size:
720
+ full_word_embed = word_embedding[0:padded_vocab_size, :]
721
+ # Expanding embedding to larger size by replicating final entry
722
+ elif orig_vocab_size < padded_vocab_size:
723
+ padding_size = padded_vocab_size - orig_vocab_size
724
+ full_word_embed = torch.cat((word_embedding, word_embedding[-1].unsqueeze(0).expand(padding_size, -1)))
725
+ # Same size!
726
+ else:
727
+ full_word_embed = word_embedding
728
+
729
+ # Split into new tensor model parallel sizes
730
+ out_word_embed = torch.chunk(full_word_embed, args.target_tensor_model_parallel_size, dim=0)
731
+ for i in range(args.target_tensor_model_parallel_size):
732
+ pos_emb_dict = get_element_from_dict_by_path(
733
+ output_state_dict[i], "model.language_model.embedding.position_embeddings"
734
+ )
735
+ pos_emb_dict["weight"] = pos_embedding
736
+
737
+ word_emb_dict = get_element_from_dict_by_path(
738
+ output_state_dict[i], "model.language_model.embedding.word_embeddings"
739
+ )
740
+ word_emb_dict["weight"] = out_word_embed[i].clone()
741
+
742
+ # Transformer layers
743
+ print("converting transformer layers")
744
+ if config.num_attention_heads % args.target_tensor_model_parallel_size != 0:
745
+ raise ValueError(
746
+ f"Number of attention heads ({config.num_attention_heads}) must be divisible by number of tensor parallelism"
747
+ f" ({args.target_tensor_model_parallel_size})"
748
+ )
749
+
750
+ if config.num_hidden_layers % args.target_pipeline_model_parallel_size != 0:
751
+ raise ValueError(
752
+ f"Number of layers ({config.num_hidden_layers}) must be divisible by number of pipeline parallelism"
753
+ f" ({args.target_pipeline_model_parallel_size})"
754
+ )
755
+
756
+ num_layers = config.num_hidden_layers // args.target_pipeline_model_parallel_size
757
+
758
+ layer_re = re.compile(r"transformer.h\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
759
+ # The number of heads.
760
+ heads = config.n_head
761
+ # The hidden_size per head.
762
+ hidden_size_per_head = config.n_embd // config.n_head
763
+ for pp_rank in range(args.target_pipeline_model_parallel_size):
764
+ layer_offset = pp_rank * num_layers
765
+ if pp_rank > 0:
766
+ output_state_dict = []
767
+ for i in range(args.target_tensor_model_parallel_size):
768
+ output_state_dict.append({})
769
+
770
+ for layer in range(num_layers):
771
+ pp_layer_id = layer + layer_offset
772
+ layers_to_copy = [
773
+ layer_name
774
+ for layer_name in state_dict.keys()
775
+ if layer_name.startswith(f"transformer.h.{pp_layer_id}.")
776
+ ]
777
+
778
+ for layer_name in layers_to_copy:
779
+ m = layer_re.match(layer_name)
780
+ # Stop if that's not a layer
781
+ if m is None:
782
+ break
783
+
784
+ # The index of the layer.
785
+ _ = int(m.group(1))
786
+ # The name of the operation.
787
+ op_name = m.group(2)
788
+ # Is it a weight or a bias?
789
+ weight_or_bias = m.group(3)
790
+
791
+ params = state_dict[layer_name].to(dtype)
792
+ # handle layernorm
793
+ if op_name.startswith("ln"):
794
+ out_name = "input_layernorm" if op_name.endswith("1") else "post_attention_layernorm"
795
+ layer_name = f"layers.{layer}.{out_name}.{weight_or_bias}"
796
+
797
+ # handle attention K, V, Q weights
798
+ elif op_name.startswith("attn.c_attn") and weight_or_bias == "weight":
799
+ # transformers stores D X (3*D) but Megatron-LM expects (3*D) X D.
800
+ params = params.transpose(0, 1).contiguous()
801
+
802
+ params = transformers_to_megatron_fix_query_key_value_ordering(
803
+ params,
804
+ 3.0,
805
+ 3,
806
+ heads,
807
+ hidden_size_per_head,
808
+ )
809
+ layer_name = f"layers.{layer}.self_attention.query_key_value.{weight_or_bias}"
810
+
811
+ # handle attention K, V, Q bias
812
+ elif op_name.startswith("attn.c_attn") and weight_or_bias == "bias":
813
+ params = transformers_to_megatron_fix_query_key_value_ordering(
814
+ params,
815
+ 3.0,
816
+ 3,
817
+ heads,
818
+ hidden_size_per_head,
819
+ )
820
+ layer_name = f"layers.{layer}.self_attention.query_key_value.{weight_or_bias}"
821
+
822
+ # handle attention and mlp weights
823
+ elif weight_or_bias == "weight":
824
+ out_name = transformers_to_megatron.get(op_name, None)
825
+ if out_name is None:
826
+ continue
827
+ params = params.transpose(0, 1)
828
+ layer_name = f"layers.{layer}.{out_name}.{weight_or_bias}"
829
+
830
+ # handle attention and mlp bias
831
+ elif weight_or_bias == "bias":
832
+ out_name = transformers_to_megatron.get(op_name, None)
833
+ if out_name is None:
834
+ continue
835
+ layer_name = f"layers.{layer}.{out_name}.{weight_or_bias}"
836
+
837
+ # skip
838
+ else:
839
+ continue
840
+
841
+ if op_name + "." + weight_or_bias in tensor_parallel_params:
842
+ dim = 1 if op_name in ["attn.c_proj", "mlp.c_proj"] else 0
843
+ params = torch.chunk(params, args.target_tensor_model_parallel_size, dim=dim)
844
+
845
+ for i in range(args.target_tensor_model_parallel_size):
846
+ params_dict = get_element_from_dict_by_path(output_state_dict[i], "model.language_model.encoder")
847
+ params_dict[layer_name] = (
848
+ params[i].clone() if (op_name + "." + weight_or_bias in tensor_parallel_params) else params
849
+ )
850
+
851
+ if pp_rank == args.target_pipeline_model_parallel_size - 1:
852
+ # handle final layernorm
853
+ for weight_or_bias in ["weight", "bias"]:
854
+ params = state_dict[f"transformer.ln_f.{weight_or_bias}"].to(dtype)
855
+ layer_name = f"final_layernorm.{weight_or_bias}"
856
+ for i in range(args.target_tensor_model_parallel_size):
857
+ params_dict = get_element_from_dict_by_path(output_state_dict[i], "model.language_model.encoder")
858
+ params_dict[layer_name] = params
859
+
860
+ # add the LM head
861
+ for i in range(args.target_tensor_model_parallel_size):
862
+ params_dict = get_element_from_dict_by_path(output_state_dict[i], "model.word_embeddings_for_head")
863
+ params_dict["weight"] = out_word_embed[i].clone()
864
+
865
+ # saving the state dict as per the tp_rank and pp_rank
866
+ for tp_rank in range(args.target_tensor_model_parallel_size):
867
+ output_state_dict[tp_rank]["checkpoint_version"] = 3.0
868
+ output_state_dict[tp_rank]["args"] = margs
869
+ checkpoint_dir = (
870
+ f"mp_rank_{tp_rank:02d}"
871
+ if args.target_pipeline_model_parallel_size == 1
872
+ else f"mp_rank_{tp_rank:02d}_{pp_rank:03d}"
873
+ )
874
+ if args.use_distributed_optimizer:
875
+ checkpoint_name = "model_rng.pt"
876
+ else:
877
+ checkpoint_name = "model_optim_rng.pt"
878
+ output_state_dict[tp_rank]["optimizer"] = dummy_optim_state_dict["optimizer"]
879
+ checkpoint_dir = os.path.join(release_dir, checkpoint_dir)
880
+ os.makedirs(checkpoint_dir, exist_ok=True)
881
+ checkpoint_path = os.path.join(checkpoint_dir, checkpoint_name)
882
+ if args.print_checkpoint_structure:
883
+ print(
884
+ f"Checkpoint structure of model state dict shard belonging to TP rank {tp_rank} and PP rank"
885
+ f" {pp_rank}:"
886
+ )
887
+ recursive_print(None, output_state_dict[tp_rank])
888
+ torch.save(output_state_dict[tp_rank], checkpoint_path)
889
+
890
+
891
+ def main():
892
+ parser = argparse.ArgumentParser()
893
+ parser = add_checkpointing_args(parser)
894
+ parser = add_megatron_checkpoint_args(parser)
895
+ parser = add_transformers_checkpoint_args(parser)
896
+ args = parser.parse_args()
897
+ if args.convert_checkpoint_from_megatron_to_transformers:
898
+ convert_checkpoint_from_megatron_to_transformers(args)
899
+ else:
900
+ convert_checkpoint_from_transformers_to_megatron(args)
901
+
902
+
903
+ if __name__ == "__main__":
904
+ main()
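A hedged usage sketch for the script above, driving the Megatron-LM to Transformers direction through the CLI declared in add_checkpointing_args / add_transformers_checkpoint_args. The paths are placeholders; only flags actually defined in the argument parsers are used.

import subprocess
import sys

# Invoke the conversion script as documented by its own argparse definitions.
subprocess.run(
    [
        sys.executable,
        "checkpoint_reshaping_and_interoperability.py",
        "--convert_checkpoint_from_megatron_to_transformers",
        "--load_path", "/path/to/megatron/checkpoint",   # placeholder input dir
        "--save_path", "/path/to/transformers/output",   # placeholder output dir
        "--tokenizer_name", "openai-community/gpt2",
        "--max_shard_size", "10GB",
        "--print-checkpoint-structure",
    ],
    check=True,
)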
venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ADDED
@@ -0,0 +1,358 @@
1
+ ####################################################################################################
2
+
3
+ # Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ ####################################################################################################
18
+
19
+ #
20
+ # Note: If when running this conversion script you're getting an exception:
21
+ # ModuleNotFoundError: No module named 'megatron.model.enums'
22
+ # you need to tell python where to find the clone of Megatron-LM, e.g.:
23
+ #
24
+ # cd /tmp
25
+ # git clone https://github.com/NVIDIA/Megatron-LM
26
+ # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
27
+ #
28
+ # if you already have it cloned elsewhere, simply adjust the path to the existing path
29
+ #
30
+ # If the training was done using a Megatron-LM fork, e.g.,
31
+ # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
32
+ # in your path, i.e., /path/to/Megatron-DeepSpeed/
33
+ #
34
+
35
+ import argparse
36
+ import os
37
+ import re
38
+ import zipfile
39
+
40
+ import torch
41
+
42
+ from transformers import AutoTokenizer, GPT2Config
43
+
44
+
45
+ ####################################################################################################
46
+
47
+
48
+ def recursive_print(name, val, spaces=0):
49
+ # Format the message.
50
+ if name is None:
51
+ msg = None
52
+ else:
53
+ fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
54
+ msg = fmt.format(name)
55
+
56
+ # Print and recurse (if needed).
57
+ if isinstance(val, dict):
58
+ if msg is not None:
59
+ print(msg)
60
+ for k in val.keys():
61
+ recursive_print(k, val[k], spaces + 2)
62
+ elif isinstance(val, torch.Tensor):
63
+ print(msg, ":", val.size())
64
+ else:
65
+ print(msg, ":", val)
66
+
67
+
68
+ def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
69
+ # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
70
+ # for compatibility with later versions of NVIDIA Megatron-LM.
71
+ # The inverse operation is performed inside Megatron-LM to read checkpoints:
72
+ # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
73
+ # If param is the weight tensor of the self-attention block, the returned tensor
74
+ # will have to be transposed one more time to be read by HuggingFace GPT2.
75
+ input_shape = param.size()
76
+ if checkpoint_version == 1.0:
77
+ # version 1.0 stores [num_heads * hidden_size * num_splits, :]
78
+ saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
79
+ param = param.view(*saved_shape)
80
+ param = param.transpose(0, 2)
81
+ param = param.transpose(1, 2).contiguous()
82
+ elif checkpoint_version >= 2.0:
83
+ # other versions store [num_heads * num_splits * hidden_size, :]
84
+ saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
85
+ param = param.view(*saved_shape)
86
+ param = param.transpose(0, 1).contiguous()
87
+ param = param.view(*input_shape)
88
+ return param
89
+
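As a quick, hedged illustration of the reordering above (a toy sketch using the function just defined, with made-up sizes): for a checkpoint_version >= 2.0 tensor whose rows are grouped as [head][split][dim], the call regroups them as [split][head][dim], which is the layout the later c_attn handling expects.

import torch

heads, splits, dim, cols = 2, 3, 4, 5  # toy sizes: 2 heads, Q/K/V splits, head size 4
param = torch.arange(heads * splits * dim * cols, dtype=torch.float32).view(-1, cols)
fixed = fix_query_key_value_ordering(param, 2.0, splits, heads, dim)
# Head 0 / split 0 rows stay in front; head 1 / split 0 rows move right after them.
assert torch.equal(fixed[:dim], param[:dim])
assert torch.equal(fixed[dim:2 * dim], param[splits * dim:(splits + 1) * dim])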
90
+
91
+ ####################################################################################################
92
+
93
+
94
+ def convert_megatron_checkpoint(args, input_state_dict, config):
95
+ # The converted output model.
96
+ output_state_dict = {}
97
+
98
+ # old versions did not store training args
99
+ ds_args = input_state_dict.get("args", None)
100
+ if ds_args is not None:
101
+ # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
102
+ # from pprint import pprint
103
+ # pprint(vars(ds_args))
104
+
105
+ config.vocab_size = ds_args.padded_vocab_size
106
+ config.n_positions = ds_args.max_position_embeddings
107
+ config.n_embd = ds_args.hidden_size
108
+ config.n_layer = ds_args.num_layers
109
+ config.n_head = ds_args.num_attention_heads
110
+ config.n_inner = ds_args.ffn_hidden_size
111
+ # pprint(config)
112
+
113
+ # The number of heads.
114
+ heads = config.n_head
115
+ # The hidden_size per head.
116
+ hidden_size_per_head = config.n_embd // config.n_head
117
+ # Megatron-LM checkpoint version
118
+ if "checkpoint_version" in input_state_dict.keys():
119
+ checkpoint_version = input_state_dict["checkpoint_version"]
120
+ else:
121
+ checkpoint_version = 0.0
122
+
123
+ # The model.
124
+ model = input_state_dict["model"]
125
+ # The language model.
126
+ lm = model["language_model"]
127
+ # The embeddings.
128
+ embeddings = lm["embedding"]
129
+
130
+ # The word embeddings.
131
+ word_embeddings = embeddings["word_embeddings"]["weight"]
132
+ # Truncate the embedding table to vocab_size rows.
133
+ word_embeddings = word_embeddings[: config.vocab_size, :]
134
+ output_state_dict["transformer.wte.weight"] = word_embeddings
135
+
136
+ # The position embeddings.
137
+ pos_embeddings = embeddings["position_embeddings"]["weight"]
138
+ # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
139
+ n_positions = pos_embeddings.size(0)
140
+ if n_positions != config.n_positions:
141
+ raise ValueError(
142
+ f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
143
+ )
144
+ # Store the position embeddings.
145
+ output_state_dict["transformer.wpe.weight"] = pos_embeddings
146
+
147
+ # The transformer.
148
+ transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
149
+
150
+ # The regex to extract layer names.
151
+ layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
152
+
153
+ # The simple map of names for "automated" rules.
154
+ megatron_to_transformers = {
155
+ "attention.dense": ".attn.c_proj.",
156
+ "self_attention.dense": ".attn.c_proj.",
157
+ "mlp.dense_h_to_4h": ".mlp.c_fc.",
158
+ "mlp.dense_4h_to_h": ".mlp.c_proj.",
159
+ }
160
+
161
+ # Extract the layers.
162
+ for key, val in transformer.items():
163
+ # Match the name.
164
+ m = layer_re.match(key)
165
+
166
+ # Stop if that's not a layer
167
+ if m is None:
168
+ break
169
+
170
+ # The index of the layer.
171
+ layer_idx = int(m.group(1))
172
+ # The name of the operation.
173
+ op_name = m.group(2)
174
+ # Is it a weight or a bias?
175
+ weight_or_bias = m.group(3)
176
+
177
+ # The name of the layer.
178
+ layer_name = f"transformer.h.{layer_idx}"
179
+
180
+ # For layernorm(s), simply store the layer norm.
181
+ if op_name.endswith("layernorm"):
182
+ ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
183
+ output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
184
+
185
+ # Transpose the QKV matrix.
186
+ elif (
187
+ op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
188
+ ) and weight_or_bias == "weight":
189
+ # Insert a tensor of 1x1xDxD bias.
190
+ causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
191
+ 1, 1, n_positions, n_positions
192
+ )
193
+ output_state_dict[layer_name + ".attn.bias"] = causal_mask
194
+
195
+ # Insert a "dummy" tensor for masked_bias.
196
+ masked_bias = torch.tensor(-1e4, dtype=torch.float16)
197
+ output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
198
+
199
+ out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
200
+ # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
201
+ out_val = out_val.transpose(0, 1).contiguous()
202
+ # Store.
203
+ output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
204
+
205
+ # Transpose the bias.
206
+ elif (
207
+ op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
208
+ ) and weight_or_bias == "bias":
209
+ out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
210
+ # Store. No change of shape.
211
+ output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
212
+
213
+ # Transpose the weights.
214
+ elif weight_or_bias == "weight":
215
+ out_name = megatron_to_transformers[op_name]
216
+ output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)
217
+
218
+ # Copy the bias.
219
+ elif weight_or_bias == "bias":
220
+ out_name = megatron_to_transformers[op_name]
221
+ output_state_dict[layer_name + out_name + "bias"] = val
222
+
223
+ # DEBUG.
224
+ assert config.n_layer == layer_idx + 1
225
+
226
+ # The final layernorm.
227
+ output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
228
+ output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]
229
+
230
+ # For the LM head, transformers wants the weight matrix tied to the word embeddings.
231
+ output_state_dict["lm_head.weight"] = word_embeddings
232
+
233
+ # It should be done!
234
+ return output_state_dict
235
+
236
+
237
+ ####################################################################################################
238
+
239
+
240
+ def main():
241
+ # Create the argument parser.
242
+ parser = argparse.ArgumentParser()
243
+ parser.add_argument("--print-checkpoint-structure", action="store_true")
244
+ parser.add_argument(
245
+ "path_to_checkpoint",
246
+ type=str,
247
+ help="Path to the checkpoint file (.zip archive or direct .pt file)",
248
+ )
249
+ parser.add_argument(
250
+ "--config_file",
251
+ default="",
252
+ type=str,
253
+ help="An optional config json file describing the pre-trained model.",
254
+ )
255
+ args = parser.parse_args()
256
+
257
+ # Extract the directory containing the checkpoint; the converted files are written there.
258
+ basename = os.path.dirname(args.path_to_checkpoint)
259
+
260
+ # Load the model.
261
+ # Handling .zip archives is optional; keep it for backward compatibility.
262
+ print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
263
+ if args.path_to_checkpoint.endswith(".zip"):
264
+ with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
265
+ with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
266
+ input_state_dict = torch.load(pytorch_dict, map_location="cpu")
267
+ else:
268
+ input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")
269
+
270
+ ds_args = input_state_dict.get("args", None)
271
+
272
+ # Read the config, or default to the model released by NVIDIA.
273
+ if args.config_file == "":
274
+ if ds_args is not None:
275
+ if ds_args.bias_gelu_fusion:
276
+ activation_function = "gelu_fast"
277
+ elif ds_args.openai_gelu:
278
+ activation_function = "gelu_new"
279
+ else:
280
+ activation_function = "gelu"
281
+ else:
282
+ # in the very early days this used to be "gelu_new"
283
+ activation_function = "gelu_new"
284
+
285
+ # Spell out all parameters in case the defaults change.
286
+ config = GPT2Config(
287
+ vocab_size=50257,
288
+ n_positions=1024,
289
+ n_embd=1024,
290
+ n_layer=24,
291
+ n_head=16,
292
+ n_inner=4096,
293
+ activation_function=activation_function,
294
+ resid_pdrop=0.1,
295
+ embd_pdrop=0.1,
296
+ attn_pdrop=0.1,
297
+ layer_norm_epsilon=1e-5,
298
+ initializer_range=0.02,
299
+ summary_type="cls_index",
300
+ summary_use_proj=True,
301
+ summary_activation=None,
302
+ summary_proj_to_labels=True,
303
+ summary_first_dropout=0.1,
304
+ scale_attn_weights=True,
305
+ use_cache=True,
306
+ bos_token_id=50256,
307
+ eos_token_id=50256,
308
+ )
309
+ else:
310
+ config = GPT2Config.from_json_file(args.config_file)
311
+
312
+ config.architectures = ["GPT2LMHeadModel"]
313
+
314
+ # Convert.
315
+ print("Converting")
316
+ output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)
317
+
318
+ # Print the structure of converted state dict.
319
+ if args.print_checkpoint_structure:
320
+ recursive_print(None, output_state_dict)
321
+
322
+ # Add tokenizer class info to config
323
+ # see https://github.com/huggingface/transformers/issues/13906
324
+ if ds_args is not None:
325
+ tokenizer_type = ds_args.tokenizer_type
326
+ if tokenizer_type == "GPT2BPETokenizer":
327
+ tokenizer_model_name = "openai-community/gpt2"
328
+ elif tokenizer_type == "PretrainedFromHF":
329
+ tokenizer_model_name = ds_args.tokenizer_name_or_path
330
+ else:
331
+ raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
332
+ else:
333
+ tokenizer_model_name = "openai-community/gpt2"
334
+
335
+ tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
336
+ tokenizer_class = type(tokenizer).__name__
337
+ config.tokenizer_class = tokenizer_class
338
+
339
+ # Store the config to file.
340
+ print("Saving config")
341
+ config.save_pretrained(basename)
342
+
343
+ # Save tokenizer based on args
344
+ print(f"Adding {tokenizer_class} tokenizer files")
345
+ tokenizer.save_pretrained(basename)
346
+
347
+ # Store the state_dict to file.
348
+ output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
349
+ print(f'Saving checkpoint to "{output_checkpoint_file}"')
350
+ torch.save(output_state_dict, output_checkpoint_file)
351
+
352
+
353
+ ####################################################################################################
354
+
355
+ if __name__ == "__main__":
356
+ main()
357
+
358
+ ####################################################################################################
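For orientation, a minimal end-to-end sketch of using the script above, assuming a hypothetical Megatron-LM GPT-2 checkpoint path (and, per the note at the top of the file, a Megatron-LM clone on PYTHONPATH if the pickled checkpoint references megatron modules). The converted config.json, tokenizer files and pytorch_model.bin land in the directory containing the checkpoint, so that directory can then be loaded directly with transformers:

import subprocess

from transformers import AutoTokenizer, GPT2LMHeadModel

# Hypothetical paths -- adjust to where the Megatron checkpoint actually lives.
ckpt = "/checkpoints/megatron_gpt2_345m/release/mp_rank_00/model_optim_rng.pt"
out_dir = "/checkpoints/megatron_gpt2_345m/release/mp_rank_00"

# Run the conversion, printing the converted state-dict layout as it goes.
subprocess.run(
    ["python", "convert_megatron_gpt2_checkpoint.py", "--print-checkpoint-structure", ckpt],
    check=True,
)

# The output directory now holds config.json, tokenizer files and pytorch_model.bin.
model = GPT2LMHeadModel.from_pretrained(out_dir)
tokenizer = AutoTokenizer.from_pretrained(out_dir)
print(model.config.n_layer, model.config.n_head, len(tokenizer))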
venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__init__.py ADDED
@@ -0,0 +1,108 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_sentencepiece_available,
20
+ is_tf_available,
21
+ is_torch_available,
22
+ )
23
+
24
+
25
+ _import_structure = {
26
+ "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
27
+ "feature_extraction_speech_to_text": ["Speech2TextFeatureExtractor"],
28
+ "processing_speech_to_text": ["Speech2TextProcessor"],
29
+ }
30
+
31
+ try:
32
+ if not is_sentencepiece_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
38
+
39
+ try:
40
+ if not is_tf_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ _import_structure["modeling_tf_speech_to_text"] = [
46
+ "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
47
+ "TFSpeech2TextForConditionalGeneration",
48
+ "TFSpeech2TextModel",
49
+ "TFSpeech2TextPreTrainedModel",
50
+ ]
51
+
52
+ try:
53
+ if not is_torch_available():
54
+ raise OptionalDependencyNotAvailable()
55
+ except OptionalDependencyNotAvailable:
56
+ pass
57
+ else:
58
+ _import_structure["modeling_speech_to_text"] = [
59
+ "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
60
+ "Speech2TextForConditionalGeneration",
61
+ "Speech2TextModel",
62
+ "Speech2TextPreTrainedModel",
63
+ ]
64
+
65
+
66
+ if TYPE_CHECKING:
67
+ from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
68
+ from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
69
+ from .processing_speech_to_text import Speech2TextProcessor
70
+
71
+ try:
72
+ if not is_sentencepiece_available():
73
+ raise OptionalDependencyNotAvailable()
74
+ except OptionalDependencyNotAvailable:
75
+ pass
76
+ else:
77
+ from .tokenization_speech_to_text import Speech2TextTokenizer
78
+
79
+ try:
80
+ if not is_tf_available():
81
+ raise OptionalDependencyNotAvailable()
82
+ except OptionalDependencyNotAvailable:
83
+ pass
84
+ else:
85
+ from .modeling_tf_speech_to_text import (
86
+ TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
87
+ TFSpeech2TextForConditionalGeneration,
88
+ TFSpeech2TextModel,
89
+ TFSpeech2TextPreTrainedModel,
90
+ )
91
+
92
+ try:
93
+ if not is_torch_available():
94
+ raise OptionalDependencyNotAvailable()
95
+ except OptionalDependencyNotAvailable:
96
+ pass
97
+ else:
98
+ from .modeling_speech_to_text import (
99
+ SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
100
+ Speech2TextForConditionalGeneration,
101
+ Speech2TextModel,
102
+ Speech2TextPreTrainedModel,
103
+ )
104
+
105
+ else:
106
+ import sys
107
+
108
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
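The __init__ above follows transformers' lazy-import pattern: only the names whose optional dependencies are available get registered in _import_structure, and _LazyModule defers the heavy imports until a name is first accessed. A small sketch of what that enables, assuming torch is installed (and sentencepiece, if the tokenizer is needed):

from transformers import Speech2TextConfig

# Importing the config touches only configuration_speech_to_text, not the modeling code.
config = Speech2TextConfig()
print(config.model_type)

# The modeling module is materialized lazily, on first attribute access.
from transformers.models.speech_to_text import Speech2TextForConditionalGeneration

model = Speech2TextForConditionalGeneration(config)  # randomly initialized, for illustration only
print(sum(p.numel() for p in model.parameters()))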
venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.76 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/configuration_speech_to_text.cpython-310.pyc ADDED
Binary file (8.47 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/convert_s2t_fairseq_to_tfms.cpython-310.pyc ADDED
Binary file (3.69 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/feature_extraction_speech_to_text.cpython-310.pyc ADDED
Binary file (10.9 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/modeling_speech_to_text.cpython-310.pyc ADDED
Binary file (44.6 kB). View file