diff --git a/.gitattributes b/.gitattributes
index fea597b4aff34509078f9a58ca7f56b648af88f2..aa914d5612c26cde13a2e14a198eb26635baa5dc 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -147,3 +147,4 @@ venv/lib/python3.10/site-packages/torch/lib/libc10.so filter=lfs diff=lfs merge=
venv/lib/python3.10/site-packages/triton/third_party/cuda/bin/ptxas filter=lfs diff=lfs merge=lfs -text
venv/lib/python3.10/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text
venv/lib/python3.10/site-packages/torch/bin/protoc-3.13.0.0 filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/triton/third_party/cuda/bin/nvdisasm filter=lfs diff=lfs merge=lfs -text
diff --git a/ckpts/universal/global_step40/zero/21.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/21.mlp.dense_4h_to_h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8f38b12cbe49d58c39a67fafceddce93022307ff
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/21.mlp.dense_4h_to_h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5afb46af646b04955d57ec05d6e264aaa3500236054033e6fa62bef95ae0e5f0
+size 33555612
diff --git a/ckpts/universal/global_step40/zero/26.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step40/zero/26.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d9d5e472193059ea8f65329b21d64a845d6c7d0d
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/26.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19b335940233d87ea08cc7bf12a8231f8a889669140c3b98a348158a34a75c49
+size 33555533
diff --git a/ckpts/universal/global_step40/zero/8.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step40/zero/8.mlp.dense_4h_to_h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..321e1dcb6ac06426bea2ad888fd442e67fc617dd
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/8.mlp.dense_4h_to_h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:06883c027934439a2ce6c54efd16daa9af70ddb18efe15f61598d998092f6582
+size 33555533
diff --git a/lm-evaluation-harness/.flake8 b/lm-evaluation-harness/.flake8
new file mode 100644
index 0000000000000000000000000000000000000000..73f6455d132003fce0034f41d72eeb901b68f039
--- /dev/null
+++ b/lm-evaluation-harness/.flake8
@@ -0,0 +1,5 @@
+[flake8]
+ignore = E203, E266, E501, W503, F403, F401, C901
+max-line-length = 127
+max-complexity = 10
+select = B,C,E,F,W,T4,B9
diff --git a/lm-evaluation-harness/.github/workflows/new_tasks.yml b/lm-evaluation-harness/.github/workflows/new_tasks.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0c4490f53a68f9ecb635e8678632f97604f915f5
--- /dev/null
+++ b/lm-evaluation-harness/.github/workflows/new_tasks.yml
@@ -0,0 +1,72 @@
+name: Tasks Modified
+
+on:
+ push:
+ branches:
+ - 'main'
+ pull_request:
+ branches:
+ - 'main'
+ workflow_dispatch:
+# comment out or edit the triggers above to disable or change them
+jobs:
+ changed_files:
+ runs-on: ubuntu-latest # windows-latest || macos-latest
+ timeout-minutes: 120
+ name: Scan for changed tasks
+ steps:
+ - name: checkout
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 2 # OR "2" -> To retrieve the preceding commit.
+
+ # Uses the tj-actions/changed-files@v37 action to check for changes.
+ # Outputs provided here: https://github.com/tj-actions/changed-files#outputs
+ # The `files_yaml` input optionally takes a yaml string to specify filters,
+ # and prepends the filter name to the standard output names.
+ - name: Check task folders
+ id: changed-tasks
+ uses: tj-actions/changed-files@v37.1.2
+ with:
+ # tasks checks the tasks folder and api checks the api folder for changes
+ files_yaml: |
+ tasks:
+ - lm_eval/tasks/**
+ api:
+ - lm_eval/api/**
+ write_output_files: true
+
+      # The next step is optional; the files are written to the workspace by default (see above),
+      # so it's only needed for debugging.
+ - name: Run Tests
+ if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true'
+ run: |
+ echo .github/outputs/tasks_all_changed_and_modified_files.txt >> 'GITHUB_ENV'
+ echo "One or more test file(s) has changed."
+ echo "List of all the files that have changed: ${{ steps.changed-tasks.outputs.tasks_all_modified_files }}"
+
+ - name: Set up Python 3.9
+ if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true'
+ uses: actions/setup-python@v4
+ with:
+ python-version: 3.9
+ cache: 'pip'
+ cache-dependency-path: setup.py
+ - name: Install dependencies
+ if: steps.changed-tasks.outputs.tasks_any_modified == 'true' || steps.changed-tasks.outputs.api_any_modified == 'true'
+ run: |
+ python -m pip install --upgrade pip
+ pip install -e '.[dev]' --extra-index-url https://download.pytorch.org/whl/cpu
+ # Install optional git dependencies
+ # pip install bleurt@https://github.com/google-research/bleurt/archive/b610120347ef22b494b6d69b4316e303f5932516.zip#egg=bleurt
+ # if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
+ - name: Test with pytest
+ # if new tasks are added, run tests on them
+ if: steps.changed-tasks.outputs.tasks_any_modified == 'true'
+ run: python -m pytest tests/test_tasks.py -s -vv
+ # if api is modified, run tests on it
+ - name: Test more tasks with pytest
+ env:
+ API: true
+ if: steps.changed-tasks.outputs.api_any_modified == 'true'
+ run: python -m pytest tests/test_tasks.py -s -vv
diff --git a/lm-evaluation-harness/.github/workflows/publish.yml b/lm-evaluation-harness/.github/workflows/publish.yml
new file mode 100644
index 0000000000000000000000000000000000000000..be3481754e270f28bcb65e8c75b880aa7ebf2bac
--- /dev/null
+++ b/lm-evaluation-harness/.github/workflows/publish.yml
@@ -0,0 +1,78 @@
+name: Publish Python distribution to PyPI
+
+on:
+ push:
+ tags:
+ - '*'
+
+jobs:
+ build:
+ name: Build distribution
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: "3.x"
+
+ - name: Install pypa/build
+ run: >-
+ python3 -m
+ pip install
+ build
+ --user
+ - name: Build a binary wheel and a source tarball
+ run: python3 -m build
+ - name: Store the distribution packages
+ uses: actions/upload-artifact@v3
+ with:
+ name: python-package-distributions
+ path: dist/
+
+ publish-to-pypi:
+ name: >-
+ Publish Python distribution to PyPI
+ if: startsWith(github.ref, 'refs/tags/') # only publish to PyPI on tag pushes
+ needs:
+ - build
+ runs-on: ubuntu-latest
+ environment:
+ name: pypi
+ url: https://pypi.org/p/lm_eval
+ permissions:
+ id-token: write # IMPORTANT: mandatory for trusted publishing
+
+ steps:
+ - name: Download all the dists
+ uses: actions/download-artifact@v3
+ with:
+ name: python-package-distributions
+ path: dist/
+ - name: Publish distribution to PyPI
+ uses: pypa/gh-action-pypi-publish@release/v1
+
+ publish-to-testpypi:
+ name: Publish Python distribution to TestPyPI
+ needs:
+ - build
+ runs-on: ubuntu-latest
+
+ environment:
+ name: testpypi
+ url: https://test.pypi.org/p/lm_eval
+
+ permissions:
+ id-token: write # IMPORTANT: mandatory for trusted publishing
+
+ steps:
+ - name: Download all the dists
+ uses: actions/download-artifact@v3
+ with:
+ name: python-package-distributions
+ path: dist/
+ - name: Publish distribution to TestPyPI
+ uses: pypa/gh-action-pypi-publish@release/v1
+ with:
+ repository-url: https://test.pypi.org/legacy/
diff --git a/lm-evaluation-harness/.github/workflows/unit_tests.yml b/lm-evaluation-harness/.github/workflows/unit_tests.yml
new file mode 100644
index 0000000000000000000000000000000000000000..cdd5b1f751ea9aefc7604de3d0b38f448bab1ad2
--- /dev/null
+++ b/lm-evaluation-harness/.github/workflows/unit_tests.yml
@@ -0,0 +1,70 @@
+# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
+# Just comment out unwanted steps to turn them off.
+name: Unit Tests
+
+on:
+ push:
+ branches:
+ - 'main'
+ pull_request:
+ branches:
+ - 'main'
+ workflow_dispatch:
+# Jobs run concurrently and steps run sequentially within a job.
+# jobs: linter and testcpu. Add more jobs/steps as required.
+jobs:
+ linter:
+ name: Linters
+ runs-on: ubuntu-latest
+ timeout-minutes: 5
+
+ steps:
+ - name: Checkout Code
+ uses: actions/checkout@v4
+ - name: Set up Python 3.8
+ uses: actions/setup-python@v5
+ with:
+ python-version: 3.8
+ cache: pip
+ cache-dependency-path: pyproject.toml
+ - name: Pre-Commit
+ env:
+ SKIP: "no-commit-to-branch,mypy"
+
+ uses: pre-commit/action@v3.0.0
+# # mypy turned off for now
+# - name: Lint with mypy
+# run: mypy . --ignore-missing-imports --check-untyped-defs --explicit-package-bases --warn-unreachable
+# Job 2
+ testcpu:
+ name: CPU Tests
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version: [ "3.8", "3.9", "3.10", "3.11" ]
+ timeout-minutes: 30
+ steps:
+ - name: Checkout Code
+ uses: actions/checkout@v4
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+ cache: pip
+ cache-dependency-path: pyproject.toml
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -e '.[dev,anthropic,sentencepiece,optimum,deepsparse,sparseml]' --extra-index-url https://download.pytorch.org/whl/cpu
+# Install optional git dependencies
+# pip install bleurt@https://github.com/google-research/bleurt/archive/b610120347ef22b494b6d69b4316e303f5932516.zip#egg=bleurt
+# if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
+ - name: Test with pytest
+ run: python -m pytest --showlocals -s -vv -n=auto
+ - name: Archive artifacts
+ uses: actions/upload-artifact@v3
+ with:
+ name: output_results
+ path: |
+ test_logs/*
diff --git a/lm-evaluation-harness/.gitignore b/lm-evaluation-harness/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..020622dfdc82dd22630c522c0f9841754aefa638
--- /dev/null
+++ b/lm-evaluation-harness/.gitignore
@@ -0,0 +1,23 @@
+env
+*.pyc
+output/
+data/
+lm_cache
+.idea
+build
+dist
+*.egg-info
+venv
+.vscode/
+temp
+__pycache__
+.ipynb_checkpoints
+temp
+# IPython
+profile_default/
+ipython_config.py
+# don't track (the default location of) the cached requests
+lm_eval/caching/.cache
+# don't track files created by wandb
+wandb
+examples/wandb
diff --git a/lm-evaluation-harness/13 b/lm-evaluation-harness/13
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lm-evaluation-harness/14 b/lm-evaluation-harness/14
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lm-evaluation-harness/69 b/lm-evaluation-harness/69
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lm-evaluation-harness/70 b/lm-evaluation-harness/70
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lm-evaluation-harness/78 b/lm-evaluation-harness/78
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lm-evaluation-harness/9 b/lm-evaluation-harness/9
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lm-evaluation-harness/CITATION.bib b/lm-evaluation-harness/CITATION.bib
new file mode 100644
index 0000000000000000000000000000000000000000..4ec33f139693aad74d2cb89c5edb2a578a315dd2
--- /dev/null
+++ b/lm-evaluation-harness/CITATION.bib
@@ -0,0 +1,10 @@
+@misc{eval-harness,
+ author = {Gao, Leo and Tow, Jonathan and Abbasi, Baber and Biderman, Stella and Black, Sid and DiPofi, Anthony and Foster, Charles and Golding, Laurence and Hsu, Jeffrey and Le Noac'h, Alain and Li, Haonan and McDonell, Kyle and Muennighoff, Niklas and Ociepa, Chris and Phang, Jason and Reynolds, Laria and Schoelkopf, Hailey and Skowron, Aviya and Sutawika, Lintang and Tang, Eric and Thite, Anish and Wang, Ben and Wang, Kevin and Zou, Andy},
+ title = {A framework for few-shot language model evaluation},
+ month = 12,
+ year = 2023,
+ publisher = {Zenodo},
+ version = {v0.4.0},
+ doi = {10.5281/zenodo.10256836},
+ url = {https://zenodo.org/records/10256836}
+}
diff --git a/lm-evaluation-harness/LICENSE.md b/lm-evaluation-harness/LICENSE.md
new file mode 100644
index 0000000000000000000000000000000000000000..12e6063183935e876e232db276568baf4954b492
--- /dev/null
+++ b/lm-evaluation-harness/LICENSE.md
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 EleutherAI
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/lm-evaluation-harness/default_config.yaml b/lm-evaluation-harness/default_config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e324549cafa573bc0f1dde5d4e488deb6945291
--- /dev/null
+++ b/lm-evaluation-harness/default_config.yaml
@@ -0,0 +1,17 @@
+compute_environment: LOCAL_MACHINE
+debug: false
+distributed_type: MULTI_GPU
+downcast_bf16: 'no'
+enable_cpu_affinity: false
+gpu_ids: all
+machine_rank: 0
+main_training_function: main
+mixed_precision: 'no'
+num_machines: 1
+num_processes: 8
+rdzv_backend: static
+same_network: true
+tpu_env: []
+tpu_use_cluster: false
+tpu_use_sudo: false
+use_cpu: false
diff --git a/lm-evaluation-harness/requirements.txt b/lm-evaluation-harness/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d6e1198b1ab1f5a7f19c6f1fc2ba7338438cf718
--- /dev/null
+++ b/lm-evaluation-harness/requirements.txt
@@ -0,0 +1 @@
+-e .
diff --git a/lm-evaluation-harness/tests/__init__.py b/lm-evaluation-harness/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lm-evaluation-harness/tests/test_cli.py b/lm-evaluation-harness/tests/test_cli.py
new file mode 100644
index 0000000000000000000000000000000000000000..feaa7340d6a36da7c98f3c44128702fc33f0e770
--- /dev/null
+++ b/lm-evaluation-harness/tests/test_cli.py
@@ -0,0 +1,43 @@
+import argparse
+
+import pytest
+
+import lm_eval.__main__
+
+
+def test_cli_parse_error():
+ """
+    Assert that a ValueError is raised when a CLI argument is defined without an explicit type
+ """
+ with pytest.raises(ValueError):
+ parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
+ parser.add_argument(
+ "--model", "-m", type=str, default="hf", help="Name of model e.g. `hf`"
+ )
+ parser.add_argument(
+ "--tasks",
+ "-t",
+ default=None,
+ metavar="task1,task2",
+ help="To get full list of tasks, use the command lm-eval --tasks list",
+ )
+ lm_eval.__main__.check_argument_types(parser)
+
+
+def test_cli_parse_no_error():
+ """
+    Assert that no error is raised when every CLI argument has an explicit type
+ """
+ parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
+ parser.add_argument(
+ "--model", "-m", type=str, default="hf", help="Name of model e.g. `hf`"
+ )
+ parser.add_argument(
+ "--tasks",
+ "-t",
+ type=str,
+ default=None,
+ metavar="task1,task2",
+ help="To get full list of tasks, use the command lm-eval --tasks list",
+ )
+ lm_eval.__main__.check_argument_types(parser)
diff --git a/lm-evaluation-harness/tests/test_evaluator.py b/lm-evaluation-harness/tests/test_evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..a41076a9aa103ec8df382558cf04d0f74f5ed559
--- /dev/null
+++ b/lm-evaluation-harness/tests/test_evaluator.py
@@ -0,0 +1,65 @@
+# import lm_eval.base as base
+from typing import List
+
+import pytest
+
+# import lm_eval.models as models
+import lm_eval.api as api
+import lm_eval.evaluator as evaluator
+from lm_eval import tasks
+
+
+# TODO: more fine grained unit tests rather than this big honking integration
+# test once we break evaluator into smaller, more manageable pieces
+
+
+@pytest.mark.parametrize(
+ "task_name,limit,model,model_args",
+ [
+ (
+ ["arc_easy"],
+ 10,
+ "hf",
+ "pretrained=EleutherAI/pythia-160m,dtype=float32,device=cpu",
+ )
+ ],
+)
+def test_evaluator(task_name: List[str], limit: int, model: str, model_args: str):
+    # keep the evaluation small so this integration test stays fast
+    limit = 10
+
+ e1 = evaluator.simple_evaluate(
+ model=model,
+ tasks=task_name,
+ limit=limit,
+ model_args=model_args,
+ )
+ assert e1 is not None
+
+ lm = api.registry.get_model(model).create_from_arg_string(
+ model_args,
+ {
+ "batch_size": None,
+ "max_batch_size": None,
+ "device": None,
+ },
+ )
+ task_manager = tasks.TaskManager()
+ task_dict = tasks.get_task_dict(task_name, task_manager)
+
+ e2 = evaluator.evaluate(
+ lm=lm,
+ task_dict=task_dict,
+ limit=limit,
+ )
+
+ assert e2 is not None
+ # check that caching is working
+
+ def r(x):
+ return x["results"]["arc_easy"]
+
+ assert all(
+ x == y
+ for x, y in zip([y for _, y in r(e1).items()], [y for _, y in r(e2).items()])
+ )
diff --git a/lm-evaluation-harness/tests/test_janitor.py b/lm-evaluation-harness/tests/test_janitor.py
new file mode 100644
index 0000000000000000000000000000000000000000..19ba611dfb833f7fb8c864143df61687f8722022
--- /dev/null
+++ b/lm-evaluation-harness/tests/test_janitor.py
@@ -0,0 +1,507 @@
+from collections import defaultdict
+
+from lm_eval.decontamination.janitor import (
+ Janitor,
+ form_ngrams,
+ split_indices,
+ word_ngrams,
+ word_ngrams_indices,
+)
+
+
+def simple_ngram(sequence, n):
+ ngrams = list()
+ ngram = []
+ for x in sequence:
+ ngram.append(x)
+ if len(ngram) == n:
+ ngrams.append(tuple(ngram))
+ ngram = ngram[1:]
+
+ return ngrams
+
+
+def test_form_ngrams():
+ sequence = (
+ "Hello my name is Bob, I like eating pizza, chicken, chips and ice cream. Maybe I should eat some"
+ " more salad but it's so booooring. I just... like eating pizza, chicken, chips and ice cream so much."
+ )
+
+ n_values = [1, 2, 3, 5, 13]
+ for n in n_values:
+ comparison = simple_ngram(sequence, n)
+ result_to_test = list(form_ngrams(iter(sequence), n))
+ assert len(comparison) == len(result_to_test)
+ assert comparison == result_to_test
+
+
+def test_word_ngrams():
+ sequence = (
+ "Hello my name is Bob, I like eating pizza, chicken, chips and ice cream. Maybe I should eat some"
+ " more salad but it's so booooring. I just... like eating pizza, chicken, chips and ice cream so much."
+ )
+
+ words = sequence.split()
+
+ n_values = [1, 2, 3, 5, 13]
+ for n in n_values:
+ comparison = simple_ngram(words, n)
+ comparison = [" ".join(ngram) for ngram in comparison]
+ result_to_test = list(word_ngrams(sequence, n))
+ assert len(comparison) == len(result_to_test)
+ assert result_to_test == comparison
+
+
+def test_split_indices():
+ sequence = (
+ "Hello my name is Bob, I like eating pizza, chicken, chips and ice cream. Maybe I should eat some"
+ " more salad but it's so booooring. I just... like eating pizza, chicken, chips and ice cream so much."
+ )
+
+ comparison = []
+ current_word = ""
+ for i, c in enumerate(sequence):
+ if c != " ":
+ current_word += c
+ else:
+ if current_word:
+ comparison.append((current_word, (i - len(current_word), i - 1)))
+ current_word = ""
+
+ if current_word:
+ comparison.append(
+ (current_word, (len(sequence) - len(current_word), len(sequence) - 1))
+ )
+ current_word = ""
+
+ result_to_test = list(split_indices(sequence))
+ assert len(comparison) == len(result_to_test)
+ assert comparison == result_to_test
+
+
+def test_word_ngrams_indices():
+ sequence = (
+ "Hello my name is Bob, I like eating pizza, chicken, chips and ice cream. Maybe I should eat some"
+ " more salad but it's so booooring. I just... like eating pizza, chicken, chips and ice cream so much."
+ )
+
+ n_values = [1, 2, 3, 5, 13]
+
+ for n in n_values:
+ ngrams = [" ".join(ngram) for ngram in simple_ngram(sequence.split(), n)]
+ tracker = defaultdict(int)
+ comparison = []
+ for ngram in ngrams:
+ while True:
+ start = sequence.find(ngram, tracker[ngram])
+ assert start != -1 # testing the test
+
+ end = start + len(ngram) - 1
+ tracker[ngram] = end + 1
+
+ # ignore partial word matches
+ if (start != 0 and sequence[start - 1] != " ") or (
+ end != len(sequence) - 1 and sequence[end + 1] != " "
+ ):
+ pass
+ else:
+ break
+
+ comparison.append((ngram, (start, end)))
+
+ result_to_test = list(word_ngrams_indices(sequence, n))
+ assert len(result_to_test) == len(comparison)
+ assert result_to_test == comparison
+
+
+# Assumptions from the GPT-3 paper:
+# the 200 characters to remove include punctuation and are actually a half-window
+
+
+# All tests below initially test without any registered contaminants, expecting the same sequence back.
+def test_janitor1():
+    # First test using a 1gram, expecting the first block before the filth to have some remaining
+ # characters, but the second block should be completely removed.
+
+ sequence = (
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "FILTH. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ )
+
+ filth = "filth"
+
+ expected_result = (
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing "
+ )
+
+ janitor = Janitor(
+ ngram_n=1, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
+ )
+ result = janitor.clean_python(sequence)
+ result = "".join(result)
+ assert result == sequence
+
+ janitor.register_contaminant(filth)
+ assert janitor.dirt_ngrams == {filth}
+
+ result = janitor.clean_python(sequence)
+ result = "".join(result)
+ assert result == expected_result
+
+
+def test_janitor2():
+    # Second test using a 1gram, expecting the first block before the filth to have some remaining
+    # characters, and the second block is longer than 200 characters so it should also have some remaining.
+
+ sequence = (
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "FILTH. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ )
+
+ filth = "filth"
+
+ expected_result = (
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing "
+ " characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ )
+
+ janitor = Janitor(
+ ngram_n=1, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
+ )
+ result = janitor.clean_python(sequence)
+ result = "".join(result)
+ assert result == sequence
+
+ janitor.register_contaminant(filth)
+ assert janitor.dirt_ngrams == {filth}
+
+ result = janitor.clean_python(sequence)
+ result = "".join(result)
+ assert result == expected_result
+
+
+def test_janitor3():
+ # Same test as above but with a 6gram.
+
+ sequence = (
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "FILTH. lots of dirty filtHy FIlTh "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ )
+
+ filth = "filth lots of dirty filthy filth"
+
+ expected_result = (
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing "
+ " characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ )
+
+ janitor = Janitor(
+ ngram_n=6, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
+ )
+ result = janitor.clean_python(sequence)
+ result = "".join(result)
+ assert result == sequence
+
+ janitor.register_contaminant(filth)
+ assert janitor.dirt_ngrams == {filth}
+
+ result = janitor.clean_python(sequence)
+ result = "".join(result)
+ assert result == expected_result
+
+
+def test_janitor4():
+ # This test adds another block to that from the previous. The middle block should be entirely
+ # removed as the 200 characters are removed from each side.
+
+ sequence = (
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "FILTH. lots of dirty filtHy FIlTh "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "FILTH. lots of dirty filtHy FIlTh "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ )
+
+ filth = "filth lots of dirty filthy filth"
+
+ expected_result = (
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing "
+ " characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ )
+
+ janitor = Janitor(
+ ngram_n=6, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
+ )
+ result = janitor.clean_python(sequence)
+ result = "".join(result)
+ assert result == sequence
+
+ janitor.register_contaminant(filth)
+ assert janitor.dirt_ngrams == {filth}
+
+ result = janitor.clean_python(sequence)
+ result = "".join(result)
+ assert result == expected_result
+
+
+def test_janitor5():
+ # Same as above but using multiple different filth 6grams.
+
+ sequence = (
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "FILTH. lots of dirty filtHy FIlTh "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "FILTH. lots of filtHy dirty FIlTh "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ )
+
+ filths = ["filth lots of dirty filthy filth", "filth lots of filthy dirty filth"]
+
+ expected_result = (
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing "
+ " characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ )
+
+ janitor = Janitor(
+ ngram_n=6, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
+ )
+ result = janitor.clean_python(sequence)
+ result = "".join(result)
+ assert result == sequence
+
+ for filth in filths:
+ janitor.register_contaminant(filth)
+ assert janitor.dirt_ngrams == set(filths)
+
+ result = janitor.clean_python(sequence)
+ result = "".join(result)
+ assert result == expected_result
+
+
+def test_janitor6():
+    # Same as above but now we add 10 filths and expect the same result; the following test uses 11.
+
+ sequence = (
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "FILTH. lots of dirty filtHy FIlTh "
+ "FILTH. lots of dirty filtHy FIlTh "
+ "FILTH. lots of dirty filtHy FIlTh "
+ "FILTH. lots of dirty filtHy FIlTh "
+ "FILTH. lots of dirty filtHy FIlTh "
+ "FILTH. lots of dirty filtHy FIlTh "
+ "FILTH. lots of dirty filtHy FIlTh "
+ "FILTH. lots of dirty filtHy FIlTh "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "FILTH. lots of filtHy dirty FIlTh "
+ "FILTH. lots of filtHy dirty FIlTh "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ )
+
+ filths = ["filth lots of dirty filthy filth", "filth lots of filthy dirty filth"]
+
+ expected_result = (
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing "
+ " characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ )
+
+ janitor = Janitor(
+ ngram_n=6, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
+ )
+ result = janitor.clean_python(sequence)
+ result = "".join(result)
+ assert result == sequence
+
+ for filth in filths:
+ janitor.register_contaminant(filth)
+ assert janitor.dirt_ngrams == set(filths)
+
+ result = janitor.clean_python(sequence)
+ result = "".join(result)
+ assert result == expected_result
+
+
+def test_janitor7():
+    # Same as above but with 11 filth occurrences, exceeding too_dirty_cutoff=10, so the whole document is removed.
+
+ sequence = (
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "FILTH. lots of dirty filtHy FIlTh "
+ "FILTH. lots of dirty filtHy FIlTh "
+ "FILTH. lots of dirty filtHy FIlTh "
+ "FILTH. lots of dirty filtHy FIlTh "
+ "FILTH. lots of dirty filtHy FIlTh "
+ "FILTH. lots of dirty filtHy FIlTh "
+ "FILTH. lots of dirty filtHy FIlTh "
+ "FILTH. lots of dirty filtHy FIlTh "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "FILTH. lots of filtHy dirty FIlTh "
+ "FILTH. lots of filtHy dirty FIlTh "
+ "FILTH. lots of filtHy dirty FIlTh "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
+ )
+
+ filths = ["filth lots of dirty filthy filth", "filth lots of filthy dirty filth"]
+
+ expected_result = ""
+
+ janitor = Janitor(
+ ngram_n=6, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
+ )
+ result = janitor.clean_python(sequence)
+ result = "".join(result)
+ assert result == sequence
+
+ for filth in filths:
+ janitor.register_contaminant(filth)
+ assert janitor.dirt_ngrams == set(filths)
+
+ result = janitor.clean_python(sequence)
+ result = "".join(result)
+ assert result == expected_result
+
+
+def test_janitor8():
+    # This will test saving and loading contamination ngrams
+ pass
+ # source = """ ,, I'm a very !dirty,, ,, dirty boy. Clean me daddy. \n\nhe he he hehe heh. lastword """ * 2
+ # contaminant = "dirty boy. Clean he he"
+
+ # jan = Janitor(ngram_n=3)
+ # jan.register_contaminant(contaminant)
+ # cleaned = " ".join(jan.clean(source))
+ # for contam in jan.dirt_ngrams:
+ # assert contam not in cleaned, contam
+
+ # filename = "data/saved_contam"
+ # jan.save_contamination_ngrams(filename)
+
+ # jan = Janitor(ngram_n=3)
+ # jan.load_contamination_ngrams(filename)
+ # cleaned = " ".join(jan.clean(source))
+ # for contam in jan.dirt_ngrams:
+ # assert contam not in cleaned, contam
diff --git a/lm-evaluation-harness/tests/test_misc.py b/lm-evaluation-harness/tests/test_misc.py
new file mode 100644
index 0000000000000000000000000000000000000000..30267f63d0a518a41ae5681088878edcacc1c729
--- /dev/null
+++ b/lm-evaluation-harness/tests/test_misc.py
@@ -0,0 +1,14 @@
+import random
+
+import pytest
+
+import lm_eval.api.metrics as metrics
+
+
+def test_bootstrapping():
+ random.seed(42)
+ arr = [random.random() for _ in range(1000)]
+ expected = metrics.mean_stderr(arr)
+ bootstrapped = metrics.bootstrap_stderr(metrics.mean, arr, iters=100000)
+
+ assert bootstrapped == pytest.approx(expected, abs=1e-4)
diff --git a/lm-evaluation-harness/tests/test_requests_caching.py b/lm-evaluation-harness/tests/test_requests_caching.py
new file mode 100644
index 0000000000000000000000000000000000000000..15d5724c9ceb6110349f0969a55e377282a3e16c
--- /dev/null
+++ b/lm-evaluation-harness/tests/test_requests_caching.py
@@ -0,0 +1,123 @@
+# import lm_eval.base as base
+import importlib
+import os
+import sys
+from datetime import datetime
+from typing import List, Tuple
+
+import pytest
+import torch
+
+# import lm_eval.models as models
+from lm_eval.caching.cache import PATH
+
+
+MODULE_DIR = os.path.dirname(os.path.realpath(__file__))
+
+# NOTE: the script this loads uses simple_evaluate
+# TODO: potentially test both the helper script and the normal script
+sys.path.append(f"{MODULE_DIR}/../scripts")
+model_loader = importlib.import_module("requests_caching")
+run_model_for_task_caching = model_loader.run_model_for_task_caching
+
+
+DEFAULT_TASKS = ["lambada_openai", "hellaswag"]
+
+
+@pytest.fixture(autouse=True)
+def setup_and_teardown():
+ # Setup
+ torch.use_deterministic_algorithms(False)
+ clear_cache()
+ # Yields control back to the test function
+ yield
+ # Cleanup here
+
+
+def clear_cache():
+ if os.path.exists(PATH):
+ cache_files = os.listdir(PATH)
+ for file in cache_files:
+ file_path = f"{PATH}/{file}"
+ os.unlink(file_path)
+
+
+# the tasks argument is currently unused but kept to allow selecting specific task cache files later
+def get_cache_files(tasks: List[str] = None) -> Tuple[List[str], List[str]]:
+ cache_files = os.listdir(PATH)
+
+ file_task_names = []
+
+ for file in cache_files:
+ file_without_prefix = file.split("-")[1]
+ file_without_prefix_and_suffix = file_without_prefix.split(".")[0]
+ file_task_names.append(file_without_prefix_and_suffix)
+
+ return cache_files, file_task_names
+
+
+def assert_created(tasks: List[str], file_task_names: List[str]):
+ tasks.sort()
+ file_task_names.sort()
+
+ assert tasks == file_task_names
+
+
+@pytest.mark.parametrize("tasks", [DEFAULT_TASKS])
+def test_requests_caching_true(tasks: List[str]):
+ run_model_for_task_caching(tasks=tasks, cache_requests="true")
+
+ cache_files, file_task_names = get_cache_files()
+
+ assert_created(tasks=tasks, file_task_names=file_task_names)
+
+
+@pytest.mark.parametrize("tasks", [DEFAULT_TASKS])
+def test_requests_caching_refresh(tasks: List[str]):
+ run_model_for_task_caching(tasks=tasks, cache_requests="true")
+
+ timestamp_before_test = datetime.now().timestamp()
+
+ run_model_for_task_caching(tasks=tasks, cache_requests="refresh")
+
+ cache_files, file_task_names = get_cache_files()
+
+ for file in cache_files:
+ modification_time = os.path.getmtime(f"{PATH}/{file}")
+ assert modification_time > timestamp_before_test
+
+ tasks.sort()
+ file_task_names.sort()
+
+ assert tasks == file_task_names
+
+
+@pytest.mark.parametrize("tasks", [DEFAULT_TASKS])
+def test_requests_caching_delete(tasks: List[str]):
+    # populate the cache first by re-running the "true" test inside this test, which also adds confidence
+ test_requests_caching_true(tasks=tasks)
+
+ run_model_for_task_caching(tasks=tasks, cache_requests="delete")
+
+ cache_files, file_task_names = get_cache_files()
+
+ assert len(cache_files) == 0
+
+
+# useful for locally running tests through the debugger
+if __name__ == "__main__":
+
+ def run_tests():
+ tests = [
+ test_requests_caching_true,
+ test_requests_caching_refresh,
+ test_requests_caching_delete,
+ ]
+
+ for test_func in tests:
+ clear_cache()
+ test_func(tasks=DEFAULT_TASKS)
+
+ print("Tests pass")
+
+ run_tests()
diff --git a/lm-evaluation-harness/tests/test_tasks.py b/lm-evaluation-harness/tests/test_tasks.py
new file mode 100644
index 0000000000000000000000000000000000000000..a11470c979b07191e4052dbddf8e4e89c3f5e3c4
--- /dev/null
+++ b/lm-evaluation-harness/tests/test_tasks.py
@@ -0,0 +1,122 @@
+from itertools import islice
+
+import pytest
+
+import lm_eval.tasks as tasks
+from lm_eval.api.task import ConfigurableTask
+
+from .utils import new_tasks
+
+
+task_manager = tasks.TaskManager()
+# Default Task
+TASKS = ["arc_easy"]
+
+
+def task_class():
+ global TASKS
+ # CI: new_tasks checks if any modifications have been made
+ task_classes = new_tasks()
+    # If CI detected modified tasks, load those; otherwise fall back to the default TASKS
+ if task_classes:
+ return list(task_manager.load_task_or_group(task_classes).values())
+ else:
+ return list(task_manager.load_task_or_group(TASKS).values())
+
+
+@pytest.fixture()
+def limit() -> int:
+ return 10
+
+
+# Tests
+@pytest.mark.parametrize("task_class", task_class(), ids=lambda x: f"{x.config.task}")
+class TestNewTasks:
+ def test_download(self, task_class: ConfigurableTask):
+ task_class.download()
+ assert task_class.dataset is not None
+
+ def test_has_training_docs(self, task_class: ConfigurableTask):
+ assert task_class.has_training_docs() in [True, False]
+
+ def test_check_training_docs(self, task_class: ConfigurableTask):
+ if task_class.has_training_docs():
+ assert task_class._config["training_split"] is not None
+
+ def test_has_validation_docs(self, task_class):
+ assert task_class.has_validation_docs() in [True, False]
+
+ def test_check_validation_docs(self, task_class):
+ if task_class.has_validation_docs():
+ assert task_class._config["validation_split"] is not None
+
+ def test_has_test_docs(self, task_class):
+ assert task_class.has_test_docs() in [True, False]
+
+ def test_check_test_docs(self, task_class):
+ task = task_class
+ if task.has_test_docs():
+ assert task._config["test_split"] is not None
+
+ def test_should_decontaminate(self, task_class):
+ task = task_class
+ assert task.should_decontaminate() in [True, False]
+ if task.should_decontaminate():
+ assert task._config["doc_to_decontamination_query"] is not None
+
+ def test_doc_to_text(self, task_class, limit):
+ task = task_class
+ arr = (
+ list(islice(task.test_docs(), limit))
+ if task.has_test_docs()
+ else list(islice(task.validation_docs(), limit))
+ )
+ _array = [task.doc_to_text(doc) for doc in arr]
+ # space convention; allow txt to have length 0 for perplexity-like tasks since the model tacks an <|endoftext|> on
+ assert all(
+ isinstance(x, str) and (x[-1] != " " if len(x) != 0 else True)
+ for x in _array
+ )
+
+ def test_create_choices(self, task_class, limit):
+ task = task_class
+ arr = (
+ list(islice(task.test_docs(), limit))
+ if task.has_test_docs()
+ else list(islice(task.validation_docs(), limit))
+ )
+ if "multiple_choice" in task._config.output_type:
+ _array = [task.doc_to_choice(doc) for doc in arr]
+ # assert all(len(x) == 4 for x in _array)
+ assert all(isinstance(x, list) for x in _array)
+ assert all(isinstance(x[0], str) for x in _array)
+
+ def test_doc_to_target(self, task_class, limit):
+ task = task_class
+ arr = (
+ list(islice(task.test_docs(), limit))
+ if task.has_test_docs()
+ else list(islice(task.validation_docs(), limit))
+ )
+ _array_target = [task.doc_to_target(doc) for doc in arr]
+ if task._config.output_type == "multiple_choice":
+ assert all(isinstance(label, int) for label in _array_target)
+ # _array_text = [task.doc_to_text(doc) for doc in arr]
+ # Not working
+ # assert all(tgt[0] == " " or txt[-1] == "\n" if len(txt) != 0 else True for txt, tgt in zip(_array_text, _array_target))
+
+ def test_build_all_requests(self, task_class, limit):
+ task_class.build_all_requests(rank=1, limit=limit, world_size=1)
+ assert task_class.instances is not None
+
+    # TODO: Add proper testing
+ def test_construct_requests(self, task_class, limit):
+ task = task_class
+ arr = (
+ list(islice(task.test_docs(), limit))
+ if task.has_test_docs()
+ else list(islice(task.validation_docs(), limit))
+ )
+ requests = [task.construct_requests(doc, task.doc_to_text(doc)) for doc in arr]
+ # assert all(isinstance(doc, list) for doc in requests)
+ assert len(requests) == limit if limit else True
diff --git a/lm-evaluation-harness/tests/test_utils.py b/lm-evaluation-harness/tests/test_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..a48600ca980c9a28568dcbca9489ae3581c21872
--- /dev/null
+++ b/lm-evaluation-harness/tests/test_utils.py
@@ -0,0 +1,400 @@
+import itertools
+
+import numpy as np
+import pytest
+import torch
+
+from lm_eval.api.metrics import (
+ aggregate_subtask_metrics,
+ mean,
+ pooled_sample_stderr,
+ stderr_for_metric,
+)
+from lm_eval.models.utils import Collator
+from lm_eval.utils import (
+ get_rolling_token_windows,
+ make_disjoint_window,
+)
+
+
+# noinspection DuplicatedCode
+def test_get_rolling_token_windows_v1():
+ gold = [
+ ([-100, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
+ (
+ [9, 10, 11, 12, 13, 14, 15, 16, 17, 18],
+ [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
+ ),
+ (
+ [19, 20, 21, 22, 23, 24, 25, 26, 27, 28],
+ [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
+ ),
+ ([23, 24, 25, 26, 27, 28, 29, 30, 31, 32], [30, 31, 32, 33]),
+ ]
+ x = list(range(34))
+ generator = get_rolling_token_windows(
+ token_list=x,
+ prefix_token=-100,
+ max_seq_len=10,
+ context_len=1,
+ )
+ pred_length = 0
+ output = []
+ for input_tokens, pred_tokens in generator:
+ output.append((input_tokens, pred_tokens))
+ pred_length += len(pred_tokens)
+ assert pred_length == len(x)
+ assert gold == output
+
+
+# noinspection DuplicatedCode
+def test_get_rolling_token_windows_v2():
+ gold = [
+ ([-100, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
+ ([2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [10, 11, 12]),
+ ([5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [13, 14, 15]),
+ ([8, 9, 10, 11, 12, 13, 14, 15, 16, 17], [16, 17, 18]),
+ ([11, 12, 13, 14, 15, 16, 17, 18, 19, 20], [19, 20, 21]),
+ ([14, 15, 16, 17, 18, 19, 20, 21, 22, 23], [22, 23, 24]),
+ ([17, 18, 19, 20, 21, 22, 23, 24, 25, 26], [25, 26, 27]),
+ ([20, 21, 22, 23, 24, 25, 26, 27, 28, 29], [28, 29, 30]),
+ ([23, 24, 25, 26, 27, 28, 29, 30, 31, 32], [31, 32, 33]),
+ ]
+ x = list(range(34))
+ generator = get_rolling_token_windows(
+ token_list=x,
+ prefix_token=-100,
+ max_seq_len=10,
+ context_len=8,
+ )
+ pred_length = 0
+ output = []
+ for input_tokens, pred_tokens in generator:
+ output.append((input_tokens, pred_tokens))
+ pred_length += len(pred_tokens)
+ assert pred_length == len(x)
+ assert gold == output
+
+
+# noinspection DuplicatedCode
+def test_get_rolling_token_windows_v3():
+ gold = [
+ ([-100, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
+ ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10]),
+ ([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [11]),
+ ([2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [12]),
+ ([3, 4, 5, 6, 7, 8, 9, 10, 11, 12], [13]),
+ ([4, 5, 6, 7, 8, 9, 10, 11, 12, 13], [14]),
+ ([5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [15]),
+ ([6, 7, 8, 9, 10, 11, 12, 13, 14, 15], [16]),
+ ([7, 8, 9, 10, 11, 12, 13, 14, 15, 16], [17]),
+ ([8, 9, 10, 11, 12, 13, 14, 15, 16, 17], [18]),
+ ([9, 10, 11, 12, 13, 14, 15, 16, 17, 18], [19]),
+ ([10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [20]),
+ ([11, 12, 13, 14, 15, 16, 17, 18, 19, 20], [21]),
+ ([12, 13, 14, 15, 16, 17, 18, 19, 20, 21], [22]),
+ ([13, 14, 15, 16, 17, 18, 19, 20, 21, 22], [23]),
+ ([14, 15, 16, 17, 18, 19, 20, 21, 22, 23], [24]),
+ ([15, 16, 17, 18, 19, 20, 21, 22, 23, 24], [25]),
+ ([16, 17, 18, 19, 20, 21, 22, 23, 24, 25], [26]),
+ ([17, 18, 19, 20, 21, 22, 23, 24, 25, 26], [27]),
+ ([18, 19, 20, 21, 22, 23, 24, 25, 26, 27], [28]),
+ ([19, 20, 21, 22, 23, 24, 25, 26, 27, 28], [29]),
+ ([20, 21, 22, 23, 24, 25, 26, 27, 28, 29], [30]),
+ ([21, 22, 23, 24, 25, 26, 27, 28, 29, 30], [31]),
+ ([22, 23, 24, 25, 26, 27, 28, 29, 30, 31], [32]),
+ ([23, 24, 25, 26, 27, 28, 29, 30, 31, 32], [33]),
+ ]
+ x = list(range(34))
+ generator = get_rolling_token_windows(
+ token_list=x,
+ prefix_token=-100,
+ max_seq_len=10,
+ context_len=10,
+ )
+ pred_length = 0
+ output = []
+ for input_tokens, pred_tokens in generator:
+ output.append((input_tokens, pred_tokens))
+ pred_length += len(pred_tokens)
+ assert pred_length == len(x)
+ assert gold == output
+
+
+# noinspection DuplicatedCode
+def test_get_rolling_token_windows_v4():
+ gold = [
+ ([-100, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
+ ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10]),
+ ([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [11]),
+ ([2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [12]),
+ ([3, 4, 5, 6, 7, 8, 9, 10, 11, 12], [13]),
+ ([4, 5, 6, 7, 8, 9, 10, 11, 12, 13], [14]),
+ ([5, 6, 7, 8, 9, 10, 11, 12, 13, 14], [15]),
+ ([6, 7, 8, 9, 10, 11, 12, 13, 14, 15], [16]),
+ ([7, 8, 9, 10, 11, 12, 13, 14, 15, 16], [17]),
+ ([8, 9, 10, 11, 12, 13, 14, 15, 16, 17], [18]),
+ ([9, 10, 11, 12, 13, 14, 15, 16, 17, 18], [19]),
+ ([10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [20]),
+ ([11, 12, 13, 14, 15, 16, 17, 18, 19, 20], [21]),
+ ([12, 13, 14, 15, 16, 17, 18, 19, 20, 21], [22]),
+ ([13, 14, 15, 16, 17, 18, 19, 20, 21, 22], [23]),
+ ([14, 15, 16, 17, 18, 19, 20, 21, 22, 23], [24]),
+ ([15, 16, 17, 18, 19, 20, 21, 22, 23, 24], [25]),
+ ([16, 17, 18, 19, 20, 21, 22, 23, 24, 25], [26]),
+ ([17, 18, 19, 20, 21, 22, 23, 24, 25, 26], [27]),
+ ([18, 19, 20, 21, 22, 23, 24, 25, 26, 27], [28]),
+ ([19, 20, 21, 22, 23, 24, 25, 26, 27, 28], [29]),
+ ]
+ x = list(range(30))
+ generator = get_rolling_token_windows(
+ token_list=x,
+ prefix_token=-100,
+ max_seq_len=10,
+ context_len=10,
+ )
+ pred_length = 0
+ output = []
+ for input_tokens, pred_tokens in generator:
+ output.append((input_tokens, pred_tokens))
+ pred_length += len(pred_tokens)
+ assert pred_length == len(x)
+ assert gold == output
+
+
+# noinspection DuplicatedCode
+def test_get_rolling_token_windows_v5():
+ gold = [
+ ([-100, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
+ (
+ [9, 10, 11, 12, 13, 14, 15, 16, 17, 18],
+ [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
+ ),
+ (
+ [19, 20, 21, 22, 23, 24, 25, 26, 27, 28],
+ [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
+ ),
+ ]
+ x = list(range(30))
+ generator = get_rolling_token_windows(
+ token_list=x,
+ prefix_token=-100,
+ max_seq_len=10,
+ context_len=1,
+ )
+ pred_length = 0
+ output = []
+ for input_tokens, pred_tokens in generator:
+ output.append((input_tokens, pred_tokens))
+ pred_length += len(pred_tokens)
+ assert pred_length == len(x)
+ assert gold == output
+
+
+# noinspection DuplicatedCode
+def test_get_rolling_token_windows_v6():
+ gold = [
+ ([-100, 0], [0, 1]),
+ ([1, 2], [2, 3]),
+ ([3, 4], [4, 5]),
+ ([5, 6], [6, 7]),
+ ([6, 7], [8]),
+ ]
+ x = list(range(9))
+ generator = get_rolling_token_windows(
+ token_list=x,
+ prefix_token=-100,
+ max_seq_len=2,
+ context_len=1,
+ )
+ pred_length = 0
+ output = []
+ for input_tokens, pred_tokens in generator:
+ output.append((input_tokens, pred_tokens))
+ pred_length += len(pred_tokens)
+ assert pred_length == len(x)
+ assert gold == output
+
+
+def test_get_rolling_token_windows_empty():
+ generator = get_rolling_token_windows(
+ token_list=[],
+ prefix_token=-100,
+ max_seq_len=2,
+ context_len=1,
+ )
+ n = 0
+ for _ in generator:
+ n += 1
+ assert n == 0
+
+
+def test_make_disjoint_window():
+ assert make_disjoint_window(([1, 2, 3, 4, 5], [2, 3, 4, 5, 6])) == (
+ [1],
+ [2, 3, 4, 5, 6],
+ )
+ assert make_disjoint_window(([1, 2, 3, 4, 5], [4, 5, 6])) == ([1, 2, 3], [4, 5, 6])
+ assert make_disjoint_window(([1, 2, 3, 4, 5], [6])) == ([1, 2, 3, 4, 5], [6])
+
+
+class TestCollator:
+ def make_generate_sample(self, end=10):
+ strings = ["x" * i for i in range(1, end + 1)]
+ gen_kwargs1, gen_kwargs2 = (
+ {"temperature": 0},
+ {"temperature": 0, "until": ["nn", "\n\n"]},
+ )
+ args = [
+ (string, gen_kwargs1 if i < len(strings) // 2 else gen_kwargs2)
+ for i, string in enumerate(strings)
+ ]
+
+ return args
+
+ def make_loglikelihood_sample(self, end=11):
+ samples = [
+ (("x", "x"), list(range(1, total_length + 1)))
+ for total_length in range(1, end + 1)
+ ]
+ return samples
+
+ def make_loglikelihood_sample_group(self, end=11):
+ a = [(("x", "x"), [1, 2, 3, 4, 5, 6, 7, 8], [x]) for x in range(9)]
+ b = [
+ (("x", "x"), [1, 2, 3, 4, 5, 6, 7, 8], [x, y, z])
+ for x, y, z in zip(range(9), range(9, 18), range(18, 27))
+ ]
+ return a + b
+
+ @pytest.mark.parametrize("batch_size, end", [(17, 30), (8, 61), (12, 48), (0, 9)])
+ def test_generations(self, batch_size, end):
+ _collate_gen = lambda x: (-len(x[0]), x[0]) # noqa: E731
+
+ generation_samples = self.make_generate_sample(int(end))
+ gens = Collator(generation_samples, _collate_gen, group_by="gen_kwargs")
+ chunks = gens.get_batched(n=int(batch_size), batch_fn=None)
+ output = []
+        for chunk in chunks:
+            # check batching
+            group_one = end // 2
+            group_two = end - end // 2
+            assert (
+                len(chunk) <= batch_size
+                if batch_size != 0
+                else len(chunk) in [group_one, group_two]
+            )
+            # check that the reordering function is applied correctly
+            assert all(
+                len(chunk[i][0]) <= len(chunk[i - 1][0])
+                for i in range(1, len(chunk))
+            )
+            # check that grouping by gen_kwargs is correct
+            assert all(x[1] == chunk[0][1] for x in chunk)
+            for x in chunk:
+                output.append(x)
+ reordered_output = gens.get_original(output)
+ # check get original
+ assert reordered_output == generation_samples
+
+ @pytest.mark.parametrize("batch_size, end", [(17, 30), (8, 61), (12, 48), (0, 3)])
+ def test_loglikelihood(self, batch_size, end):
+ _collate_log = lambda x: (-len(x[1]), tuple(x[1])) # noqa: E731
+ loglikelihood_samples = self.make_loglikelihood_sample(int(end))
+ loglikelihoods = Collator(
+ loglikelihood_samples,
+ _collate_log,
+ )
+ chunks = loglikelihoods.get_batched(n=int(batch_size), batch_fn=None)
+ output = []
+        for chunk in chunks:
+            # check batching
+            assert len(chunk) <= batch_size if batch_size != 0 else len(chunk) == end
+            # check reorder
+            assert all(
+                len(chunk[i][1]) <= len(chunk[i - 1][1])
+                for i in range(1, len(chunk))
+            )
+            for x in chunk:
+                output.append(x[1])
+ # check indices
+ reordered_output = loglikelihoods.get_original(output)
+ assert reordered_output == [x[1] for x in loglikelihood_samples]
+
+ @pytest.mark.parametrize("batch_size", [17, 8, 12, 0])
+ def test_context_grouping(self, batch_size):
+ def _collate(x):
+ toks = x[1] + x[2]
+ return -len(toks), tuple(toks)
+
+        _collate_log = _collate
+ loglikelihood_samples = self.make_loglikelihood_sample_group()
+ loglikelihoods = Collator(
+ loglikelihood_samples,
+ _collate_log,
+ group_fn=lambda a: a[-2] + a[-1][:-1],
+ group_by="contexts",
+ )
+ chunks = loglikelihoods.get_batched(n=int(batch_size), batch_fn=None)
+ output = []
+ outputs_ = []
+        for chunk in chunks:
+            # check batching
+            if batch_size != 0:
+                assert len(chunk) <= batch_size
+            # check reorder
+            assert all(
+                len(chunk[i][1]) <= len(chunk[i - 1][1])
+                for i in range(1, len(chunk))
+            )
+            for x in chunk:
+ for request_str, cont_toks, logits in loglikelihoods.get_cache(
+ req_str="".join(x[0]),
+ cxt_toks=x[1],
+ cont_toks=x[2],
+ logits=torch.tensor([1, 2, 3, 4, 5, 6, 7, 8])
+ .unsqueeze(0)
+ .unsqueeze(0),
+ ):
+ output.append(x[1])
+ outputs_.append(cont_toks)
+ assert len(output) == len(outputs_)
+ # check indices
+ reordered_output = loglikelihoods.get_original(output)
+ assert reordered_output == [x[1] for x in loglikelihood_samples]
+
+
+def test_aggregate_mean():
+ # test weight_by_size is respected
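+    # unweighted: mean(0.3, 0.2, 0.4) = 0.3
+    # weighted: (0.3 * 20 + 0.2 * 40 + 0.4 * 100) / (20 + 40 + 100) = 54 / 160 = 0.3375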
+ assert (
+ aggregate_subtask_metrics([0.3, 0.2, 0.4], [20, 40, 100], weight_by_size=False)
+ == 0.3
+ )
+ assert (
+ aggregate_subtask_metrics([0.3, 0.2, 0.4], [20, 40, 100], weight_by_size=True)
+ == 0.3375
+ )
+
+
+@pytest.mark.parametrize(
+ "samples",
+ [
+ [40 * [1.0] + 60 * [0.0], 30 * [1.0] + 30 * [0.0], 20 * [1.0] + 60 * [0.0]],
+ [35 * [1.0] + 65 * [0.0], 20 * [1.0] + 20 * [0.0]],
+ ],
+)
+def test_aggregate_stderrs(samples):
+ # check that aggregating subtasks' bootstrap stderrs with our formula
+ # (using weight_by_size) is ~equiv.
+ # to just getting bootstrap stderr of the whole set of samples
+ mean_stderr = stderr_for_metric(metric=mean, bootstrap_iters=100000)
+
+ stderrs = [mean_stderr(subtask) for subtask in samples]
+
+ sizes = [len(subtask) for subtask in samples]
+
+ assert np.allclose(
+ pooled_sample_stderr(stderrs, sizes),
+ mean_stderr(list(itertools.chain.from_iterable(samples))),
+ atol=1.0e-3,
+ )
diff --git a/lm-evaluation-harness/tests/testdata/blimp_left_branch_island_echo_question-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_left_branch_island_echo_question-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..da909529e5ae766814dc24d28e65ef3df4e7109c
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_left_branch_island_echo_question-v0-loglikelihood
@@ -0,0 +1 @@
+9852b38612db8c6adf938a5d8a7a9e5ce9e655259d6cc806b142506fcaff0ed4
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/ethics_virtue-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/ethics_virtue-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..48652c4689e2be24972881d0abff497d203ace9a
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/ethics_virtue-v0-loglikelihood
@@ -0,0 +1 @@
+8021db8de46850090ddae6e6ec2d382029c3027b7c69884607503f916d09b709
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-electrical_engineering-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-electrical_engineering-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..13b76c1d5f94218128b2038d55bd300faf66ff44
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-electrical_engineering-v0-res.json
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-electrical_engineering": {"acc": 0.2689655172413793, "acc_norm": 0.2827586206896552, "acc_norm_stderr": 0.037528339580033376, "acc_stderr": 0.036951833116502325}}, "versions": {"hendrycksTest-electrical_engineering": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-human_sexuality-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-human_sexuality-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..b3d3ae438c1fc59930d1d4ba053d73c38b6d9c07
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-human_sexuality-v0-loglikelihood
@@ -0,0 +1 @@
+4b07922fa1d549b655c21440b13d869263ce7dd9771d8147c450f11c91d26c10
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_pubmed-central-v0-res.json b/lm-evaluation-harness/tests/testdata/pile_pubmed-central-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..6e5f1efe495f7030764f96e45460a4d47315b1e3
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_pubmed-central-v0-res.json
@@ -0,0 +1 @@
+{"results": {"pile_pubmed-central": {"bits_per_byte": 1.5812411832795375e-05, "byte_perplexity": 1.0000158125368497, "word_perplexity": 1.000123107107861}}, "versions": {"pile_pubmed-central": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/qqp-v0-res.json b/lm-evaluation-harness/tests/testdata/qqp-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..b7b31355e644bd9d6d57758ee9a454598445f7c9
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/qqp-v0-res.json
@@ -0,0 +1 @@
+{"results": {"qqp": {"acc": 0.49782339846648527, "acc_stderr": 0.0024866770696239894, "f1": 0.42322661288031593, "f1_stderr": 0.002695903831328166}}, "versions": {"qqp": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt20-ja-en-v0-res.json b/lm-evaluation-harness/tests/testdata/wmt20-ja-en-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..4344b7cd8a1a9bfb8cd60e2aa0ece17f530f7d3d
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt20-ja-en-v0-res.json
@@ -0,0 +1 @@
+{"results": {"wmt20-ja-en": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.010703148854351403, "chrf_stderr": 0.00022242113108130186, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-ja-en": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt20-ru-en-v0-greedy_until b/lm-evaluation-harness/tests/testdata/wmt20-ru-en-v0-greedy_until
new file mode 100644
index 0000000000000000000000000000000000000000..27c60fb72194325e51647ce0fe137710df6dff86
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt20-ru-en-v0-greedy_until
@@ -0,0 +1 @@
+1477ab6542c26bd0222cc1aded174f33bf8d04d1cf6a1c0959aeca4ff3779adc
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/utils.py b/lm-evaluation-harness/tests/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3418206af9900f95286917050446252e5a289c9
--- /dev/null
+++ b/lm-evaluation-harness/tests/utils.py
@@ -0,0 +1,49 @@
+import os
+from typing import List, Union
+
+from lm_eval.utils import load_yaml_config
+
+
+# {{{CI}}}
+# This is the path where the list of changed files under the tasks folder is stored
+# FILE_PATH = file_path = ".github/outputs/tasks_all_changed_and_modified_files.txt"
+
+
+# reads a text file and returns a list of words
+# used to read the changed-files output txt written by tj-actions/changed-files
+def load_changed_files(file_path: str) -> List[str]:
+ with open(file_path, "r") as f:
+ content = f.read()
+ words_list = [x for x in content.split()]
+ return words_list
+
+
+# checks the txt file for the list of changed files.
+# if a file exists and ends with .yaml, load its config.
+# if the config's "task" is a string, it's a task config.
+# if the config's "task" is a list, it's a group config.
+def parser(full_path: List[str]) -> List[str]:
+ _output = set()
+ for x in full_path:
+ if os.path.exists(x) and x.endswith(".yaml"):
+ config = load_yaml_config(x, mode="simple")
+ if isinstance(config["task"], str):
+ _output.add(config["task"])
+ elif isinstance(config["task"], list):
+ _output.add(config["group"])
+ return list(_output)
+
+
+def new_tasks() -> Union[List[str], None]:
+ FILENAME = ".github/outputs/tasks_all_changed_and_modified_files.txt"
+ if os.path.exists(FILENAME):
+ # If tasks folder has changed then we get the list of files from FILENAME
+ # and parse the yaml files to get the task names.
+ return parser(load_changed_files(FILENAME))
+ elif os.getenv("API") is not None:
+        # Otherwise, if the API has changed (signalled by the API env variable
+        # being set), run a fixed set of representative tasks.
+ return ["arc_easy", "hellaswag", "piqa", "wikitext"]
+    # if neither applies, return None and let the caller fall back to its default (e.g. arc_easy)
+ else:
+ return
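+
+
+# Illustrative example (hypothetical file path, not part of the harness): if a change
+# touches e.g. lm_eval/tasks/hellaswag/hellaswag.yaml, `parser` would return the task
+# name declared in that config (or the group name for a group config), and `new_tasks`
+# would hand that list to the test suite; with no detected changes it returns None.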
diff --git a/venv/lib/python3.10/site-packages/sympy/plotting/tests/test_region_or.png b/venv/lib/python3.10/site-packages/sympy/plotting/tests/test_region_or.png
new file mode 100644
index 0000000000000000000000000000000000000000..7f9cc7cf23bec219b8d6101c4cbae235a2c678d1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sympy/plotting/tests/test_region_or.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e41ba0d3dbf2a20f82bb79a4cbba5bb458dec396ccbdba5ed195d6b200ca7f2e
+size 8809
diff --git a/venv/lib/python3.10/site-packages/sympy/plotting/tests/test_region_xor.png b/venv/lib/python3.10/site-packages/sympy/plotting/tests/test_region_xor.png
new file mode 100644
index 0000000000000000000000000000000000000000..cafdc56f650a8c4d7af38fdfd8206891aa9d6cc2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sympy/plotting/tests/test_region_xor.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92e71558103d03df0ea5c47876277968b5d4ca8ab8cf43b80b73cce9d962052c
+size 10002
diff --git a/venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1b21c7d2f7126363420aad2bf2e71db07570111
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c31a7ee3d98eb2acf6f52a5efcad72d58f6976fe
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/checkpoint_reshaping_and_interoperability.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/checkpoint_reshaping_and_interoperability.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9945d9e3777cb2dbdd283dddf397ea4ce40943de
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/checkpoint_reshaping_and_interoperability.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/convert_megatron_gpt2_checkpoint.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/convert_megatron_gpt2_checkpoint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..adc495eec5aced0e9d45206ab0f0d803075a0f01
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/__pycache__/convert_megatron_gpt2_checkpoint.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py b/venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py
new file mode 100644
index 0000000000000000000000000000000000000000..15ccfb4dcb1ff8f0245dc870b0dbc146d07284c8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py
@@ -0,0 +1,904 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import json
+import os
+import re
+import sys
+import types
+
+import torch
+
+from transformers import AutoTokenizer, GPT2Config
+from transformers.modeling_utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME, shard_checkpoint
+
+
+def add_checkpointing_args(parser):
+ parser.add_argument("--megatron-path", type=str, default=None, help="Base directory of Megatron repository")
+ parser.add_argument(
+ "--convert_checkpoint_from_megatron_to_transformers",
+ action="store_true",
+ help=(
+ "If True, convert a Megatron checkpoint to a Transformers checkpoint. "
+ "If False, convert a Transformers checkpoint to a Megatron checkpoint."
+ ),
+ )
+ parser.add_argument(
+ "--load_path",
+ type=str,
+ required=True,
+ help="Path to the checkpoint to convert.",
+ )
+ parser.add_argument(
+ "--save_path",
+ type=str,
+ required=True,
+ help="Path to the converted checkpoint.",
+ )
+ parser.add_argument("--print-checkpoint-structure", action="store_true")
+ return parser
+
+
+def add_megatron_checkpoint_args(parser):
+ parser.add_argument(
+ "--target_tensor_model_parallel_size",
+ type=int,
+ default=1,
+ help=(
+ "The tensor model parallel size of the converted checkpoint. "
+ "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
+ ),
+ )
+ parser.add_argument(
+ "--target_pipeline_model_parallel_size",
+ type=int,
+ default=1,
+ help=(
+ "The pipeline model parallel size of the converted checkpoint. "
+ "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
+ ),
+ )
+ parser.add_argument(
+ "--target_data_parallel_size",
+ type=int,
+ default=1,
+ help=(
+ "The data parallel size of the converted checkpoint. "
+ "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
+ ),
+ )
+ parser.add_argument(
+ "--target_params_dtype",
+ type=str,
+ default="fp32",
+ help=(
+ "The dtype of the converted checkpoint. "
+ "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
+ ),
+ )
+ parser.add_argument(
+ "--make_vocab_size_divisible_by",
+ type=int,
+ default=128,
+ help=(
+ "Pad the vocab size to be divisible by this value. "
+ "This is added for computational efficieny reasons. "
+ "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
+ ),
+ )
+ parser.add_argument(
+ "--use_distributed_optimizer",
+ action="store_true",
+ help=(
+ "If True, use the distributed optimizer. "
+ "Only used when converting a Transformers checkpoint to a Megatron checkpoint."
+ ),
+ )
+ return parser
+
+
+def add_transformers_checkpoint_args(parser):
+ parser.add_argument(
+ "--tokenizer_name",
+ type=str,
+ default=None,
+ help=(
+ "The name of the pre-trained tokenizer to save. "
+ "If not None, the tokenizer will be saved. "
+ "Only used when converting a Megatron checkpoint to a Transformers checkpoint."
+ ),
+ )
+ parser.add_argument(
+ "--max_shard_size",
+ type=str,
+ default="10GB",
+ help=(
+ "The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size "
+ "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`). "
+ "Only used when converting a Megatron checkpoint to a Transformers checkpoint."
+ ),
+ )
+
+ return parser
+
+
+# The simple map of names for "automated" rules.
+megatron_to_transformers = {
+ "attention.dense": ".attn.c_proj.",
+ "self_attention.dense": ".attn.c_proj.",
+ "mlp.dense_h_to_4h": ".mlp.c_fc.",
+ "mlp.dense_4h_to_h": ".mlp.c_proj.",
+}
+transformers_to_megatron = {v[1:-1]: k for k, v in megatron_to_transformers.items()}
+
+tensor_parallel_params = [
+ # megatron-lm layers to merge across tp ranks
+ "self_attention.query_key_value.weight",
+ "self_attention.query_key_value.bias",
+ "self_attention.dense.weight",
+ "mlp.dense_h_to_4h.weight",
+ "mlp.dense_h_to_4h.bias",
+ "mlp.dense_4h_to_h.weight",
+ # deprecated
+ "attention.query_key_value.weight",
+ "attention.query_key_value.bias",
+ "attention.dense.weight",
+ # transformers layers to split across tp ranks
+ "attn.c_attn.weight",
+ "attn.c_attn.bias",
+ "attn.c_proj.weight",
+ "mlp.c_fc.weight",
+ "mlp.c_fc.bias",
+ "mlp.c_proj.weight",
+]
+
+
+def recursive_print(name, val, spaces=0):
+ """
+ Recursively print the structure of a checkpoint. This function is taken from `convert_megatron_gpt2_checkpoint.py`
+
+ Args:
+ name (str): the name of the current tensor parameter
+ val (Tuple(int)): the shape of the current tensor parameter
+ spaces (int): the number of spaces to print before the output for a nested structure
+ """
+ # Format the message.
+ if name is None:
+ msg = None
+ else:
+ fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
+ msg = fmt.format(name)
+
+ # Print and recurse (if needed).
+ if isinstance(val, dict):
+ if msg is not None:
+ print(msg)
+ for k in val.keys():
+ recursive_print(k, val[k], spaces + 2)
+ elif isinstance(val, torch.Tensor):
+ print(msg, ":", val.size())
+ else:
+ print(msg, ":", val)
+
+
+def megatron_to_transformers_fix_query_key_value_ordering(
+ param, checkpoint_version, num_splits, num_heads, hidden_size
+):
+ """
+ Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :] for compatibility with later versions
+ of NVIDIA Megatron-LM. The inverse operation is performed inside Megatron-LM to read checkpoints:
+ https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209 If param is the weight tensor of the
+ self-attention block, the returned tensor will have to be transposed one more time to be read by HuggingFace GPT2.
+ This function is taken from `convert_megatron_gpt2_checkpoint.py`
+
+ Args:
+ param (torch.Tensor): the tensor to permute
+ checkpoint_version (int): the version of the checkpoint.
+ num_splits (int): the number of projections, usually 3 for (Query, Key, Value)
+ num_heads (int): the number of attention heads
+ hidden_size (int): the hidden size per head
+ """
+
+ input_shape = param.size()
+ if checkpoint_version == 1.0:
+ # version 1.0 stores [num_heads * hidden_size * num_splits, :]
+ saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
+ param = param.view(*saved_shape)
+ param = param.transpose(0, 2)
+ param = param.transpose(1, 2).contiguous()
+ elif checkpoint_version >= 2.0:
+ # other versions store [num_heads * num_splits * hidden_size, :]
+ saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
+ param = param.view(*saved_shape)
+ param = param.transpose(0, 1).contiguous()
+ param = param.view(*input_shape)
+ return param
+
+
+def transformers_to_megatron_fix_query_key_value_ordering(
+ param, checkpoint_version, num_splits, num_heads, hidden_size
+):
+ """
+    Permutes layout of param tensor to the one compatible with the respective NVIDIA Megatron-LM checkpoint versions. Input
+ is [num_splits * num_heads * hidden_size, :] and output is [num_heads * hidden_size * num_splits, :] for version
+ 1.0 and [num_heads * num_splits * hidden_size, :] for version 2.0 and later. If param is the weight tensor of the
+ self-attention block, the param needs to be already transposed before calling this function.
+
+ Args:
+ param (torch.Tensor): the tensor to permute
+ checkpoint_version (int): the version of the checkpoint.
+ num_splits (int): the number of projections, usually 3 for (Query, Key, Value)
+ num_heads (int): the number of attention heads
+ hidden_size (int): the hidden size per head
+ """
+
+ # Input is [num_splits * num_heads * hidden_size, :]
+ input_shape = param.size()
+ if checkpoint_version == 1.0:
+ # version 1.0 stores [num_heads * hidden_size * num_splits, :]
+ current_shape = (num_splits, num_heads, hidden_size) + input_shape[1:]
+ param = param.view(*current_shape)
+ param = param.transpose(0, 2)
+ param = param.transpose(1, 2).contiguous()
+ elif checkpoint_version >= 2.0:
+ # other versions store [num_heads * num_splits * hidden_size, :]
+ current_shape = (num_splits, num_heads, hidden_size) + input_shape[1:]
+ param = param.view(*current_shape)
+ param = param.transpose(0, 1).contiguous()
+ param = param.view(*input_shape)
+ return param
+
+
+def merge_transformers_sharded_states(path, num_checkpoints):
+ """
+ Merge sharded checkpoints from transformers into a single checkpoint.
+
+ Args:
+ path (str): the path to the sharded checkpoints
+ num_checkpoints (int): the number of checkpoints to merge
+ """
+ state_dict = {}
+ for i in range(1, num_checkpoints + 1):
+ checkpoint_path = os.path.join(path, f"pytorch_model-{i:05d}-of-{num_checkpoints:05d}.bin")
+ current_chunk = torch.load(checkpoint_path, map_location="cpu")
+ state_dict.update(current_chunk)
+ return state_dict
+
+
+def get_megatron_sharded_states(args, tp_size, pp_size, pp_rank):
+ """
+ Get sharded checkpoints from NVIDIA Megatron-LM checkpoint based on the provided tensor parallel size, pipeline
+ parallel size and pipeline parallel rank.
+
+ Args:
+ args (argparse.Namespace): the arguments to the script
+ tp_size (int): the tensor parallel size
+ pp_size (int): the pipeline parallel size
+ pp_rank (int): the pipeline parallel rank
+ """
+ tp_state_dicts = []
+ for i in range(tp_size):
+ sub_dir_name = f"mp_rank_{i:02d}" if pp_size == 1 else f"mp_rank_{i:02d}_{pp_rank:03d}"
+ for checkpoint_name in ["model_optim_rng.pt", "model_rng.pt"]:
+ checkpoint_path = os.path.join(args.load_path, sub_dir_name, checkpoint_name)
+ if os.path.isfile(checkpoint_path):
+ break
+ state_dict = torch.load(checkpoint_path, map_location="cpu")
+ tp_state_dicts.append(state_dict)
+ return tp_state_dicts
+
+
+def get_element_from_dict_by_path(d, path):
+ """
+ Get element from dictionary by path. If element is not present, recursively add empty dictionaries.
+
+ Args:
+ d (dict): the dictionary to get the element from
+ path (list): the path to the element which is delimited by "."
+ """
+ path = path.split(".")
+ for k in path:
+ if k not in d:
+ d[k] = {}
+ d = d[k]
+ return d
+
+
+def convert_checkpoint_from_megatron_to_transformers(args):
+ """
+ Convert NVIDIA Megatron-LM checkpoint to HuggingFace Transformers checkpoint. This handles Megatron checkpoints
+ with different tensor parallelism and pipeline parallelism sizes. It saves the converted checkpoint into shards
+ using HuggingFace Transformers checkpoint sharding functionality. This greatly extends the functionality of
+ `convert_megatron_gpt2_checkpoint.py`
+
+ Args:
+ args (argparse.Namespace): the arguments to the script
+ """
+ # Load Megatron-LM checkpoint arguments from the state dict
+ sub_dirs = os.listdir(args.load_path)
+ possible_sub_dirs = ["mp_rank_00", "mp_rank_00_000"]
+ for sub_dir in possible_sub_dirs:
+ if sub_dir in sub_dirs:
+ rank0_checkpoint_name = os.listdir(os.path.join(args.load_path, sub_dir))[0]
+ rank0_checkpoint_path = os.path.join(args.load_path, sub_dir, rank0_checkpoint_name)
+ break
+ print(f"Loading Megatron-LM checkpoint arguments from: {rank0_checkpoint_path}")
+ state_dict = torch.load(rank0_checkpoint_path, map_location="cpu")
+ megatron_args = state_dict.get("args", None)
+ if megatron_args is None:
+ raise ValueError(
+ "Megatron-LM checkpoint does not contain arguments. This utility only supports Megatron-LM checkpoints"
+ " containing all the megatron arguments. This is because it loads all config related to model"
+ " architecture, the tensor and pipeline model parallel size from the checkpoint insead of user having to"
+ " manually specify all the details. Please save Megatron-LM checkpoint along with all the megatron"
+ " arguments to use this utility."
+ )
+
+ # Create Transformers GPT2 config from Megatron-LM arguments
+ if megatron_args is not None:
+ if megatron_args.bias_gelu_fusion:
+ activation_function = "gelu_fast"
+ elif megatron_args.openai_gelu:
+ activation_function = "gelu_new"
+ else:
+ activation_function = "gelu"
+ else:
+ # in the very early days this used to be "gelu_new"
+ activation_function = "gelu_new"
+ vocab_size = (
+ megatron_args.padded_vocab_size
+ if getattr(megatron_args, "orig_vocab_size", None) is None
+ else megatron_args.orig_vocab_size
+ )
+ print(vocab_size)
+
+ config = GPT2Config(
+ vocab_size=vocab_size,
+ n_positions=megatron_args.max_position_embeddings,
+ n_embd=megatron_args.hidden_size,
+ n_layer=megatron_args.num_layers,
+ n_head=megatron_args.num_attention_heads,
+ n_inner=megatron_args.ffn_hidden_size,
+ activation_function=activation_function,
+ resid_pdrop=0.1,
+ embd_pdrop=0.1,
+ attn_pdrop=0.1,
+ layer_norm_epsilon=1e-5,
+ initializer_range=0.02,
+ summary_type="cls_index",
+ summary_use_proj=True,
+ summary_activation=None,
+ summary_proj_to_labels=True,
+ summary_first_dropout=0.1,
+ scale_attn_weights=True,
+ use_cache=True,
+ bos_token_id=vocab_size - 1,
+ eos_token_id=vocab_size - 1,
+ architectures=["GPT2LMHeadModel"],
+ )
+
+ output_state_dict = {}
+
+ checkpoint_version = state_dict.get("checkpoint_version", 0.0)
+ tp_size = megatron_args.tensor_model_parallel_size
+ pp_size = megatron_args.pipeline_model_parallel_size
+ dtype = torch.float32
+ # The regex to extract layer names.
+ layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
+
+ # Convert.
+ print("Converting")
+
+ # Embeddings
+ print("Converting embeddings")
+ tp_state_dicts = get_megatron_sharded_states(args, tp_size, pp_size, 0)
+
+ # Convert and store the position embeddings.
+ position_embeddings = get_element_from_dict_by_path(
+ tp_state_dicts[0], "model.language_model.embedding.position_embeddings.weight"
+ )
+ output_state_dict["transformer.wpe.weight"] = position_embeddings.to(dtype)
+
+ # Convert and store the word embeddings.
+ word_embeddings = torch.cat(
+ [
+ get_element_from_dict_by_path(
+ tp_state_dicts[tp_rank], "model.language_model.embedding.word_embeddings.weight"
+ )
+ for tp_rank in range(tp_size)
+ ],
+ dim=0,
+ )
+ word_embeddings = word_embeddings[:vocab_size].to(dtype)
+ output_state_dict["transformer.wte.weight"] = word_embeddings
+
+ # Transformer Layers
+ print("Converting transformer layers")
+ # The number of heads.
+ heads = config.n_head
+ # The hidden_size per head.
+ hidden_size_per_head = config.n_embd // config.n_head
+ n_positions = config.n_positions
+ num_layers = config.num_hidden_layers // pp_size
+
+ for pp_rank in range(pp_size):
+ if pp_size > 0:
+ print(f"Converting pipeline parallel rank {pp_rank}")
+ tp_state_dicts = get_megatron_sharded_states(args, tp_size, pp_size, pp_rank)
+
+ # The transformer.
+ path = (
+ "model.language_model.transformer"
+ if "transformer" in get_element_from_dict_by_path(tp_state_dicts[0], "model.language_model").keys()
+ else "model.language_model.encoder"
+ )
+ # Extract the layers.
+ for key, val in get_element_from_dict_by_path(tp_state_dicts[0], path).items():
+ # Match the name.
+ m = layer_re.match(key)
+ # Stop if that's not a layer
+ if m is None:
+ break
+
+ # The index of the layer.
+ layer_idx = int(m.group(1)) + pp_rank * num_layers
+ # The name of the operation.
+ op_name = m.group(2)
+ # Is it a weight or a bias?
+ weight_or_bias = m.group(3)
+
+ # The name of the layer.
+ layer_name = f"transformer.h.{layer_idx}"
+
+ if op_name + "." + weight_or_bias not in tensor_parallel_params:
+ params = val.to(dtype)
+ else:
+ dim = 1 if op_name in ["self_attention.dense", "mlp.dense_4h_to_h", "attention.dense"] else 0
+ params = torch.cat(
+ [val]
+ + [
+ get_element_from_dict_by_path(tp_state_dicts[tp_rank], f"{path}")[key]
+ for tp_rank in range(1, tp_size)
+ ],
+ dim=dim,
+ ).to(dtype)
+
+ # For layernorm(s), simply store the layer norm.
+ if op_name.endswith("layernorm"):
+ ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
+ output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = params
+
+ # Transpose the QKV matrix.
+ elif (
+ op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
+ ) and weight_or_bias == "weight":
+ # Insert a tensor of 1x1xDxD bias.
+ causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=dtype)).view(
+ 1, 1, n_positions, n_positions
+ )
+ output_state_dict[layer_name + ".attn.bias"] = causal_mask
+
+ # Insert a "dummy" tensor for masked_bias.
+ masked_bias = torch.tensor(-1e4, dtype=dtype)
+ output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
+
+ out_val = megatron_to_transformers_fix_query_key_value_ordering(
+ params,
+ checkpoint_version,
+ 3,
+ heads,
+ hidden_size_per_head,
+ )
+ # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
+ out_val = out_val.transpose(0, 1).contiguous()
+ # Store.
+ output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
+
+ # Transpose the bias.
+ elif (
+ op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
+ ) and weight_or_bias == "bias":
+ out_val = megatron_to_transformers_fix_query_key_value_ordering(
+ params, checkpoint_version, 3, heads, hidden_size_per_head
+ )
+ # Store. No change of shape.
+ output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
+
+ # Transpose the weights.
+ elif weight_or_bias == "weight":
+ out_name = megatron_to_transformers[op_name]
+ output_state_dict[layer_name + out_name + "weight"] = params.transpose(0, 1)
+
+ # Copy the bias.
+ elif weight_or_bias == "bias":
+ out_name = megatron_to_transformers[op_name]
+ output_state_dict[layer_name + out_name + "bias"] = params
+
+ if config.n_layer != (layer_idx + 1):
+ raise ValueError(f"Expected {config.n_layer} layers but found {layer_idx + 1}")
+
+ # The final layernorm.
+ print("Converting final layernorm")
+ params = get_element_from_dict_by_path(tp_state_dicts[0], str(path))
+ output_state_dict["transformer.ln_f.weight"] = params["final_layernorm.weight"].to(dtype)
+ output_state_dict["transformer.ln_f.bias"] = params["final_layernorm.bias"].to(dtype)
+
+    # For the LM head, transformers expects the weight matrix to be tied to the word embeddings.
+ print("Converting LM head")
+ output_state_dict["lm_head.weight"] = word_embeddings.to(dtype)
+
+ # It should be done!
+ print("Conversion from Megatron-LM to Transformers is done!")
+
+ # Print the structure of converted state dict.
+ if args.print_checkpoint_structure:
+ recursive_print(None, output_state_dict)
+
+ # Add tokenizer class info to config
+ # see https://github.com/huggingface/transformers/issues/13906)
+
+ if args.tokenizer_name is None:
+ tokenizer_name = "openai-community/gpt2"
+ else:
+ tokenizer_name = args.tokenizer_name
+
+ tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
+ tokenizer_class = type(tokenizer).__name__
+ config.tokenizer_class = tokenizer_class
+
+ # Store the config to file.
+ print("Saving config")
+ config.save_pretrained(args.save_path)
+
+ # Save tokenizer based on args
+ if args.tokenizer_name is not None:
+ print(f"Adding {tokenizer_class} tokenizer files")
+ tokenizer.save_pretrained(args.save_path)
+
+ # Store the state_dict to file.
+ max_shard_size = int(args.max_shard_size) if args.max_shard_size.isdigit() else args.max_shard_size
+ shards, index = shard_checkpoint(output_state_dict, max_shard_size=max_shard_size)
+
+ # Save the model
+ for shard_file, shard in shards.items():
+ torch.save(shard, os.path.join(args.save_path, shard_file))
+
+ if index is None:
+ print(f"Model weights saved in {os.path.join(args.save_path, WEIGHTS_NAME)}")
+ else:
+ save_index_file = os.path.join(args.save_path, WEIGHTS_INDEX_NAME)
+ # Save the index as well
+ with open(save_index_file, "w", encoding="utf-8") as f:
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+ f.write(content)
+ print(
+ f"The model is bigger than the maximum size per checkpoint ({args.max_shard_size}) and is going to be "
+ f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the "
+ f"index located at {save_index_file}."
+ )
+
+
+def convert_checkpoint_from_transformers_to_megatron(args):
+ """
+    Convert a checkpoint from HuggingFace Transformers to Megatron-LM. The converted checkpoint can use arbitrary
+    tensor parallelism and pipeline parallelism sizes. It takes as input a HuggingFace Transformers checkpoint, which
+    can consist of multiple shards.
+
+ Args:
+ args (argparse.Namespace): the arguments to the script
+
+ """
+ os.makedirs(args.save_path, exist_ok=True)
+ # Search in directory above this
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
+ if args.megatron_path is not None:
+ sys.path.insert(0, args.megatron_path)
+
+ try:
+ from megatron.tokenizer.tokenizer import _vocab_size_with_padding
+ except ModuleNotFoundError:
+ print("Unable to import Megatron, please specify the path to Megatron using --megatron-path. Exiting.")
+ exit(1)
+
+ # load the transformers model state dict and config
+ sub_dirs = [x for x in os.listdir(args.load_path) if x.startswith("pytorch_model")]
+ if len(sub_dirs) == 1:
+ checkpoint_name = "pytorch_model.bin"
+ state_dict = torch.load(os.path.join(args.load_path, checkpoint_name), map_location="cpu")
+ else:
+ num_checkpoints = len(sub_dirs) - 1
+ state_dict = merge_transformers_sharded_states(args.load_path, num_checkpoints)
+
+ config = GPT2Config.from_pretrained(args.load_path)
+
+ # Saving the tracker file
+ tracker_filepath = os.path.join(args.save_path, "latest_checkpointed_iteration.txt")
+ with open(tracker_filepath, "w") as f:
+ f.write("release")
+
+ # create `release` dir in args.load_path
+ release_dir = os.path.join(args.save_path, "release")
+ os.makedirs(release_dir, exist_ok=True)
+
+ # megatron args
+ megatron_args = {
+ "orig_vocab_size": config.vocab_size,
+ "max_position_embeddings": config.n_positions,
+ "hidden_size": config.n_embd,
+ "num_layers": config.n_layer,
+ "num_attention_heads": config.n_head,
+ "ffn_hidden_size": config.n_inner,
+ "tensor_model_parallel_size": args.target_tensor_model_parallel_size,
+ "pipeline_model_parallel_size": args.target_pipeline_model_parallel_size,
+ "data_parallel_size": args.target_data_parallel_size,
+ "make_vocab_size_divisible_by": args.make_vocab_size_divisible_by,
+ "rank": 0,
+ "tokenizer_type": "GPT2BPETokenizer",
+ }
+
+ if config.activation_function == "gelu":
+ megatron_args["bias_gelu_fusion"] = False
+ megatron_args["openai_gelu"] = False
+ elif config.activation_function == "gelu_fast":
+ megatron_args["bias_gelu_fusion"] = True
+ megatron_args["openai_gelu"] = False
+ elif config.activation_function == "gelu_new":
+ megatron_args["bias_gelu_fusion"] = False
+ megatron_args["openai_gelu"] = True
+
+ margs = types.SimpleNamespace()
+ for k, v in megatron_args.items():
+ setattr(margs, k, v)
+
+ # params dtype
+ if args.target_params_dtype == "fp16":
+ dtype = torch.float16
+ elif args.target_params_dtype == "bf16":
+ dtype = torch.bfloat16
+ else:
+ dtype = torch.float32
+ setattr(margs, "params_dtype", dtype)
+
+ # save dummy optim state dict
+ dummy_optim_state_dict = {}
+ dummy_optim_state_dict["optimizer"] = {
+ "step": 0,
+ "param_groups": [
+ {
+ "lr": 0.0,
+ "beta1": 0.0,
+ "beta2": 0.0,
+ "eps": 0.0,
+ "weight_decay": 0.0,
+ "correct_bias": False,
+ "params": [],
+ }
+ ],
+ }
+ if args.use_distributed_optimizer:
+ for i in range(args.target_pipeline_model_parallel_size):
+ for j in range(args.target_tensor_model_parallel_size):
+ for k in range(args.target_data_parallel_size):
+ if args.target_pipeline_model_parallel_size == 1:
+ checkpoint_dir = f"mp_rank_{j:02d}_{k:03d}"
+ else:
+ checkpoint_dir = f"mp_rank_{j:02d}_{i:03d}_{k:03d}"
+ checkpoint_dir = os.path.join(release_dir, checkpoint_dir)
+ os.makedirs(checkpoint_dir, exist_ok=True)
+ torch.save(
+ dummy_optim_state_dict,
+ os.path.join(checkpoint_dir, "optim.pt"),
+ )
+
+ # Convert.
+ print("Converting")
+ output_state_dict = []
+ for i in range(args.target_tensor_model_parallel_size):
+ output_state_dict.append({})
+
+ # Embedding layer
+ print("converting embedding layer")
+ pos_embedding = state_dict["transformer.wpe.weight"].to(dtype)
+ word_embedding = state_dict["transformer.wte.weight"].to(dtype)
+ orig_vocab_size = config.vocab_size
+ padded_vocab_size = _vocab_size_with_padding(orig_vocab_size, margs)
+ setattr(margs, "padded_vocab_size", padded_vocab_size)
+ # Cut out extra padding we don't need
+ if orig_vocab_size > padded_vocab_size:
+ full_word_embed = word_embedding[0:padded_vocab_size, :]
+ # Expanding embedding to larger size by replicating final entry
+ elif orig_vocab_size < padded_vocab_size:
+ padding_size = padded_vocab_size - orig_vocab_size
+ full_word_embed = torch.cat((word_embedding, word_embedding[-1].unsqueeze(0).expand(padding_size, -1)))
+ # Same size!
+ else:
+ full_word_embed = word_embedding
+
+ # Split into new tensor model parallel sizes
+ out_word_embed = torch.chunk(full_word_embed, args.target_tensor_model_parallel_size, dim=0)
+ for i in range(args.target_tensor_model_parallel_size):
+ pos_emb_dict = get_element_from_dict_by_path(
+ output_state_dict[i], "model.language_model.embedding.position_embeddings"
+ )
+ pos_emb_dict["weight"] = pos_embedding
+
+ word_emb_dict = get_element_from_dict_by_path(
+ output_state_dict[i], "model.language_model.embedding.word_embeddings"
+ )
+ word_emb_dict["weight"] = out_word_embed[i].clone()
+
+ # Transformer layers
+ print("converting transformer layers")
+ if config.num_attention_heads % args.target_tensor_model_parallel_size != 0:
+ raise ValueError(
+ f"Number of attention heads ({config.num_attention_heads}) must be divisible by number of tensor parallelism"
+ f" ({args.target_tensor_model_parallel_size})"
+ )
+
+ if config.num_hidden_layers % args.target_pipeline_model_parallel_size != 0:
+ raise ValueError(
+ f"Number of layers ({config.num_hidden_layers}) must be divisible by number of pipeline parallelism"
+ f" ({args.target_pipeline_model_parallel_size})"
+ )
+
+ num_layers = config.num_hidden_layers // args.target_pipeline_model_parallel_size
+
+ layer_re = re.compile(r"transformer.h\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
+ # The number of heads.
+ heads = config.n_head
+ # The hidden_size per head.
+ hidden_size_per_head = config.n_embd // config.n_head
+ for pp_rank in range(args.target_pipeline_model_parallel_size):
+ layer_offset = pp_rank * num_layers
+ if pp_rank > 0:
+ output_state_dict = []
+ for i in range(args.target_tensor_model_parallel_size):
+ output_state_dict.append({})
+
+ for layer in range(num_layers):
+ pp_layer_id = layer + layer_offset
+ layers_to_copy = [
+ layer_name
+ for layer_name in state_dict.keys()
+ if layer_name.startswith(f"transformer.h.{pp_layer_id}.")
+ ]
+
+ for layer_name in layers_to_copy:
+ m = layer_re.match(layer_name)
+ # Stop if that's not a layer
+ if m is None:
+ break
+
+ # The index of the layer.
+ _ = int(m.group(1))
+ # The name of the operation.
+ op_name = m.group(2)
+ # Is it a weight or a bias?
+ weight_or_bias = m.group(3)
+
+ params = state_dict[layer_name].to(dtype)
+ # handle layernorm
+ if op_name.startswith("ln"):
+ out_name = "input_layernorm" if op_name.endswith("1") else "post_attention_layernorm"
+ layer_name = f"layers.{layer}.{out_name}.{weight_or_bias}"
+
+ # handle attention K, V, Q weights
+ elif op_name.startswith("attn.c_attn") and weight_or_bias == "weight":
+ # transformers stores D X (3*D) but Megatron-LM expects (3*D) X D.
+ params = params.transpose(0, 1).contiguous()
+
+ params = transformers_to_megatron_fix_query_key_value_ordering(
+ params,
+ 3.0,
+ 3,
+ heads,
+ hidden_size_per_head,
+ )
+ layer_name = f"layers.{layer}.self_attention.query_key_value.{weight_or_bias}"
+
+ # handle attention K, V, Q bias
+ elif op_name.startswith("attn.c_attn") and weight_or_bias == "bias":
+ params = transformers_to_megatron_fix_query_key_value_ordering(
+ params,
+ 3.0,
+ 3,
+ heads,
+ hidden_size_per_head,
+ )
+ layer_name = f"layers.{layer}.self_attention.query_key_value.{weight_or_bias}"
+
+ # handle attention and mlp weights
+ elif weight_or_bias == "weight":
+ out_name = transformers_to_megatron.get(op_name, None)
+ if out_name is None:
+ continue
+ params = params.transpose(0, 1)
+ layer_name = f"layers.{layer}.{out_name}.{weight_or_bias}"
+
+ # handle attention and mlp bias
+ elif weight_or_bias == "bias":
+ out_name = transformers_to_megatron.get(op_name, None)
+ if out_name is None:
+ continue
+ layer_name = f"layers.{layer}.{out_name}.{weight_or_bias}"
+
+ # skip
+ else:
+ continue
+
+ if op_name + "." + weight_or_bias in tensor_parallel_params:
+ dim = 1 if op_name in ["attn.c_proj", "mlp.c_proj"] else 0
+ params = torch.chunk(params, args.target_tensor_model_parallel_size, dim=dim)
+
+ for i in range(args.target_tensor_model_parallel_size):
+ params_dict = get_element_from_dict_by_path(output_state_dict[i], "model.language_model.encoder")
+ params_dict[layer_name] = (
+ params[i].clone() if (op_name + "." + weight_or_bias in tensor_parallel_params) else params
+ )
+
+ if pp_rank == args.target_pipeline_model_parallel_size - 1:
+ # handle final layernorm
+ for weight_or_bias in ["weight", "bias"]:
+ params = state_dict[f"transformer.ln_f.{weight_or_bias}"].to(dtype)
+ layer_name = f"final_layernorm.{weight_or_bias}"
+ for i in range(args.target_tensor_model_parallel_size):
+ params_dict = get_element_from_dict_by_path(output_state_dict[i], "model.language_model.encoder")
+ params_dict[layer_name] = params
+
+ # add the LM head
+ for i in range(args.target_tensor_model_parallel_size):
+ params_dict = get_element_from_dict_by_path(output_state_dict[i], "model.word_embeddings_for_head")
+ params_dict["weight"] = out_word_embed[i].clone()
+
+ # saving the state dict as per the tp_rank and pp_rank
+ for tp_rank in range(args.target_tensor_model_parallel_size):
+ output_state_dict[tp_rank]["checkpoint_version"] = 3.0
+ output_state_dict[tp_rank]["args"] = margs
+ checkpoint_dir = (
+ f"mp_rank_{tp_rank:02d}"
+ if args.target_pipeline_model_parallel_size == 1
+ else f"mp_rank_{tp_rank:02d}_{pp_rank:03d}"
+ )
+ if args.use_distributed_optimizer:
+ checkpoint_name = "model_rng.pt"
+ else:
+ checkpoint_name = "model_optim_rng.pt"
+ output_state_dict[tp_rank]["optimizer"] = dummy_optim_state_dict["optimizer"]
+ checkpoint_dir = os.path.join(release_dir, checkpoint_dir)
+ os.makedirs(checkpoint_dir, exist_ok=True)
+ checkpoint_path = os.path.join(checkpoint_dir, checkpoint_name)
+ if args.print_checkpoint_structure:
+ print(
+ f"Checkpoint structure of model state dict shard belonging to TP rank {tp_rank} and PP rank"
+ f" {pp_rank}:"
+ )
+ recursive_print(None, output_state_dict[tp_rank])
+ torch.save(output_state_dict[tp_rank], checkpoint_path)
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser = add_checkpointing_args(parser)
+ parser = add_megatron_checkpoint_args(parser)
+ parser = add_transformers_checkpoint_args(parser)
+ args = parser.parse_args()
+ if args.convert_checkpoint_from_megatron_to_transformers:
+ convert_checkpoint_from_megatron_to_transformers(args)
+ else:
+ convert_checkpoint_from_transformers_to_megatron(args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py b/venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..38060f8af5c7b0399f710eda2389cffd3669ea0d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py
@@ -0,0 +1,358 @@
+####################################################################################################
+
+# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+####################################################################################################
+
+#
+# Note: If when running this conversion script you're getting an exception:
+# ModuleNotFoundError: No module named 'megatron.model.enums'
+# you need to tell python where to find the clone of Megatron-LM, e.g.:
+#
+# cd /tmp
+# git clone https://github.com/NVIDIA/Megatron-LM
+# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
+#
+# if you already have it cloned elsewhere, simply point the path at that existing clone
+#
+# If the training was done using a Megatron-LM fork, e.g.,
+# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
+# in your path, i.e., /path/to/Megatron-DeepSpeed/
+#
+
+import argparse
+import os
+import re
+import zipfile
+
+import torch
+
+from transformers import AutoTokenizer, GPT2Config
+
+
+####################################################################################################
+
+
+def recursive_print(name, val, spaces=0):
+ # Format the message.
+ if name is None:
+ msg = None
+ else:
+ fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
+ msg = fmt.format(name)
+
+ # Print and recurse (if needed).
+ if isinstance(val, dict):
+ if msg is not None:
+ print(msg)
+ for k in val.keys():
+ recursive_print(k, val[k], spaces + 2)
+ elif isinstance(val, torch.Tensor):
+ print(msg, ":", val.size())
+ else:
+ print(msg, ":", val)
+
+
+def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
+ # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
+ # for compatibility with later versions of NVIDIA Megatron-LM.
+ # The inverse operation is performed inside Megatron-LM to read checkpoints:
+ # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
+ # If param is the weight tensor of the self-attention block, the returned tensor
+ # will have to be transposed one more time to be read by HuggingFace GPT2.
+ input_shape = param.size()
+ if checkpoint_version == 1.0:
+ # version 1.0 stores [num_heads * hidden_size * num_splits, :]
+ saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
+ param = param.view(*saved_shape)
+ param = param.transpose(0, 2)
+ param = param.transpose(1, 2).contiguous()
+ elif checkpoint_version >= 2.0:
+ # other versions store [num_heads * num_splits * hidden_size, :]
+ saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
+ param = param.view(*saved_shape)
+ param = param.transpose(0, 1).contiguous()
+ param = param.view(*input_shape)
+ return param
+
+
+####################################################################################################
+
+
+def convert_megatron_checkpoint(args, input_state_dict, config):
+ # The converted output model.
+ output_state_dict = {}
+
+ # old versions did not store training args
+ ds_args = input_state_dict.get("args", None)
+ if ds_args is not None:
+ # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
+ # from pprint import pprint
+ # pprint(vars(ds_args))
+
+ config.vocab_size = ds_args.padded_vocab_size
+ config.n_positions = ds_args.max_position_embeddings
+ config.n_embd = ds_args.hidden_size
+ config.n_layer = ds_args.num_layers
+ config.n_head = ds_args.num_attention_heads
+ config.n_inner = ds_args.ffn_hidden_size
+ # pprint(config)
+
+ # The number of heads.
+ heads = config.n_head
+ # The hidden_size per head.
+ hidden_size_per_head = config.n_embd // config.n_head
+ # Megatron-LM checkpoint version
+ if "checkpoint_version" in input_state_dict.keys():
+ checkpoint_version = input_state_dict["checkpoint_version"]
+ else:
+ checkpoint_version = 0.0
+
+ # The model.
+ model = input_state_dict["model"]
+ # The language model.
+ lm = model["language_model"]
+ # The embeddings.
+ embeddings = lm["embedding"]
+
+ # The word embeddings.
+ word_embeddings = embeddings["word_embeddings"]["weight"]
+ # Truncate the embedding table to vocab_size rows.
+ word_embeddings = word_embeddings[: config.vocab_size, :]
+ output_state_dict["transformer.wte.weight"] = word_embeddings
+
+ # The position embeddings.
+ pos_embeddings = embeddings["position_embeddings"]["weight"]
+ # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
+ n_positions = pos_embeddings.size(0)
+ if n_positions != config.n_positions:
+ raise ValueError(
+ f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
+ )
+ # Store the position embeddings.
+ output_state_dict["transformer.wpe.weight"] = pos_embeddings
+
+ # The transformer.
+ transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
+
+ # The regex to extract layer names.
+ layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
+
+ # The simple map of names for "automated" rules.
+ megatron_to_transformers = {
+ "attention.dense": ".attn.c_proj.",
+ "self_attention.dense": ".attn.c_proj.",
+ "mlp.dense_h_to_4h": ".mlp.c_fc.",
+ "mlp.dense_4h_to_h": ".mlp.c_proj.",
+ }
+
+ # Extract the layers.
+ for key, val in transformer.items():
+ # Match the name.
+ m = layer_re.match(key)
+
+ # Stop if that's not a layer
+ if m is None:
+ break
+
+ # The index of the layer.
+ layer_idx = int(m.group(1))
+ # The name of the operation.
+ op_name = m.group(2)
+ # Is it a weight or a bias?
+ weight_or_bias = m.group(3)
+
+ # The name of the layer.
+ layer_name = f"transformer.h.{layer_idx}"
+
+ # For layernorm(s), simply store the layer norm.
+ if op_name.endswith("layernorm"):
+ ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
+ output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
+
+ # Transpose the QKV matrix.
+ elif (
+ op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
+ ) and weight_or_bias == "weight":
+ # Insert a tensor of 1x1xDxD bias.
+ causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
+ 1, 1, n_positions, n_positions
+ )
+ output_state_dict[layer_name + ".attn.bias"] = causal_mask
+
+ # Insert a "dummy" tensor for masked_bias.
+ masked_bias = torch.tensor(-1e4, dtype=torch.float16)
+ output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
+
+ out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
+ # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
+ out_val = out_val.transpose(0, 1).contiguous()
+ # Store.
+ output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
+
+ # Transpose the bias.
+ elif (
+ op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
+ ) and weight_or_bias == "bias":
+ out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
+ # Store. No change of shape.
+ output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
+
+ # Transpose the weights.
+ elif weight_or_bias == "weight":
+ out_name = megatron_to_transformers[op_name]
+ output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)
+
+ # Copy the bias.
+ elif weight_or_bias == "bias":
+ out_name = megatron_to_transformers[op_name]
+ output_state_dict[layer_name + out_name + "bias"] = val
+
+ # DEBUG.
+ assert config.n_layer == layer_idx + 1
+
+ # The final layernorm.
+ output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
+ output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]
+
+    # For the LM head, transformers expects the weight matrix to be tied to the word embeddings.
+ output_state_dict["lm_head.weight"] = word_embeddings
+
+ # It should be done!
+ return output_state_dict
+
+
+####################################################################################################
+
+
+def main():
+ # Create the argument parser.
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--print-checkpoint-structure", action="store_true")
+ parser.add_argument(
+ "path_to_checkpoint",
+ type=str,
+ help="Path to the checkpoint file (.zip archive or direct .pt file)",
+ )
+ parser.add_argument(
+ "--config_file",
+ default="",
+ type=str,
+ help="An optional config json file describing the pre-trained model.",
+ )
+ args = parser.parse_args()
+
+ # Extract the basename.
+ basename = os.path.dirname(args.path_to_checkpoint)
+
+ # Load the model.
+ # the .zip is very optional, let's keep it for backward compatibility
+ print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
+ if args.path_to_checkpoint.endswith(".zip"):
+ with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
+ with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
+ input_state_dict = torch.load(pytorch_dict, map_location="cpu")
+ else:
+ input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")
+
+ ds_args = input_state_dict.get("args", None)
+
+ # Read the config, or default to the model released by NVIDIA.
+ if args.config_file == "":
+ if ds_args is not None:
+ if ds_args.bias_gelu_fusion:
+ activation_function = "gelu_fast"
+ elif ds_args.openai_gelu:
+ activation_function = "gelu_new"
+ else:
+ activation_function = "gelu"
+ else:
+ # in the very early days this used to be "gelu_new"
+ activation_function = "gelu_new"
+
+ # Spell out all parameters in case the defaults change.
+ config = GPT2Config(
+ vocab_size=50257,
+ n_positions=1024,
+ n_embd=1024,
+ n_layer=24,
+ n_head=16,
+ n_inner=4096,
+ activation_function=activation_function,
+ resid_pdrop=0.1,
+ embd_pdrop=0.1,
+ attn_pdrop=0.1,
+ layer_norm_epsilon=1e-5,
+ initializer_range=0.02,
+ summary_type="cls_index",
+ summary_use_proj=True,
+ summary_activation=None,
+ summary_proj_to_labels=True,
+ summary_first_dropout=0.1,
+ scale_attn_weights=True,
+ use_cache=True,
+ bos_token_id=50256,
+ eos_token_id=50256,
+ )
+ else:
+ config = GPT2Config.from_json_file(args.config_file)
+
+ config.architectures = ["GPT2LMHeadModel"]
+
+ # Convert.
+ print("Converting")
+ output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)
+
+ # Print the structure of converted state dict.
+ if args.print_checkpoint_structure:
+ recursive_print(None, output_state_dict)
+
+ # Add tokenizer class info to config
+ # (see https://github.com/huggingface/transformers/issues/13906)
+ if ds_args is not None:
+ tokenizer_type = ds_args.tokenizer_type
+ if tokenizer_type == "GPT2BPETokenizer":
+ tokenizer_model_name = "openai-community/gpt2"
+ elif tokenizer_type == "PretrainedFromHF":
+ tokenizer_model_name = ds_args.tokenizer_name_or_path
+ else:
+ raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
+ else:
+ tokenizer_model_name = "openai-community/gpt2"
+
+ tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
+ tokenizer_class = type(tokenizer).__name__
+ config.tokenizer_class = tokenizer_class
+
+ # Store the config to file.
+ print("Saving config")
+ config.save_pretrained(basename)
+
+ # Save tokenizer based on args
+ print(f"Adding {tokenizer_class} tokenizer files")
+ tokenizer.save_pretrained(basename)
+
+ # Store the state_dict to file.
+ output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
+ print(f'Saving checkpoint to "{output_checkpoint_file}"')
+ torch.save(output_state_dict, output_checkpoint_file)
+
+
+####################################################################################################
+
+if __name__ == "__main__":
+ main()
+
+####################################################################################################
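+
+# A minimal usage sketch (the script name and checkpoint path below are placeholders):
+#
+#   python convert_megatron_gpt2_checkpoint.py \
+#       --print-checkpoint-structure \
+#       /path/to/megatron/checkpoint/release/mp_rank_00/model_optim_rng.pt
+#
+# The converted config.json, tokenizer files and pytorch_model.bin are written to the
+# directory that contains the input checkpoint (see `basename` in `main`).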
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3194f99931a4d689f6bab0cf3cb9dc6abaf11fb8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__init__.py
@@ -0,0 +1,108 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_sentencepiece_available,
+ is_tf_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
+ "feature_extraction_speech_to_text": ["Speech2TextFeatureExtractor"],
+ "processing_speech_to_text": ["Speech2TextProcessor"],
+}
+
+try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_speech_to_text"] = [
+ "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFSpeech2TextForConditionalGeneration",
+ "TFSpeech2TextModel",
+ "TFSpeech2TextPreTrainedModel",
+ ]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_speech_to_text"] = [
+ "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "Speech2TextForConditionalGeneration",
+ "Speech2TextModel",
+ "Speech2TextPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
+ from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
+ from .processing_speech_to_text import Speech2TextProcessor
+
+ try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_speech_to_text import Speech2TextTokenizer
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_speech_to_text import (
+ TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFSpeech2TextForConditionalGeneration,
+ TFSpeech2TextModel,
+ TFSpeech2TextPreTrainedModel,
+ )
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_speech_to_text import (
+ SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ Speech2TextForConditionalGeneration,
+ Speech2TextModel,
+ Speech2TextPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2cc8cab9da07b425c7c2e4cdf5c21da22ebeb839
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/configuration_speech_to_text.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/configuration_speech_to_text.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5a05eccff7ad29a94e92139c682d0c9c4d19e959
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/configuration_speech_to_text.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/convert_s2t_fairseq_to_tfms.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/convert_s2t_fairseq_to_tfms.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2fcc2799ddf908020a71d80be542fb21a334681c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/convert_s2t_fairseq_to_tfms.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/feature_extraction_speech_to_text.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/feature_extraction_speech_to_text.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..42eee6702e7fb1d73a299ee6483590fdbae38cf8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/feature_extraction_speech_to_text.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/modeling_speech_to_text.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/modeling_speech_to_text.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8f161465187be3fba71120e593be0b91010e3691
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/modeling_speech_to_text.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/modeling_tf_speech_to_text.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/modeling_tf_speech_to_text.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..88e5c502fd460864e0071fcf35281ecdaf55fe0b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/modeling_tf_speech_to_text.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/processing_speech_to_text.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/processing_speech_to_text.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e64f7b11d3758de5d0a30dd97877fb958c189fc2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/processing_speech_to_text.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/tokenization_speech_to_text.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/tokenization_speech_to_text.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..568730a2f9ef63a7613a177e8ba7161c3f7d88b0
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/__pycache__/tokenization_speech_to_text.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/configuration_speech_to_text.py b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/configuration_speech_to_text.py
new file mode 100644
index 0000000000000000000000000000000000000000..67dee8dc0bc361e5046052263651d36273d41d7f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/configuration_speech_to_text.py
@@ -0,0 +1,199 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Speech2Text model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class Speech2TextConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`Speech2TextModel`]. It is used to instantiate a
+ Speech2Text model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the Speech2Text
+ [facebook/s2t-small-librispeech-asr](https://huggingface.co/facebook/s2t-small-librispeech-asr) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 10000):
+ Vocabulary size of the Speech2Text model. Defines the number of different tokens that can be represented by
+ the `input_ids` passed when calling [`Speech2TextModel`]
+ encoder_layers (`int`, *optional*, defaults to 12):
+ Number of encoder layers.
+ encoder_ffn_dim (`int`, *optional*, defaults to 2048):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder.
+ encoder_attention_heads (`int`, *optional*, defaults to 4):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ decoder_layers (`int`, *optional*, defaults to 6):
+ Number of decoder layers.
+ decoder_ffn_dim (`int`, *optional*, defaults to 2048):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
+ decoder_attention_heads (`int`, *optional*, defaults to 4):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for
+ more details.
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for
+ more details.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether the model should return the last key/values attentions (not used by all models).
+ is_encoder_decoder (`bool`, *optional*, defaults to `True`):
+ Whether the model is set up as an encoder-decoder architecture for sequence-to-sequence tasks.
+ activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ d_model (`int`, *optional*, defaults to 256):
+ Dimensionality of the layers and the pooler layer.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for activations inside the fully connected layer.
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ decoder_start_token_id (`int`, *optional*, defaults to 2):
+ The initial token ID of the decoder when decoding sequences.
+ scale_embedding (`bool`, *optional*, defaults to `True`):
+ Whether the embeddings are scaled by the square root of `d_model`.
+ pad_token_id (`int`, *optional*, defaults to 1):
+ Padding token id.
+ bos_token_id (`int`, *optional*, defaults to 0):
+ The id of the beginning-of-sequence token.
+ eos_token_id (`int`, *optional*, defaults to 2):
+ The id of the end-of-sequence token.
+ max_source_positions (`int`, *optional*, defaults to 6000):
+ The maximum sequence length of log-mel filter-bank features that this model might ever be used with.
+ max_target_positions (`int`, *optional*, defaults to 1024):
+ The maximum sequence length that this model might ever be used with. Typically, set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ num_conv_layers (`int`, *optional*, defaults to 2):
+ Number of 1D convolutional layers in the conv module.
+ conv_kernel_sizes (`Tuple[int]`, *optional*, defaults to `(5, 5)`):
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the conv module. The length
+ of `conv_kernel_sizes` has to match `num_conv_layers`.
+ conv_channels (`int`, *optional*, defaults to 1024):
+ An integer defining the number of output channels of each convolutional layer except the final one in the
+ conv module.
+ input_feat_per_channel (`int`, *optional*, defaults to 80):
+ An integer specifying the size of the feature vector. This is also the dimension of the log-mel filter-bank
+ features.
+ input_channels (`int`, *optional*, defaults to 1):
+ An integer specifying the number of input channels of the input feature vector.
+
+ Example:
+
+ ```python
+ >>> from transformers import Speech2TextConfig, Speech2TextModel
+
+ >>> # Initializing a Speech2Text s2t_transformer_s style configuration
+ >>> configuration = Speech2TextConfig()
+
+ >>> # Initializing a model (with random weights) from the s2t_transformer_s style configuration
+ >>> model = Speech2TextModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "speech_to_text"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
+
+ def __init__(
+ self,
+ vocab_size=10000,
+ encoder_layers=12,
+ encoder_ffn_dim=2048,
+ encoder_attention_heads=4,
+ decoder_layers=6,
+ decoder_ffn_dim=2048,
+ decoder_attention_heads=4,
+ encoder_layerdrop=0.0,
+ decoder_layerdrop=0.0,
+ use_cache=True,
+ is_encoder_decoder=True,
+ activation_function="relu",
+ d_model=256,
+ dropout=0.1,
+ attention_dropout=0.0,
+ activation_dropout=0.0,
+ init_std=0.02,
+ decoder_start_token_id=2,
+ scale_embedding=True,
+ pad_token_id=1,
+ bos_token_id=0,
+ eos_token_id=2,
+ max_source_positions=6000,
+ max_target_positions=1024,
+ num_conv_layers=2,
+ conv_kernel_sizes=(5, 5),
+ conv_channels=1024,
+ input_feat_per_channel=80,
+ input_channels=1,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.d_model = d_model
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.encoder_layers = encoder_layers
+ self.encoder_attention_heads = encoder_attention_heads
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.decoder_layers = decoder_layers
+ self.decoder_attention_heads = decoder_attention_heads
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.activation_function = activation_function
+ self.init_std = init_std
+ self.encoder_layerdrop = encoder_layerdrop
+ self.decoder_layerdrop = decoder_layerdrop
+ self.use_cache = use_cache
+ self.num_hidden_layers = encoder_layers
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
+ self.max_source_positions = max_source_positions
+ self.max_target_positions = max_target_positions
+ self.num_conv_layers = num_conv_layers
+ self.conv_kernel_sizes = list(conv_kernel_sizes)
+ self.conv_channels = conv_channels
+ self.input_feat_per_channel = input_feat_per_channel
+ self.input_channels = input_channels
+
+ if len(self.conv_kernel_sizes) != self.num_conv_layers:
+ raise ValueError(
+ "Configuration for convolutional module is incorrect. "
+ "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
+ f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
+ f"`config.num_conv_layers = {self.num_conv_layers}`."
+ )
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ is_encoder_decoder=is_encoder_decoder,
+ decoder_start_token_id=decoder_start_token_id,
+ **kwargs,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/convert_s2t_fairseq_to_tfms.py b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/convert_s2t_fairseq_to_tfms.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb4d852624790998657161f6b15cd9572aca7f78
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/convert_s2t_fairseq_to_tfms.py
@@ -0,0 +1,121 @@
+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+
+import torch
+from torch import nn
+
+from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
+
+
+def remove_ignore_keys_(state_dict):
+ ignore_keys = [
+ "encoder.version",
+ "decoder.version",
+ "model.encoder.version",
+ "model.decoder.version",
+ "decoder.output_projection.weight",
+ "_float_tensor",
+ "encoder.embed_positions._float_tensor",
+ "decoder.embed_positions._float_tensor",
+ ]
+ for k in ignore_keys:
+ state_dict.pop(k, None)
+
+
+def rename_keys(s_dict):
+ keys = list(s_dict.keys())
+ for key in keys:
+ if "transformer_layers" in key:
+ s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
+ elif "subsample" in key:
+ s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
+
+
+def make_linear_from_emb(emb):
+ vocab_size, emb_size = emb.weight.shape
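+ # Note: the in/out feature sizes passed to nn.Linear below do not matter, because the weight
+ # tensor is replaced right after construction; the resulting layer reuses the embedding's
+ # weight (weight tying) and maps d_model -> vocab_size.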
+ lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
+ lin_layer.weight.data = emb.weight.data
+ return lin_layer
+
+
+def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
+ m2m_100 = torch.load(checkpoint_path, map_location="cpu")
+ args = m2m_100["args"]
+ state_dict = m2m_100["model"]
+ lm_head_weights = state_dict["decoder.output_projection.weight"]
+
+ remove_ignore_keys_(state_dict)
+ rename_keys(state_dict)
+
+ vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
+
+ tie_embeds = args.share_decoder_input_output_embed
+
+ conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
+ config = Speech2TextConfig(
+ vocab_size=vocab_size,
+ max_source_positions=args.max_source_positions,
+ max_target_positions=args.max_target_positions,
+ encoder_layers=args.encoder_layers,
+ decoder_layers=args.decoder_layers,
+ encoder_attention_heads=args.encoder_attention_heads,
+ decoder_attention_heads=args.decoder_attention_heads,
+ encoder_ffn_dim=args.encoder_ffn_embed_dim,
+ decoder_ffn_dim=args.decoder_ffn_embed_dim,
+ d_model=args.encoder_embed_dim,
+ dropout=args.dropout,
+ attention_dropout=args.attention_dropout,
+ activation_dropout=args.activation_dropout,
+ activation_function="relu",
+ num_conv_layers=len(conv_kernel_sizes),
+ conv_channels=args.conv_channels,
+ conv_kernel_sizes=conv_kernel_sizes,
+ input_feat_per_channel=args.input_feat_per_channel,
+ input_channels=args.input_channels,
+ tie_word_embeddings=tie_embeds,
+ num_beams=5,
+ max_length=200,
+ use_cache=True,
+ decoder_start_token_id=2,
+ early_stopping=True,
+ )
+
+ model = Speech2TextForConditionalGeneration(config)
+ missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
+ if len(missing) > 0 and not set(missing) <= {
+ "encoder.embed_positions.weights",
+ "decoder.embed_positions.weights",
+ }:
+ raise ValueError(
+ "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
+ f" but all the following weights are missing {missing}"
+ )
+
+ if tie_embeds:
+ model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
+ else:
+ model.lm_head.weight.data = lm_head_weights
+
+ model.save_pretrained(pytorch_dump_folder_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
+ args = parser.parse_args()
+ convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/feature_extraction_speech_to_text.py b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
new file mode 100644
index 0000000000000000000000000000000000000000..193f2dda0946f1ca9c121652c95e475f38b3bf0b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/feature_extraction_speech_to_text.py
@@ -0,0 +1,297 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Feature extractor class for Speech2Text
+"""
+
+from typing import List, Optional, Union
+
+import numpy as np
+
+from ...audio_utils import mel_filter_bank, spectrogram, window_function
+from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
+from ...feature_extraction_utils import BatchFeature
+from ...utils import PaddingStrategy, TensorType, is_speech_available, logging
+
+
+if is_speech_available():
+ import torch
+ import torchaudio.compliance.kaldi as ta_kaldi
+
+logger = logging.get_logger(__name__)
+
+
+class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
+ r"""
+ Constructs a Speech2Text feature extractor.
+
+ This feature extractor inherits from [`SequenceFeatureExtractor`], which contains most of the main methods. Users
+ should refer to this superclass for more information regarding those methods.
+
+ This class extracts mel-filter bank features from raw speech using TorchAudio if installed or using numpy
+ otherwise, and applies utterance-level cepstral mean and variance normalization to the extracted features.
+
+ Args:
+ feature_size (`int`, *optional*, defaults to 80):
+ The feature dimension of the extracted features.
+ sampling_rate (`int`, *optional*, defaults to 16000):
+ The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
+ num_mel_bins (`int`, *optional*, defaults to 80):
+ Number of Mel-frequency bins.
+ padding_value (`float`, *optional*, defaults to 0.0):
+ The value that is used to fill the padding vectors.
+ do_ceptral_normalize (`bool`, *optional*, defaults to `True`):
+ Whether or not to apply utterance-level cepstral mean and variance normalization to extracted features.
+ normalize_means (`bool`, *optional*, defaults to `True`):
+ Whether or not to zero-mean normalize the extracted features.
+ normalize_vars (`bool`, *optional*, defaults to `True`):
+ Whether or not to unit-variance normalize the extracted features.
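+
+ Example (a minimal sketch using a synthetic waveform; real audio would be loaded from a file or dataset):
+
+ ```python
+ >>> import numpy as np
+
+ >>> from transformers import Speech2TextFeatureExtractor
+
+ >>> feature_extractor = Speech2TextFeatureExtractor()
+ >>> waveform = np.random.randn(16000).astype(np.float32)  # ~1 second of audio at 16 kHz
+ >>> inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="np")
+ >>> input_features = inputs["input_features"]  # shape: (batch, num_frames, num_mel_bins)
+ ```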
+ """
+
+ model_input_names = ["input_features", "attention_mask"]
+
+ def __init__(
+ self,
+ feature_size=80,
+ sampling_rate=16000,
+ num_mel_bins=80,
+ padding_value=0.0,
+ do_ceptral_normalize=True,
+ normalize_means=True,
+ normalize_vars=True,
+ **kwargs,
+ ):
+ super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
+ self.num_mel_bins = num_mel_bins
+ self.do_ceptral_normalize = do_ceptral_normalize
+ self.normalize_means = normalize_means
+ self.normalize_vars = normalize_vars
+ self.return_attention_mask = True
+
+ if not is_speech_available():
+ mel_filters = mel_filter_bank(
+ num_frequency_bins=256,
+ num_mel_filters=self.num_mel_bins,
+ min_frequency=20,
+ max_frequency=sampling_rate // 2,
+ sampling_rate=sampling_rate,
+ norm=None,
+ mel_scale="kaldi",
+ triangularize_in_mel_space=True,
+ )
+
+ self.mel_filters = np.pad(mel_filters, ((0, 1), (0, 0)))
+ self.window = window_function(400, "povey", periodic=False)
+
+ def _extract_fbank_features(
+ self,
+ waveform: np.ndarray,
+ ) -> np.ndarray:
+ """
+ Get mel-filter bank features using TorchAudio if it is installed, otherwise a numpy implementation. Note that
+ TorchAudio's Kaldi frontend expects 16-bit signed integer magnitudes, hence the waveform is scaled here and
+ should not be normalized before feature extraction.
+ """
+ waveform = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
+ if is_speech_available():
+ waveform = torch.from_numpy(waveform).unsqueeze(0)
+ features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
+ features = features.numpy()
+ else:
+ waveform = np.squeeze(waveform)
+ features = spectrogram(
+ waveform,
+ self.window,
+ frame_length=400,
+ hop_length=160,
+ fft_length=512,
+ power=2.0,
+ center=False,
+ preemphasis=0.97,
+ mel_filters=self.mel_filters,
+ log_mel="log",
+ mel_floor=1.192092955078125e-07,
+ remove_dc_offset=True,
+ ).T
+ return features
+
+ @staticmethod
+ def utterance_cmvn(
+ x: np.ndarray,
+ input_length: int,
+ normalize_means: Optional[bool] = True,
+ normalize_vars: Optional[bool] = True,
+ padding_value: float = 0.0,
+ ) -> np.ndarray:
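+ # Mean/variance statistics are computed over the first `input_length` frames only;
+ # frames beyond `input_length` are reset to `padding_value` after normalization.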
+ # make sure we normalize float32 arrays
+ if normalize_means:
+ mean = x[:input_length].mean(axis=0)
+ x = np.subtract(x, mean)
+ if normalize_vars:
+ std = x[:input_length].std(axis=0)
+ x = np.divide(x, std)
+
+ if input_length < x.shape[0]:
+ x[input_length:] = padding_value
+
+ # make sure array is in float32
+ x = x.astype(np.float32)
+
+ return x
+
+ def normalize(
+ self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
+ ) -> List[np.ndarray]:
+ lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
+ return [
+ self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
+ for x, n in zip(input_features, lengths)
+ ]
+
+ def __call__(
+ self,
+ raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
+ padding: Union[bool, str, PaddingStrategy] = False,
+ max_length: Optional[int] = None,
+ truncation: bool = False,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ sampling_rate: Optional[int] = None,
+ return_attention_mask: Optional[bool] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+ Main method to featurize and prepare for the model one or several sequence(s).
+
+ Args:
+ raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
+ The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
+ values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
+ stereo, i.e. single float per timestep.
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
+ index) among:
+
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
+ lengths).
+ max_length (`int`, *optional*):
+ Maximum length of the returned list and optionally padding length (see above).
+ truncation (`bool`):
+ Activates truncation to cut input sequences longer than *max_length* to *max_length*.
+ pad_to_multiple_of (`int`, *optional*):
+ If set will pad the sequence to a multiple of the provided value.
+
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
+ `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
+ return_attention_mask (`bool`, *optional*):
+ Whether to return the attention mask. If left to the default, will return the attention mask according
+ to the specific feature_extractor's default.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ For Speech2TextTransformer models, `attention_mask` should always be passed for batched inference, to
+ avoid subtle bugs.
+
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
+ If set, will return tensors instead of list of python integers. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+ sampling_rate (`int`, *optional*):
+ The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
+ `sampling_rate` at the forward call to prevent silent errors.
+ padding_value (`float`, defaults to 0.0):
+ The value that is used to fill the padding values / vectors.
+ """
+
+ if sampling_rate is not None:
+ if sampling_rate != self.sampling_rate:
+ raise ValueError(
+ f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
+ f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
+ f" {self.sampling_rate} and not {sampling_rate}."
+ )
+ else:
+ logger.warning(
+ "It is strongly recommended to pass the `sampling_rate` argument to this function. "
+ "Failing to do so can result in silent errors that might be hard to debug."
+ )
+
+ is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
+ if is_batched_numpy and len(raw_speech.shape) > 2:
+ raise ValueError(f"Only mono-channel audio is supported for input to {self}")
+ is_batched = is_batched_numpy or (
+ isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
+ )
+
+ if is_batched:
+ raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
+ elif not is_batched and not isinstance(raw_speech, np.ndarray):
+ raw_speech = np.asarray(raw_speech, dtype=np.float32)
+ elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
+ raw_speech = raw_speech.astype(np.float32)
+
+ # always return batch
+ if not is_batched:
+ raw_speech = [raw_speech]
+
+ # extract fbank features
+ features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
+
+ # convert into correct format for padding
+ encoded_inputs = BatchFeature({"input_features": features})
+
+ padded_inputs = self.pad(
+ encoded_inputs,
+ padding=padding,
+ max_length=max_length,
+ truncation=truncation,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ **kwargs,
+ )
+
+ # make sure list is in array format
+ input_features = padded_inputs.get("input_features")
+ if isinstance(input_features[0], list):
+ padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
+
+ attention_mask = padded_inputs.get("attention_mask")
+ if attention_mask is not None:
+ padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
+
+ # Utterance-level cepstral mean and variance normalization
+ if self.do_ceptral_normalize:
+ attention_mask = (
+ np.array(attention_mask, dtype=np.int32)
+ if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
+ else None
+ )
+ padded_inputs["input_features"] = self.normalize(
+ padded_inputs["input_features"], attention_mask=attention_mask
+ )
+
+ if return_tensors is not None:
+ padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
+
+ return padded_inputs
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/modeling_speech_to_text.py b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/modeling_speech_to_text.py
new file mode 100644
index 0000000000000000000000000000000000000000..6898cc081fe91f122d1a5a7e059251b7a5a25909
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/modeling_speech_to_text.py
@@ -0,0 +1,1370 @@
+# coding=utf-8
+# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Speech2Text model."""
+
+import math
+from typing import Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ Seq2SeqLMOutput,
+ Seq2SeqModelOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_speech_to_text import Speech2TextConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "Speech2TextConfig"
+
+
+from ..deprecated._archive_maps import SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
+def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
+ """
+ Shift input ids one token to the right.
+ """
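+ # e.g. with decoder_start_token_id=2: input_ids [[5, 6, 7]] -> shifted_input_ids [[2, 5, 6]]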
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
+ shifted_input_ids[:, 0] = decoder_start_token_id
+
+ if pad_token_id is None:
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+ return shifted_input_ids
+
+
+class Conv1dSubsampler(nn.Module):
+ """
+ Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation
+ via gated linear units (https://arxiv.org/abs/1911.08460)
+ """
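+ # Each stride-2 convolution roughly halves the time dimension, so with the default two conv
+ # layers an input of T frames is subsampled to roughly T // 4 output positions
+ # (see `Speech2TextPreTrainedModel._get_feat_extract_output_lengths` below).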
+
+ def __init__(self, config):
+ super(Conv1dSubsampler, self).__init__()
+ self.config = config
+ self.num_layers = config.num_conv_layers
+ self.in_channels = config.input_feat_per_channel * config.input_channels
+ self.mid_channels = config.conv_channels
+ self.out_channels = config.d_model
+ self.kernel_sizes = config.conv_kernel_sizes
+
+ self.conv_layers = nn.ModuleList(
+ nn.Conv1d(
+ self.in_channels if i == 0 else self.mid_channels // 2,
+ self.mid_channels if i < self.num_layers - 1 else self.out_channels * 2,
+ kernel_size=k,
+ stride=2,
+ padding=k // 2,
+ )
+ for i, k in enumerate(self.kernel_sizes)
+ )
+
+ def forward(self, input_features):
+ hidden_states = input_features.transpose(1, 2).contiguous() # -> B x (C x D) x T
+ for conv in self.conv_layers:
+ hidden_states = conv(hidden_states)
+ hidden_states = nn.functional.glu(hidden_states, dim=1)
+ hidden_states = hidden_states.transpose(1, 2).contiguous() # -> B x T x (C x D)
+ return hidden_states
+
+
+class Speech2TextSinusoidalPositionalEmbedding(nn.Module):
+ """This module produces sinusoidal positional embeddings of any length."""
+
+ def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
+ super().__init__()
+ self.offset = 2
+ self.embedding_dim = embedding_dim
+ self.padding_idx = padding_idx
+ self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
+
+ def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
+ emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
+ if hasattr(self, "weights"):
+ # in forward put the weights on the correct dtype and device of the param
+ emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
+
+ self.weights = nn.Parameter(emb_weights)
+ self.weights.requires_grad = False
+ self.weights.detach_()
+
+ @staticmethod
+ def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
+ """
+ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the
+ description in Section 3.5 of "Attention Is All You Need".
+ """
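+ # Concretely: emb[pos, i] = sin(pos * 10000^(-i / (half_dim - 1))) for the first half of the
+ # dimensions and cos(...) for the second half, with the `padding_idx` row zeroed out when given.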
+ half_dim = embedding_dim // 2
+ emb = math.log(10000) / (half_dim - 1)
+ emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
+ emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
+ if embedding_dim % 2 == 1:
+ # zero pad
+ emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
+ if padding_idx is not None:
+ emb[padding_idx, :] = 0
+ return emb.to(torch.get_default_dtype())
+
+ @torch.no_grad()
+ def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
+ bsz, seq_len = input_ids.size()
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(
+ input_ids.device
+ )
+
+ # expand embeddings if needed
+ max_pos = self.padding_idx + 1 + seq_len
+ if max_pos > self.weights.size(0):
+ self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
+
+ return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach()
+
+ def create_position_ids_from_input_ids(
+ self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0
+ ):
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
+ symbols are ignored. This is modified from fairseq's `utils.make_positions`.
+
+ Args:
+ input_ids: torch.Tensor
+ padding_idx: int
+ past_key_values_length: int
+ Returns: torch.Tensor
+ """
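+ # e.g. with padding_idx=1 and no past: input_ids [[1, 5, 6]] -> position_ids [[1, 2, 3]]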
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
+ mask = input_ids.ne(padding_idx).int()
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
+ return incremental_indices.long() + padding_idx
+
+
+# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Speech2Text
+class Speech2TextAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ is_causal: bool = False,
+ config: Optional[Speech2TextConfig] = None,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ self.config = config
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+ self.is_causal = is_causal
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.reshape(*proj_shape)
+ value_states = value_states.reshape(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.size() != (self.num_heads,):
+ raise ValueError(
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+ # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
+
+SPEECH_TO_TEXT_ATTENTION_CLASSES = {"eager": Speech2TextAttention}
+
+
+# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Speech2Text, MBART->SPEECH_TO_TEXT
+class Speech2TextEncoderLayer(nn.Module):
+ def __init__(self, config: Speech2TextConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ self.self_attn = SPEECH_TO_TEXT_ATTENTION_CLASSES[config._attn_implementation](
+ embed_dim=self.embed_dim,
+ num_heads=config.encoder_attention_heads,
+ dropout=config.attention_dropout,
+ config=config,
+ )
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: torch.Tensor,
+ layer_head_mask: torch.Tensor,
+ output_attentions: bool = False,
+ ) -> torch.Tensor:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states, attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
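+ # Guard against float16 overflow: clamp extreme activations to just below the largest finite value.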
+ if hidden_states.dtype == torch.float16 and (
+ torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
+ ):
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Speech2Text, MBART->SPEECH_TO_TEXT
+class Speech2TextDecoderLayer(nn.Module):
+ def __init__(self, config: Speech2TextConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ self.self_attn = SPEECH_TO_TEXT_ATTENTION_CLASSES[config._attn_implementation](
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ is_causal=True,
+ config=config,
+ )
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.encoder_attn = SPEECH_TO_TEXT_ATTENTION_CLASSES[config._attn_implementation](
+ self.embed_dim,
+ config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ config=config,
+ )
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = True,
+ ) -> torch.Tensor:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ encoder_hidden_states (`torch.FloatTensor`):
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`.
+ cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
+ size `(decoder_attention_heads,)`.
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+class Speech2TextPreTrainedModel(PreTrainedModel):
+ config_class = Speech2TextConfig
+ base_model_prefix = "model"
+ main_input_name = "input_features"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ std = self.config.init_std
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
+ """
+ Computes the output length of the convolutional layers
+ """
+ for i in range(self.config.num_conv_layers):
+ input_lengths = (input_lengths - 1) // 2 + 1
+
+ return input_lengths
+
+ def _get_feature_vector_attention_mask(self, feature_vector_length, attention_mask):
+ # generate() can create a 3D attention mask because of the shape of input_features,
+ # so convert it to 2D if that's the case
+ if len(attention_mask.shape) > 2:
+ attention_mask = attention_mask[:, :, -1]
+
+ subsampled_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1))
+ bsz = attention_mask.size()[0]
+ attention_mask = torch.zeros(
+ (bsz, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
+ )
+
+ # these two operations make sure that all values
+ # before the output length indices are attended to
+ attention_mask[(torch.arange(bsz, device=attention_mask.device), subsampled_lengths - 1)] = 1
+ attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).long()
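+ # illustrative example: for feature_vector_length = 5 and a subsampled length of 2, the scatter above
+ # gives [0, 1, 0, 0, 0]; flip -> [0, 0, 0, 1, 0]; cumsum -> [0, 0, 0, 1, 1]; flip -> [1, 1, 0, 0, 0],
+ # i.e. the first 2 feature vectors are attended to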
+ return attention_mask
+
+
+SPEECH_TO_TEXT_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`Speech2TextConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+SPEECH_TO_TEXT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, feature_size)`):
+ Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained
+ by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.*
+ via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the
+ [`AutoFeatureExtractor`] should be used for extracting the fbank features, padding and conversion into a
+ tensor of type `torch.FloatTensor`. See [`~Speech2TextFeatureExtractor.__call__`]
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
+ 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`Speech2TextTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ Speech2Text uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+
+ If you want to change padding behavior, you should read
+ [`modeling_speech_to_text._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+ input (see `past_key_values`). This is useful if you want more control over how to convert
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class Speech2TextEncoder(Speech2TextPreTrainedModel):
+ """
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
+ [`Speech2TextEncoderLayer`].
+
+ Args:
+ config: Speech2TextConfig
+ embed_tokens (nn.Embedding): output embedding
+ """
+
+ def __init__(self, config: Speech2TextConfig):
+ super().__init__(config)
+
+ self.dropout = config.dropout
+ self.layerdrop = config.encoder_layerdrop
+
+ embed_dim = config.d_model
+ self.padding_idx = config.pad_token_id
+ self.max_source_positions = config.max_source_positions
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
+
+ self.conv = Conv1dSubsampler(config)
+
+ self.embed_positions = Speech2TextSinusoidalPositionalEmbedding(
+ self.max_source_positions,
+ embed_dim,
+ self.padding_idx,
+ )
+ self.layers = nn.ModuleList([Speech2TextEncoderLayer(config) for _ in range(config.encoder_layers)])
+ self.layer_norm = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ input_features,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ ):
+ r"""
+ Args:
+ input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, feature_size)`):
+ Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be
+ obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a
+ `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into
+ `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the fbank features,
+ padding and conversion into a tensor of type `torch.FloatTensor`. See
+ [`~Speech2TextFeatureExtractor.__call__`]
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in
+ `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ inputs_embeds = self.conv(input_features)
+ inputs_embeds = self.embed_scale * inputs_embeds
+
+ # subsample attention mask if necessary
+ if attention_mask is not None:
+ attention_mask = self._get_feature_vector_attention_mask(inputs_embeds.shape[1], attention_mask)
+ padding_mask = attention_mask.ne(1).long()
+ else:
+ padding_mask = torch.zeros(inputs_embeds.shape[:2], dtype=torch.long, device=inputs_embeds.device)
+
+ embed_pos = self.embed_positions(padding_mask)
+
+ hidden_states = inputs_embeds + embed_pos
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ # expand attention_mask
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ assert head_mask.size()[0] == (
+ len(self.layers)
+ ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
+
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ to_drop = False
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop: # skip the layer
+ to_drop = True
+
+ if to_drop:
+ layer_outputs = (None, None)
+ else:
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ hidden_states = self.layer_norm(hidden_states)
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+class Speech2TextDecoder(Speech2TextPreTrainedModel):
+ """
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`Speech2TextDecoderLayer`]
+
+ Args:
+ config: Speech2TextConfig
+ embed_tokens (nn.Embedding): output embedding
+ """
+
+ def __init__(self, config: Speech2TextConfig):
+ super().__init__(config)
+ self.dropout = config.dropout
+ self.layerdrop = config.decoder_layerdrop
+ self.padding_idx = config.pad_token_id
+ self.max_target_positions = config.max_target_positions
+ self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
+
+ self.embed_positions = Speech2TextSinusoidalPositionalEmbedding(
+ self.max_target_positions,
+ config.d_model,
+ self.padding_idx,
+ )
+
+ self.layers = nn.ModuleList([Speech2TextDecoderLayer(config) for _ in range(config.decoder_layers)])
+
+ self.layer_norm = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ def forward(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ head_mask=None,
+ cross_attn_head_mask=None,
+ past_key_values=None,
+ inputs_embeds=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ ):
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`Speech2TextTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values
+ selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
+ )
+
+ # expand encoder attention mask
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _prepare_4d_attention_mask(
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
+ )
+
+ # embed positions
+ positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length)
+
+ hidden_states = inputs_embeds + positions
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache = False`..."
+ )
+ use_cache = False
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+ next_decoder_cache = () if use_cache else None
+
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
+ if attn_mask is not None:
+ assert attn_mask.size()[0] == (len(self.layers)), (
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+ f" {attn_mask.size()[0]}."
+ )
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop:
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
+ None,
+ output_attentions,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ cross_attn_layer_head_mask=(
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
+ ),
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[2],)
+
+ hidden_states = self.layer_norm(hidden_states)
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ "The bare Speech2Text Model outputting raw hidden-states without any specific head on top.",
+ SPEECH_TO_TEXT_START_DOCSTRING,
+)
+class Speech2TextModel(Speech2TextPreTrainedModel):
+ def __init__(self, config: Speech2TextConfig):
+ super().__init__(config)
+
+ self.encoder = Speech2TextEncoder(config)
+ self.decoder = Speech2TextDecoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.decoder.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.decoder.embed_tokens = value
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_features: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> import torch
+ >>> from transformers import Speech2TextModel, AutoFeatureExtractor
+ >>> from datasets import load_dataset
+
+ >>> model = Speech2TextModel.from_pretrained("facebook/s2t-small-librispeech-asr")
+ >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/s2t-small-librispeech-asr")
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+ >>> inputs = feature_extractor(
+ ... ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt"
+ ... )
+ >>> input_features = inputs.input_features
+ >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id
+ >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state
+ >>> list(last_hidden_state.shape)
+ [1, 2, 256]
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_features,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ # downsample encoder attention mask
+ if attention_mask is not None:
+ encoder_attention_mask = self._get_feature_vector_attention_mask(
+ encoder_outputs[0].shape[1], attention_mask
+ )
+ else:
+ encoder_attention_mask = None
+
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=encoder_attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return Seq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ "The Speech2Text Model with a language modeling head. Can be used for summarization.",
+ SPEECH_TO_TEXT_START_DOCSTRING,
+)
+class Speech2TextForConditionalGeneration(Speech2TextPreTrainedModel):
+ base_model_prefix = "model"
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config: Speech2TextConfig):
+ super().__init__(config)
+ self.model = Speech2TextModel(config)
+ self.lm_head = nn.Linear(config.d_model, self.config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_encoder(self):
+ return self.model.get_encoder()
+
+ def get_decoder(self):
+ return self.model.get_decoder()
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_features: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]`
+ or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is
+ only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> import torch
+ >>> from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration
+ >>> from datasets import load_dataset
+
+ >>> model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr")
+ >>> processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
+
+
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+
+ >>> inputs = processor(
+ ... ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt"
+ ... )
+ >>> input_features = inputs.input_features
+
+ >>> generated_ids = model.generate(inputs=input_features)
+
+ >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+ >>> transcription
+ 'mister quilter is the apostle of the middle classes and we are glad to welcome his gospel'
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if labels is not None:
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ outputs = self.model(
+ input_features,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ encoder_outputs=encoder_outputs,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ lm_logits = self.lm_head(outputs[0])
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (lm_logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=loss,
+ logits=lm_logits,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past is used
+ if past_key_values is not None:
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
+ return {
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "head_mask": head_mask,
+ "decoder_head_mask": decoder_head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
+ }
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
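+ # illustrative example: with beam search and beam_idx = tensor([2, 0, 1]), every cached key/value
+ # tensor below has its batch (beam) dimension re-ordered so that beam 0 continues from what was beam 2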
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/modeling_tf_speech_to_text.py b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
new file mode 100644
index 0000000000000000000000000000000000000000..8fd6bd21a593c90d671a595b5faa056a97e71f19
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/modeling_tf_speech_to_text.py
@@ -0,0 +1,1607 @@
+# coding=utf-8
+# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TensorFlow Speech2Text model."""
+
+
+from __future__ import annotations
+
+import random
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation, glu
+from ...modeling_tf_outputs import (
+ TFBaseModelOutput,
+ TFBaseModelOutputWithPastAndCrossAttentions,
+ TFSeq2SeqLMOutput,
+ TFSeq2SeqModelOutput,
+)
+from ...modeling_tf_utils import (
+ TFCausalLanguageModelingLoss,
+ TFModelInputType,
+ TFPreTrainedModel,
+ TFSharedEmbeddings,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_speech_to_text import Speech2TextConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "Speech2TextConfig"
+_CHECKPOINT_FOR_DOC = "facebook/s2t-small-librispeech-asr"
+
+
+from ..deprecated._archive_maps import TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+LARGE_NEGATIVE = -1e8
+
+
+# Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right
+def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
+ pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
+ decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
+ start_tokens = tf.fill(
+ (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype)
+ )
+ shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids = tf.where(
+ shifted_input_ids == -100,
+ tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)),
+ shifted_input_ids,
+ )
+
+ # "Verify that `labels` has only positive values and -100"
+ assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))
+
+ # Make sure the assertion op is called by wrapping the result in an identity no-op
+ with tf.control_dependencies([assert_gte0]):
+ shifted_input_ids = tf.identity(shifted_input_ids)
+
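+ # illustrative example: labels [[5, 6, -100]] with pad_token_id = 1 and decoder_start_token_id = 2
+ # are shifted to [[2, 5, 6]]; any -100 still present after the shift is replaced by the pad id above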
+ return shifted_input_ids
+
+
+# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask
+def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
+ """
+ Make causal mask used for bi-directional self-attention.
+ """
+ bsz = input_ids_shape[0]
+ tgt_len = input_ids_shape[1]
+ mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
+ mask_cond = tf.range(shape_list(mask)[-1])
+
+ mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)
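+ # illustrative example: for tgt_len = 3 (and no past), the mask at this point is
+ # [[0, L, L], [0, 0, L], [0, 0, 0]] with L = LARGE_NEGATIVE, so position i only attends to positions <= i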
+
+ if past_key_values_length > 0:
+ mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)
+
+ return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
+
+
+# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
+def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
+ """
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
+ """
+ src_len = shape_list(mask)[1]
+ tgt_len = tgt_len if tgt_len is not None else src_len
+ one_cst = tf.constant(1.0)
+ mask = tf.cast(mask, dtype=one_cst.dtype)
+ expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
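+ # illustrative example: a padding mask [[1, 1, 0]] becomes 0.0 at the attended positions and
+ # LARGE_NEGATIVE at the padded one after the line below, broadcast to (bsz, 1, tgt_len, src_len)
+ # so it can simply be added to the attention scores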
+
+ return (one_cst - expanded_mask) * LARGE_NEGATIVE
+
+
+class TFConv1dSubsampler(keras.layers.Layer):
+ """
+ Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation
+ via gated linear units (https://arxiv.org/abs/1911.08460)
+ """
+
+ def __init__(self, config: Speech2TextConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.num_layers = config.num_conv_layers
+ self.in_channels = config.input_feat_per_channel * config.input_channels
+ self.mid_channels = config.conv_channels
+ self.out_channels = config.d_model
+ self.kernel_sizes = config.conv_kernel_sizes
+
+ self.conv_layers = [
+ keras.layers.Conv1D(
+ filters=self.mid_channels if i < self.num_layers - 1 else self.out_channels * 2,
+ kernel_size=k,
+ strides=2,
+ name=f"conv_layers.{i}",
+ )
+ for i, k in enumerate(self.kernel_sizes)
+ ]
+
+ def call(self, input_features: tf.Tensor) -> tf.Tensor:
+ # TF Conv1D assumes Batch x Time x Channels, same as the input
+ hidden_states = tf.cast(input_features, tf.float32)
+ for i, conv in enumerate(self.conv_layers):
+ # equivalent to `padding=k // 2` on PT's `nn.Conv1d`
+ pad_len = self.kernel_sizes[i] // 2
+ hidden_shapes = shape_list(hidden_states)
+ hidden_states = tf.concat(
+ (
+ tf.zeros((hidden_shapes[0], pad_len, hidden_shapes[2])),
+ hidden_states,
+ tf.zeros((hidden_shapes[0], pad_len, hidden_shapes[2])),
+ ),
+ axis=1,
+ )
+
+ hidden_states = conv(hidden_states)
+ hidden_states = glu(hidden_states, axis=2) # GLU over the Channel dimension
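+ # each conv layer uses stride 2, so the time dimension is roughly halved per layer (matching
+ # `_get_feat_extract_output_lengths`), while the GLU halves the channel dimension of each conv output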
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "conv_layers", None) is not None:
+ for i, layer in enumerate(self.conv_layers):
+ with tf.name_scope(layer.name):
+ layer.build([None, None, self.in_channels] if i == 0 else [None, None, self.mid_channels // 2])
+
+
+class TFSpeech2TextSinusoidalPositionalEmbedding(keras.layers.Layer):
+ """This module produces sinusoidal positional embeddings of any length."""
+
+ def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None, **kwargs):
+ super().__init__(**kwargs)
+ self.offset = 2
+ self.embedding_dim = embedding_dim
+ self.padding_idx = padding_idx
+ self.embedding_weights = self._get_embedding(num_positions + self.offset, embedding_dim, padding_idx)
+
+ @staticmethod
+ def _get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None) -> tf.Tensor:
+ """
+ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the
+ description in Section 3.5 of "Attention Is All You Need".
+ """
+ half_dim = embedding_dim // 2
+ emb = tf.math.log(10000.0) / (half_dim - 1)
+ emb = tf.math.exp(tf.range(half_dim, dtype=tf.float32) * -emb)
+ emb = tf.expand_dims(tf.range(num_embeddings, dtype=tf.float32), axis=1) * tf.expand_dims(emb, axis=0)
+ emb = tf.reshape(tf.concat([tf.math.sin(emb), tf.math.cos(emb)], axis=1), shape=[num_embeddings, -1])
+ if embedding_dim % 2 == 1:
+ # zero pad
+ emb = tf.concat([emb, tf.zeros((num_embeddings, 1))], axis=1)
+ if padding_idx is not None:
+ emb = tf.concat([emb[:padding_idx, :], tf.zeros((1, tf.shape(emb)[1])), emb[padding_idx + 1 :, :]], axis=0)
+ return emb
+
+ def call(self, input_ids: tf.Tensor, past_key_values_length: int = 0) -> tf.Tensor:
+ bsz, seq_len = shape_list(input_ids)
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
+
+ # Matt: The PyTorch code does a lot of work to cache the embeddings, setting the cached values as a
+ # model attribute in the forward pass. This is extremely forbidden in TF, which wants forward calls to be
+ # idempotent. TF doesn't need that caching anyway, since it can just store constants during compilation,
+ # so we just remove all of that code.
+ embeddings = self._get_embedding(
+ self.padding_idx + 1 + seq_len + self.offset + past_key_values_length, self.embedding_dim, self.padding_idx
+ )
+ return tf.reshape(tf.gather(embeddings, tf.reshape(position_ids, (-1,)), axis=0), (bsz, seq_len, -1))
+
+ @staticmethod
+ def create_position_ids_from_input_ids(
+ input_ids: tf.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0
+ ) -> tf.Tensor:
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
+ symbols are ignored. This is modified from fairseq's `utils.make_positions`.
+
+ Args:
+ input_ids (`tf.Tensor`): tensor of input token ids
+ padding_idx (`int`): index of the padding token
+ past_key_values_length (`int`, *optional*): length of any cached key/value states
+ Returns: tf.Tensor
+ """
+ mask = tf.cast(tf.math.not_equal(input_ids, padding_idx), dtype=tf.int32)
+ incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask
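+ # illustrative example: input_ids [[7, 8, 1, 1]] with padding_idx = 1 give mask [[1, 1, 0, 0]],
+ # cumsum [[1, 2, 2, 2]], masked [[1, 2, 0, 0]] and, after adding padding_idx, position ids [[2, 3, 1, 1]]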
+ return tf.cast(incremental_indices, dtype=tf.int64) + padding_idx
+
+
+# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->Speech2Text
+class TFSpeech2TextAttention(keras.layers.Layer):
+ """Multi-headed attention from "Attention Is All You Need"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.embed_dim = embed_dim
+
+ self.num_heads = num_heads
+ self.dropout = keras.layers.Dropout(dropout)
+ self.head_dim = embed_dim // num_heads
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+
+ self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
+ self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
+ self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
+ self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
+
+ def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
+ return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ key_value_states: tf.Tensor | None = None,
+ past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
+ attention_mask: tf.Tensor | None = None,
+ layer_head_mask: tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Tuple[tf.Tensor, tf.Tensor | None]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+ bsz, tgt_len, embed_dim = shape_list(hidden_states)
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = tf.concat([past_key_value[0], key_states], axis=2)
+ value_states = tf.concat([past_key_value[1], value_states], axis=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
+ key_states = tf.reshape(key_states, proj_shape)
+ value_states = tf.reshape(value_states, proj_shape)
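+ # at this point query_states has shape (bsz * num_heads, tgt_len, head_dim) and key/value_states have
+ # shape (bsz * num_heads, src_len, head_dim), so one batched matmul below scores every head at once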
+
+ src_len = shape_list(key_states)[1]
+ attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
+
+ tf.debugging.assert_equal(
+ shape_list(attn_weights),
+ [bsz * self.num_heads, tgt_len, src_len],
+ message=(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {shape_list(attn_weights)}"
+ ),
+ )
+
+ if attention_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(attention_mask),
+ [bsz, 1, tgt_len, src_len],
+ message=(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
+ f" {shape_list(attention_mask)}"
+ ),
+ )
+
+ attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
+ attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
+
+ attn_weights = stable_softmax(attn_weights, axis=-1)
+
+ if layer_head_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(layer_head_mask),
+ [self.num_heads],
+ message=(
+ f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
+ f" {shape_list(layer_head_mask)}"
+ ),
+ )
+
+ attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
+ attn_weights, (bsz, self.num_heads, tgt_len, src_len)
+ )
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
+
+ attn_probs = self.dropout(attn_weights, training=training)
+ attn_output = tf.matmul(attn_probs, value_states)
+
+ tf.debugging.assert_equal(
+ shape_list(attn_output),
+ [bsz * self.num_heads, tgt_len, self.head_dim],
+ message=(
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {shape_list(attn_output)}"
+ ),
+ )
+
+ attn_output = tf.transpose(
+ tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
+ )
+ attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
+
+ attn_output = self.out_proj(attn_output)
+ attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
+
+ return attn_output, attn_weights, past_key_value
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "k_proj", None) is not None:
+ with tf.name_scope(self.k_proj.name):
+ self.k_proj.build([None, None, self.embed_dim])
+ if getattr(self, "q_proj", None) is not None:
+ with tf.name_scope(self.q_proj.name):
+ self.q_proj.build([None, None, self.embed_dim])
+ if getattr(self, "v_proj", None) is not None:
+ with tf.name_scope(self.v_proj.name):
+ self.v_proj.build([None, None, self.embed_dim])
+ if getattr(self, "out_proj", None) is not None:
+ with tf.name_scope(self.out_proj.name):
+ self.out_proj.build([None, None, self.embed_dim])
+
+
+class TFSpeech2TextEncoderLayer(keras.layers.Layer):
+ def __init__(self, config: Speech2TextConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.embed_dim = config.d_model
+ self.self_attn = TFSpeech2TextAttention(
+ self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
+ )
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.activation_fn = get_tf_activation(config.activation_function)
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
+ self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
+ self.config = config
+
+ def call(
+ self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training: bool = False
+ ):
+ """
+ Args:
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`tf.Tensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states, self_attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ training=training,
+ )
+
+ tf.debugging.assert_equal(
+ shape_list(hidden_states),
+ shape_list(residual),
+ message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
+ )
+
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = self.activation_dropout(hidden_states, training=training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ return hidden_states, self_attn_weights
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self_attn", None) is not None:
+ with tf.name_scope(self.self_attn.name):
+ self.self_attn.build(None)
+ if getattr(self, "self_attn_layer_norm", None) is not None:
+ with tf.name_scope(self.self_attn_layer_norm.name):
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
+ if getattr(self, "fc1", None) is not None:
+ with tf.name_scope(self.fc1.name):
+ self.fc1.build([None, None, self.embed_dim])
+ if getattr(self, "fc2", None) is not None:
+ with tf.name_scope(self.fc2.name):
+ self.fc2.build([None, None, self.config.encoder_ffn_dim])
+ if getattr(self, "final_layer_norm", None) is not None:
+ with tf.name_scope(self.final_layer_norm.name):
+ self.final_layer_norm.build([None, None, self.embed_dim])
+
+
+class TFSpeech2TextDecoderLayer(keras.layers.Layer):
+ def __init__(self, config: Speech2TextConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.embed_dim = config.d_model
+
+ self.self_attn = TFSpeech2TextAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ name="self_attn",
+ is_decoder=True,
+ )
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.activation_fn = get_tf_activation(config.activation_function)
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
+
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
+ self.encoder_attn = TFSpeech2TextAttention(
+ self.embed_dim,
+ config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ name="encoder_attn",
+ is_decoder=True,
+ )
+ self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
+ self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
+ self.config = config
+
+ def call(
+ self,
+ hidden_states,
+ attention_mask: tf.Tensor | None = None,
+ encoder_hidden_states: tf.Tensor | None = None,
+ encoder_attention_mask: tf.Tensor | None = None,
+ layer_head_mask: tf.Tensor | None = None,
+ cross_attn_layer_head_mask: tf.Tensor | None = None,
+ past_key_value: Tuple[tf.Tensor] | None = None,
+ training=False,
+ ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
+ """
+ Args:
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`tf.Tensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ encoder_hidden_states (`tf.Tensor`):
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
+ encoder_attention_mask (`tf.Tensor`): encoder attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
+ `(decoder_attention_heads,)`
+ cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.
+ `(decoder_attention_heads,)`
+ past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ training=training,
+ )
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ training=training,
+ )
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = self.activation_dropout(hidden_states, training=training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ return (
+ hidden_states,
+ self_attn_weights,
+ cross_attn_weights,
+ present_key_value,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self_attn", None) is not None:
+ with tf.name_scope(self.self_attn.name):
+ self.self_attn.build(None)
+ if getattr(self, "self_attn_layer_norm", None) is not None:
+ with tf.name_scope(self.self_attn_layer_norm.name):
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
+ if getattr(self, "encoder_attn", None) is not None:
+ with tf.name_scope(self.encoder_attn.name):
+ self.encoder_attn.build(None)
+ if getattr(self, "encoder_attn_layer_norm", None) is not None:
+ with tf.name_scope(self.encoder_attn_layer_norm.name):
+ self.encoder_attn_layer_norm.build([None, None, self.embed_dim])
+ if getattr(self, "fc1", None) is not None:
+ with tf.name_scope(self.fc1.name):
+ self.fc1.build([None, None, self.embed_dim])
+ if getattr(self, "fc2", None) is not None:
+ with tf.name_scope(self.fc2.name):
+ self.fc2.build([None, None, self.config.decoder_ffn_dim])
+ if getattr(self, "final_layer_norm", None) is not None:
+ with tf.name_scope(self.final_layer_norm.name):
+ self.final_layer_norm.build([None, None, self.embed_dim])
+
+
+class TFSpeech2TextPreTrainedModel(TFPreTrainedModel):
+ config_class = Speech2TextConfig
+ base_model_prefix = "model"
+ main_input_name = "input_features"
+ _keys_to_ignore_on_load_unexpected = [r"encoder.embed_positions.weights"]
+
+ def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor):
+ """
+ Computes the output length of the convolutional layers
+ """
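+        # Each stride-2 convolution maps a length L to (L - 1) // 2 + 1; for example, with the default
+        # num_conv_layers == 2, an input of 584 frames is subsampled to 292 and then to 146.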
+ for _ in range(self.config.num_conv_layers):
+ input_lengths = (input_lengths - 1) // 2 + 1
+
+ return input_lengths
+
+ @property
+ def input_signature(self):
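+        # The last feature dimension is flattened across channels, i.e.
+        # input_feat_per_channel (80 mel filterbank bins by default) * input_channels.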
+ return {
+ "input_features": tf.TensorSpec(
+ (None, None, self.config.input_feat_per_channel * self.config.input_channels),
+ tf.float32,
+ name="input_features",
+ ),
+ "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
+ "decoder_input_ids": tf.TensorSpec((None, None), tf.int32, name="decoder_input_ids"),
+ "decoder_attention_mask": tf.TensorSpec((None, None), tf.int32, name="decoder_attention_mask"),
+ }
+
+
+SPEECH_TO_TEXT_START_DOCSTRING = r"""
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
+ behavior.
+
+    <Tip>
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
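+
+    For illustration, assuming `input_features` and `attention_mask` tensors have already been prepared with the
+    feature extractor, the keyword and dictionary formats would look like:
+
+    ```python
+    outputs = model(input_features=input_features, attention_mask=attention_mask)
+    outputs = model({"input_features": input_features, "attention_mask": attention_mask})
+    ```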
+
+    </Tip>
+
+ Parameters:
+ config ([`Speech2TextConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+SPEECH_TO_TEXT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_features (`tf.Tensor` of shape `(batch_size, sequence_length, feature_size)`):
+ Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained
+ by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.*
+ via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the
+ [`AutoFeatureExtractor`] should be used for extracting the fbank features, padding and conversion into a
+ tensor of floats. See [`~Speech2TextFeatureExtractor.__call__`]
+ attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`Speech2TextTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ SpeechToText uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
+ for denoising pre-training following the paper.
+ decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+            If not provided, a default mask that ignores pad tokens is created automatically. Setting this explicitly
+            is not recommended for most use cases.
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+        encoder_outputs (`tf.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+            Sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of
+            the decoder.
+        past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
+            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ decoder_inputs_embeds (`tf.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+ input (see `past_key_values`). This is useful if you want more control over how to convert
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@keras_serializable
+class TFSpeech2TextEncoder(keras.layers.Layer):
+ config_class = Speech2TextConfig
+ """
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
+ [`TFSpeech2TextEncoderLayer`].
+
+ Args:
+ config: Speech2TextConfig
+ """
+
+ def __init__(self, config: Speech2TextConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.layerdrop = config.encoder_layerdrop
+
+ embed_dim = config.d_model
+ self.padding_idx = config.pad_token_id
+ self.max_source_positions = config.max_source_positions
+ self.embed_scale = tf.math.sqrt(float(embed_dim)) if config.scale_embedding else 1.0
+
+ self.conv = TFConv1dSubsampler(config, name="conv")
+
+ self.embed_positions = TFSpeech2TextSinusoidalPositionalEmbedding(
+ num_positions=config.max_source_positions,
+ embedding_dim=embed_dim,
+ padding_idx=self.padding_idx,
+ name="embed_positions",
+ )
+ self.layers = [TFSpeech2TextEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
+
+ def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor):
+ """
+ Computes the output length of the convolutional layers
+ """
+ for _ in range(self.config.num_conv_layers):
+ input_lengths = (input_lengths - 1) // 2 + 1
+
+ return input_lengths
+
+ def _get_feature_vector_attention_mask(self, feature_vector_length, attention_mask):
+ # generate creates 3D attention mask, because of the shape of input_features
+        # convert it to 2D if that's the case
+ if len(attention_mask.shape) > 2:
+ attention_mask = attention_mask[:, :, -1]
+
+ subsampled_lengths = self._get_feat_extract_output_lengths(tf.math.reduce_sum(attention_mask, -1))
+ bsz = shape_list(attention_mask)[0]
+ indices = tf.concat(
+ (
+ tf.expand_dims(tf.range(bsz, dtype=attention_mask.dtype), -1),
+ tf.expand_dims(subsampled_lengths - 1, -1),
+ ),
+ axis=-1,
+ )
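+        # Scattering a single 1 at index (subsampled_length - 1) for every batch entry and then taking a
+        # reversed cumulative sum expands that marker into ones over positions [0, subsampled_length) and
+        # zeros elsewhere.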
+ attention_mask = tf.scatter_nd(indices=indices, updates=tf.ones(bsz), shape=[bsz, feature_vector_length])
+ attention_mask = tf.cast(tf.reverse(tf.math.cumsum(tf.reverse(attention_mask, [-1]), -1), [-1]), tf.int64)
+ return attention_mask
+
+ @unpack_inputs
+ def call(
+ self,
+ input_features=None,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ):
+ """
+ Args:
+ input_features (`tf.Tensor` of shape `(batch_size, sequence_length, feature_size)`):
+ Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be
+ obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a
+ `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into
+ `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the fbank features,
+ padding and conversion into a tensor of floats. See [`~Speech2TextFeatureExtractor.__call__`]
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+            head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ if input_features is None:
+ raise ValueError("You have to specify input_features")
+
+ inputs_embeds = self.conv(input_features)
+ inputs_embeds = self.embed_scale * inputs_embeds
+
+ # subsample attention mask if necessary
+ if attention_mask is not None:
+ attention_mask = self._get_feature_vector_attention_mask(tf.shape(inputs_embeds)[1], attention_mask)
+ padding_mask = tf.cast(tf.math.not_equal(attention_mask, 1), tf.int64)
+ else:
+ padding_mask = tf.zeros(tf.shape(inputs_embeds)[:-1], dtype=tf.int64)
+
+ embed_pos = self.embed_positions(padding_mask)
+
+ hidden_states = inputs_embeds + embed_pos
+ hidden_states = self.dropout(hidden_states, training=training)
+
+ # check attention mask and invert
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _expand_mask(attention_mask)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(head_mask)[0],
+ len(self.layers),
+ message=(
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
+ f" {shape_list(head_mask)[0]}."
+ ),
+ )
+
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = random.uniform(0, 1)
+ if training and (dropout_probability < self.layerdrop): # skip the layer
+ continue
+
+ hidden_states, attn = encoder_layer(
+ hidden_states,
+ attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ training=training,
+ )
+
+ if output_attentions:
+ all_attentions += (attn,)
+
+ hidden_states = self.layer_norm(hidden_states)
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "conv", None) is not None:
+ with tf.name_scope(self.conv.name):
+ self.conv.build(None)
+ if getattr(self, "embed_positions", None) is not None:
+ with tf.name_scope(self.embed_positions.name):
+ self.embed_positions.build(None)
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.d_model])
+ if getattr(self, "layers", None) is not None:
+ for layer in self.layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+@keras_serializable
+class TFSpeech2TextDecoder(keras.layers.Layer):
+ config_class = Speech2TextConfig
+ """
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFSpeech2TextDecoderLayer`]
+
+ Args:
+ config: Speech2TextConfig
+ """
+
+ def __init__(self, config: Speech2TextConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.layerdrop = config.decoder_layerdrop
+ self.padding_idx = config.pad_token_id
+ self.max_target_positions = config.max_target_positions
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
+
+ self.embed_tokens = TFSharedEmbeddings(config.vocab_size, config.d_model, name="embed_tokens")
+
+ self.embed_positions = TFSpeech2TextSinusoidalPositionalEmbedding(
+ num_positions=config.max_target_positions,
+ embedding_dim=config.d_model,
+ padding_idx=self.padding_idx,
+ name="embed_positions",
+ )
+
+ self.layers = [TFSpeech2TextDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
+
+ self.dropout = keras.layers.Dropout(config.dropout)
+
+ def get_embed_tokens(self):
+ return self.embed_tokens
+
+ def set_embed_tokens(self, embed_tokens):
+ self.embed_tokens = embed_tokens
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ inputs_embeds=None,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ head_mask=None,
+ cross_attn_head_mask=None,
+ past_key_values=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ):
+ r"""
+ Args:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`Speech2TextTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
+ decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ # past_key_values_length
+ past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0
+
+        if inputs_embeds is None:
+            check_embeddings_within_bounds(input_ids, self.embed_tokens.vocab_size)
+            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
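+        # With a single-step input (cached generation), a full causal mask is unnecessary, so an expanded
+        # all-ones mask covering the cached length is used instead.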
+ if input_shape[-1] > 1:
+ combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
+ else:
+ combined_attention_mask = _expand_mask(
+ tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
+ )
+
+ if attention_mask is not None:
+ combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1])
+
+ # expand encoder attention mask
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1])
+
+ # embed positions
+ positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length)
+
+ hidden_states = inputs_embeds + positions
+ hidden_states = self.dropout(hidden_states, training=training)
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None
+ next_decoder_cache = () if use_cache else None
+
+ # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired
+ for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]:
+ if attn_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(attn_mask)[0],
+ len(self.layers),
+ message=(
+ f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for"
+ f" {shape_list(attn_mask)[0]}."
+ ),
+ )
+
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ dropout_probability = random.uniform(0, 1)
+ if training and (dropout_probability < self.layerdrop):
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+ cross_attn_layer_head_mask = cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
+
+ hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
+ hidden_states,
+ attention_mask=combined_attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ layer_head_mask=head_mask[idx] if head_mask is not None else None,
+ cross_attn_layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=past_key_value,
+ )
+
+ if use_cache:
+ next_decoder_cache += (present_key_value,)
+
+ if output_attentions:
+ all_self_attns += (layer_self_attn,)
+
+ if encoder_hidden_states is not None:
+ all_cross_attns += (layer_cross_attn,)
+
+ hidden_states = self.layer_norm(hidden_states)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+
+ if not return_dict:
+ return hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attns
+ else:
+ return TFBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attns,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embed_tokens", None) is not None:
+ with tf.name_scope(self.embed_tokens.name):
+ self.embed_tokens.build(None)
+ if getattr(self, "embed_positions", None) is not None:
+ with tf.name_scope(self.embed_positions.name):
+ self.embed_positions.build(None)
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.d_model])
+ if getattr(self, "layers", None) is not None:
+ for layer in self.layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+@keras_serializable
+class TFSpeech2TextMainLayer(keras.layers.Layer):
+ config_class = Speech2TextConfig
+
+ def __init__(self, config: Speech2TextConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+
+ self.encoder = TFSpeech2TextEncoder(config, name="encoder")
+ self.decoder = TFSpeech2TextDecoder(config, name="decoder")
+
+ def get_input_embeddings(self):
+ return self.decoder.embed_tokens
+
+ def set_input_embeddings(self, new_embeddings):
+ self.decoder.embed_tokens = new_embeddings
+
+ @unpack_inputs
+ def call(
+ self,
+ input_features=None,
+ attention_mask=None,
+ decoder_input_ids=None,
+ decoder_attention_mask=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ encoder_outputs=None,
+ past_key_values=None,
+ decoder_inputs_embeds=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ **kwargs,
+ ):
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_features=input_features,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput):
+ encoder_outputs = TFBaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+ # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
+ elif not return_dict and not isinstance(encoder_outputs, tuple):
+ encoder_outputs = encoder_outputs.to_tuple()
+
+ # downsample encoder attention mask
+ if attention_mask is not None:
+ encoder_attention_mask = self.encoder._get_feature_vector_attention_mask(
+ tf.shape(encoder_outputs[0])[1], attention_mask
+ )
+ else:
+ encoder_attention_mask = None
+
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=encoder_attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return TFSeq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "decoder", None) is not None:
+ with tf.name_scope(self.decoder.name):
+ self.decoder.build(None)
+
+
+@add_start_docstrings(
+ "The bare Speech2Text Model outputting raw hidden-states without any specific head on top.",
+ SPEECH_TO_TEXT_START_DOCSTRING,
+)
+class TFSpeech2TextModel(TFSpeech2TextPreTrainedModel):
+ def __init__(self, config: Speech2TextConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.model = TFSpeech2TextMainLayer(config, name="model")
+
+ def get_encoder(self):
+ return self.model.encoder
+
+ def get_decoder(self):
+ return self.model.decoder
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFSeq2SeqModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_features: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ decoder_input_ids: np.ndarray | tf.Tensor | None = None,
+ decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ decoder_head_mask: np.ndarray | tf.Tensor | None = None,
+ cross_attn_head_mask: np.ndarray | tf.Tensor | None = None,
+ encoder_outputs: np.ndarray | tf.Tensor | None = None,
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+ decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ **kwargs,
+ ) -> Union[Tuple, TFSeq2SeqModelOutput]:
+ outputs = self.model(
+ input_features=input_features,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ past_key_values=past_key_values,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ def serving_output(self, output):
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
+
+ return TFSeq2SeqModelOutput(
+ last_hidden_state=output.last_hidden_state,
+ past_key_values=pkv,
+ decoder_hidden_states=dec_hs,
+ decoder_attentions=dec_attns,
+ cross_attentions=cross_attns,
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
+ encoder_hidden_states=enc_hs,
+ encoder_attentions=enc_attns,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "model", None) is not None:
+ with tf.name_scope(self.model.name):
+ self.model.build(None)
+
+
+@add_start_docstrings(
+ "The Speech2Text Model with a language modeling head. Can be used for summarization.",
+ SPEECH_TO_TEXT_START_DOCSTRING,
+)
+class TFSpeech2TextForConditionalGeneration(TFSpeech2TextPreTrainedModel, TFCausalLanguageModelingLoss):
+ def __init__(self, config: Speech2TextConfig):
+ super().__init__(config)
+ self.model = TFSpeech2TextMainLayer(config, name="model")
+ self.lm_head = keras.layers.Dense(self.config.vocab_size, use_bias=False, name="lm_head")
+ # TODO (Joao): investigate why Speech2Text has numerical issues in XLA generate
+ self.supports_xla_generation = False
+ self.config = config
+
+ def get_encoder(self):
+ return self.model.encoder
+
+ def get_decoder(self):
+ return self.model.decoder
+
+ def resize_token_embeddings(self, new_num_tokens: int) -> tf.Variable:
+ new_embeddings = super().resize_token_embeddings(new_num_tokens)
+ return new_embeddings
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ input_features: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ decoder_input_ids: np.ndarray | tf.Tensor | None = None,
+ decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ decoder_head_mask: np.ndarray | tf.Tensor | None = None,
+ cross_attn_head_mask: np.ndarray | tf.Tensor | None = None,
+ encoder_outputs: np.ndarray | tf.Tensor | None = None,
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+ decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ **kwargs,
+ ) -> Union[Tuple, TFSeq2SeqLMOutput]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> import tensorflow as tf
+ >>> from transformers import Speech2TextProcessor, TFSpeech2TextForConditionalGeneration
+ >>> from datasets import load_dataset
+ >>> import soundfile as sf
+
+ >>> model = TFSpeech2TextForConditionalGeneration.from_pretrained(
+ ... "facebook/s2t-small-librispeech-asr", from_pt=True
+ ... )
+ >>> processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
+
+
+ >>> def map_to_array(batch):
+ ... speech, _ = sf.read(batch["file"])
+ ... batch["speech"] = speech
+ ... return batch
+
+
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+ >>> ds = ds.map(map_to_array)
+ >>> ds.set_format(type="tf")
+
+ >>> input_features = processor(
+ ... ds["speech"][0], sampling_rate=16000, return_tensors="tf"
+ ... ).input_features # Batch size 1
+ >>> generated_ids = model.generate(input_features)
+
+ >>> transcription = processor.batch_decode(generated_ids)
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if labels is not None:
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
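+                # Teacher forcing: build decoder inputs from the labels by prepending the decoder start
+                # token and shifting the labels one position to the right.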
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ outputs = self.model(
+ input_features=input_features,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ encoder_outputs=encoder_outputs,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ lm_logits = self.lm_head(outputs[0])
+ masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)
+
+ if not return_dict:
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return TFSeq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+ def serving_output(self, output):
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
+
+ return TFSeq2SeqLMOutput(
+ logits=output.logits,
+ past_key_values=pkv,
+ decoder_hidden_states=dec_hs,
+ decoder_attentions=dec_attns,
+ cross_attentions=cross_attns,
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
+ encoder_hidden_states=enc_hs,
+ encoder_attentions=enc_attns,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past is used
+ if past_key_values is not None:
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
+ return {
+ "input_features": None, # needs to be passed to make Keras.layer.__call__ happy
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "head_mask": head_mask,
+ "decoder_head_mask": decoder_head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
+ }
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "model", None) is not None:
+ with tf.name_scope(self.model.name):
+ self.model.build(None)
+ if getattr(self, "lm_head", None) is not None:
+ with tf.name_scope(self.lm_head.name):
+ self.lm_head.build([None, None, self.config.d_model])
+
+ def tf_to_pt_weight_rename(self, tf_weight):
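+        # The PyTorch checkpoint stores the (tied) output projection under `model.decoder.embed_tokens.weight`,
+        # so both names are returned and either one can be matched when cross-loading weights.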
+ if tf_weight == "lm_head.weight":
+ return tf_weight, "model.decoder.embed_tokens.weight"
+ else:
+ return (tf_weight,)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/processing_speech_to_text.py b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/processing_speech_to_text.py
new file mode 100644
index 0000000000000000000000000000000000000000..42e900633867b3d83be4238c548932ae582aa623
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/processing_speech_to_text.py
@@ -0,0 +1,116 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Speech processor class for Speech2Text
+"""
+import warnings
+from contextlib import contextmanager
+
+from ...processing_utils import ProcessorMixin
+
+
+class Speech2TextProcessor(ProcessorMixin):
+ r"""
+ Constructs a Speech2Text processor which wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a
+ single processor.
+
+ [`Speech2TextProcessor`] offers all the functionalities of [`Speech2TextFeatureExtractor`] and
+ [`Speech2TextTokenizer`]. See the [`~Speech2TextProcessor.__call__`] and [`~Speech2TextProcessor.decode`] for more
+ information.
+
+ Args:
+ feature_extractor (`Speech2TextFeatureExtractor`):
+ An instance of [`Speech2TextFeatureExtractor`]. The feature extractor is a required input.
+ tokenizer (`Speech2TextTokenizer`):
+ An instance of [`Speech2TextTokenizer`]. The tokenizer is a required input.
+ """
+
+ feature_extractor_class = "Speech2TextFeatureExtractor"
+ tokenizer_class = "Speech2TextTokenizer"
+
+ def __init__(self, feature_extractor, tokenizer):
+ super().__init__(feature_extractor, tokenizer)
+ self.current_processor = self.feature_extractor
+ self._in_target_context_manager = False
+
+ def __call__(self, *args, **kwargs):
+ """
+ When used in normal mode, this method forwards all its arguments to Speech2TextFeatureExtractor's
+ [`~Speech2TextFeatureExtractor.__call__`] and returns its output. If used in the context
+ [`~Speech2TextProcessor.as_target_processor`] this method forwards all its arguments to Speech2TextTokenizer's
+        [`~Speech2TextTokenizer.__call__`]. Please refer to the docstring of the above two methods for more
+ information.
+ """
+ # For backward compatibility
+ if self._in_target_context_manager:
+ return self.current_processor(*args, **kwargs)
+
+ if "raw_speech" in kwargs:
+ warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
+ audio = kwargs.pop("raw_speech")
+ else:
+ audio = kwargs.pop("audio", None)
+ sampling_rate = kwargs.pop("sampling_rate", None)
+ text = kwargs.pop("text", None)
+ if len(args) > 0:
+ audio = args[0]
+ args = args[1:]
+
+ if audio is None and text is None:
+ raise ValueError("You need to specify either an `audio` or `text` input to process.")
+
+ if audio is not None:
+ inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
+ if text is not None:
+ encodings = self.tokenizer(text, **kwargs)
+
+ if text is None:
+ return inputs
+ elif audio is None:
+ return encodings
+ else:
+ inputs["labels"] = encodings["input_ids"]
+ return inputs
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to Speech2TextTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to Speech2TextTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer
+ to the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @contextmanager
+ def as_target_processor(self):
+ """
+ Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning
+ Speech2Text.
+ """
+ warnings.warn(
+ "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
+ "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
+            "your audio inputs, or in a separate call.)"
+ )
+ self._in_target_context_manager = True
+ self.current_processor = self.tokenizer
+ yield
+ self.current_processor = self.feature_extractor
+ self._in_target_context_manager = False
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/tokenization_speech_to_text.py b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/tokenization_speech_to_text.py
new file mode 100644
index 0000000000000000000000000000000000000000..27db0a671ebc7d251f77a11ff88969921d1ccc7c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/speech_to_text/tokenization_speech_to_text.py
@@ -0,0 +1,289 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for Speech2Text."""
+import json
+import os
+from pathlib import Path
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import sentencepiece
+
+from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+SPIECE_UNDERLINE = "▁"
+
+VOCAB_FILES_NAMES = {
+ "vocab_file": "vocab.json",
+ "spm_file": "sentencepiece.bpe.model",
+}
+
+
+MAX_MODEL_INPUT_SIZES = {
+ "facebook/s2t-small-librispeech-asr": 1024,
+}
+
+MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
+
+LANGUAGES = {"mustc": MUSTC_LANGS}
+
+
+class Speech2TextTokenizer(PreTrainedTokenizer):
+ """
+    Construct a Speech2Text tokenizer.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains some of the main methods. Users should refer to
+ the superclass for more information regarding such methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ spm_file (`str`):
+ Path to the [SentencePiece](https://github.com/google/sentencepiece) model file
+        bos_token (`str`, *optional*, defaults to `"<s>"`):
+            The beginning of sentence token.
+        eos_token (`str`, *optional*, defaults to `"</s>"`):
+            The end of sentence token.
+        unk_token (`str`, *optional*, defaults to `"<unk>"`):
+            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+            token instead.
+        pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ do_upper_case (`bool`, *optional*, defaults to `False`):
+ Whether or not to uppercase the output when decoding.
+ do_lower_case (`bool`, *optional*, defaults to `False`):
+ Whether or not to lowercase the input when tokenizing.
+ tgt_lang (`str`, *optional*):
+ A string representing the target language.
+ sp_model_kwargs (`dict`, *optional*):
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+ to set:
+
+ - `enable_sampling`: Enable subword regularization.
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+ - `nbest_size = {0,1}`: No sampling is performed.
+ - `nbest_size > 1`: samples from the nbest_size results.
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
+ using forward-filtering-and-backward-sampling algorithm.
+
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+ BPE-dropout.
+
+ **kwargs
+ Additional keyword arguments passed along to [`PreTrainedTokenizer`]
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ prefix_tokens: List[int] = []
+
+ def __init__(
+ self,
+ vocab_file,
+ spm_file,
+        bos_token="<s>",
+        eos_token="</s>",
+        pad_token="<pad>",
+        unk_token="<unk>",
+ do_upper_case=False,
+ do_lower_case=False,
+ tgt_lang=None,
+ lang_codes=None,
+ additional_special_tokens=None,
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ **kwargs,
+ ) -> None:
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+ self.do_upper_case = do_upper_case
+ self.do_lower_case = do_lower_case
+
+ self.encoder = load_json(vocab_file)
+ self.decoder = {v: k for k, v in self.encoder.items()}
+ self.spm_file = spm_file
+ self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
+
+ if lang_codes is not None:
+ self.lang_codes = lang_codes
+ self.langs = LANGUAGES[lang_codes]
+            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
+            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}
+ if additional_special_tokens is not None:
+ additional_special_tokens = self.lang_tokens + additional_special_tokens
+ else:
+ additional_special_tokens = self.lang_tokens
+ self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
+
+ self.set_tgt_lang_special_tokens(self._tgt_lang)
+ else:
+ self.lang_code_to_id = {}
+
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ do_upper_case=do_upper_case,
+ do_lower_case=do_lower_case,
+ tgt_lang=tgt_lang,
+ lang_codes=lang_codes,
+ sp_model_kwargs=self.sp_model_kwargs,
+ additional_special_tokens=additional_special_tokens,
+ **kwargs,
+ )
+
+ @property
+ def vocab_size(self) -> int:
+ return len(self.encoder)
+
+ def get_vocab(self) -> Dict:
+ vocab = self.encoder.copy()
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ @property
+ def tgt_lang(self) -> str:
+ return self._tgt_lang
+
+ @tgt_lang.setter
+ def tgt_lang(self, new_tgt_lang) -> None:
+ self._tgt_lang = new_tgt_lang
+ self.set_tgt_lang_special_tokens(new_tgt_lang)
+
+ def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
+        """Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos]."""
+ lang_code_id = self.lang_code_to_id[tgt_lang]
+ self.prefix_tokens = [lang_code_id]
+
+ def _tokenize(self, text: str) -> List[str]:
+ return self.sp_model.encode(text, out_type=str)
+
+ def _convert_token_to_id(self, token):
+ return self.encoder.get(token, self.encoder[self.unk_token])
+
+ def _convert_id_to_token(self, index: int) -> str:
+ """Converts an index (integer) in a token (str) using the decoder."""
+ return self.decoder.get(index, self.unk_token)
+
+ def convert_tokens_to_string(self, tokens: List[str]) -> str:
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
+ current_sub_tokens = []
+ out_string = ""
+ for token in tokens:
+ # make sure that special tokens are not decoded using sentencepiece model
+ if token in self.all_special_tokens:
+ decoded = self.sp_model.decode(current_sub_tokens)
+ out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
+ current_sub_tokens = []
+ else:
+ current_sub_tokens.append(token)
+ decoded = self.sp_model.decode(current_sub_tokens)
+ out_string += decoded.upper() if self.do_upper_case else decoded
+ return out_string.strip()
+
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
+ """Build model inputs from a sequence by appending eos_token_id."""
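+        # Resulting layout: prefix_tokens (the target language code, if any) + token_ids_0 [+ token_ids_1] + [eos].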
+ if token_ids_1 is None:
+ return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
+ # We don't expect to process pairs, but leave the pair logic for API consistency
+ return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ prefix_ones = [1] * len(self.prefix_tokens)
+ suffix_ones = [1]
+ if token_ids_1 is None:
+ return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
+ return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
+
+ def __getstate__(self) -> Dict:
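+        # The SentencePiece processor object is not picklable, so it is dropped here and reloaded from
+        # `spm_file` in `__setstate__`.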
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ return state
+
+ def __setstate__(self, d: Dict) -> None:
+ self.__dict__ = d
+
+ # for backward compatibility
+ if not hasattr(self, "sp_model_kwargs"):
+ self.sp_model_kwargs = {}
+
+ self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ save_dir = Path(save_directory)
+ assert save_dir.is_dir(), f"{save_directory} should be a directory"
+ vocab_save_path = save_dir / (
+ (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
+ )
+ spm_save_path = save_dir / (
+ (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
+ )
+
+ save_json(self.encoder, vocab_save_path)
+
+ if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
+ copyfile(self.spm_file, spm_save_path)
+ elif not os.path.isfile(self.spm_file):
+ with open(spm_save_path, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (str(vocab_save_path), str(spm_save_path))
+
+
+def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
+ spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
+ spm.Load(str(path))
+ return spm
+
+
+def load_json(path: str) -> Union[Dict, List]:
+ with open(path, "r") as f:
+ return json.load(f)
+
+
+def save_json(data, path: str) -> None:
+ with open(path, "w") as f:
+ json.dump(data, f, indent=2)
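The prefix/eos handling and the special-tokens mask defined above are easiest to see on a toy sequence. A minimal sketch, using made-up ids (the prefix id, eos id, and input ids below are placeholders, not values taken from this tokenizer):

```python
# Toy illustration of build_inputs_with_special_tokens / get_special_tokens_mask
# for a single sequence; every id here is invented for the example.
prefix_tokens = [42]       # hypothetical prefix token id(s)
eos_token_id = 2           # hypothetical eos id
token_ids_0 = [10, 11, 12]

inputs = prefix_tokens + token_ids_0 + [eos_token_id]
mask = [1] * len(prefix_tokens) + [0] * len(token_ids_0) + [1]

print(inputs)  # [42, 10, 11, 12, 2]
print(mask)    # [1, 0, 0, 0, 1] -> 1 marks special tokens, 0 marks sequence tokens
```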
diff --git a/venv/lib/python3.10/site-packages/transformers/models/trocr/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/trocr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..08400fc916ec21c52ace1428079fd206345d42b9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/trocr/__init__.py
@@ -0,0 +1,59 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_sentencepiece_available,
+ is_speech_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
+ "processing_trocr": ["TrOCRProcessor"],
+}
+
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_trocr"] = [
+ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TrOCRForCausalLM",
+ "TrOCRPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
+ from .processing_trocr import TrOCRProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
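As a quick check of the lazy-import wiring above, importing a symbol from the package resolves the submodule that defines it on first access; the torch-gated modeling classes resolve only when PyTorch is installed. A minimal sketch:

```python
# Accessing an attribute on the lazy module triggers the import of the
# submodule that defines it (configuration_trocr in this case).
from transformers.models.trocr import TrOCRConfig

config = TrOCRConfig()
print(config.model_type)  # "trocr"
```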
diff --git a/venv/lib/python3.10/site-packages/transformers/models/trocr/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/trocr/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..25174e623855a57a4d234f5cf9c1c35f1345e9cb
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/trocr/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/trocr/__pycache__/configuration_trocr.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/trocr/__pycache__/configuration_trocr.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6ab27b351fac43910010908ac05df5ee6d4dee77
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/trocr/__pycache__/configuration_trocr.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/trocr/__pycache__/convert_trocr_unilm_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/trocr/__pycache__/convert_trocr_unilm_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fd5a867011e4df123d5c5a9caece6e6295c628ca
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/trocr/__pycache__/convert_trocr_unilm_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/trocr/__pycache__/modeling_trocr.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/trocr/__pycache__/modeling_trocr.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..963eb791671186d37a3509546f379da91c1dda50
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/trocr/__pycache__/modeling_trocr.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/trocr/__pycache__/processing_trocr.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/trocr/__pycache__/processing_trocr.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..662a5aa7751ae1319383e06623b9cdaeef2f5270
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/trocr/__pycache__/processing_trocr.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/trocr/configuration_trocr.py b/venv/lib/python3.10/site-packages/transformers/models/trocr/configuration_trocr.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab282db97bfc55ee8d988b9c0d25f9d8cd1d92e6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/trocr/configuration_trocr.py
@@ -0,0 +1,146 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TrOCR model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class TrOCRConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`TrOCRForCausalLM`]. It is used to instantiate a
+ TrOCR model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the TrOCR
+ [microsoft/trocr-base-handwritten](https://huggingface.co/microsoft/trocr-base-handwritten) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 50265):
+ Vocabulary size of the TrOCR model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`TrOCRForCausalLM`].
+ d_model (`int`, *optional*, defaults to 1024):
+ Dimensionality of the layers and the pooler layer.
+ decoder_layers (`int`, *optional*, defaults to 12):
+ Number of decoder layers.
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ decoder_ffn_dim (`int`, *optional*, defaults to 4096):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the pooler. If string, `"gelu"`, `"relu"`,
+ `"silu"` and `"gelu_new"` are supported.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for activations inside the fully connected layer.
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+ for more details.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+ scale_embedding (`bool`, *optional*, defaults to `False`):
+ Whether or not to scale the word embeddings by sqrt(d_model).
+ use_learned_position_embeddings (`bool`, *optional*, defaults to `True`):
+ Whether or not to use learned position embeddings. If not, sinusoidal position embeddings will be used.
+ layernorm_embedding (`bool`, *optional*, defaults to `True`):
+ Whether or not to use a layernorm after the word + position embeddings.
+
+ Example:
+
+ ```python
+ >>> from transformers import TrOCRConfig, TrOCRForCausalLM
+
+ >>> # Initializing a TrOCR-base style configuration
+ >>> configuration = TrOCRConfig()
+
+ >>> # Initializing a model (with random weights) from the TrOCR-base style configuration
+ >>> model = TrOCRForCausalLM(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "trocr"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {
+ "num_attention_heads": "decoder_attention_heads",
+ "hidden_size": "d_model",
+ "num_hidden_layers": "decoder_layers",
+ }
+
+ def __init__(
+ self,
+ vocab_size=50265,
+ d_model=1024,
+ decoder_layers=12,
+ decoder_attention_heads=16,
+ decoder_ffn_dim=4096,
+ activation_function="gelu",
+ max_position_embeddings=512,
+ dropout=0.1,
+ attention_dropout=0.0,
+ activation_dropout=0.0,
+ decoder_start_token_id=2,
+ init_std=0.02,
+ decoder_layerdrop=0.0,
+ use_cache=True,
+ scale_embedding=False,
+ use_learned_position_embeddings=True,
+ layernorm_embedding=True,
+ pad_token_id=1,
+ bos_token_id=0,
+ eos_token_id=2,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.d_model = d_model
+ self.decoder_layers = decoder_layers
+ self.decoder_attention_heads = decoder_attention_heads
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.activation_function = activation_function
+ self.max_position_embeddings = max_position_embeddings
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.init_std = init_std
+ self.decoder_layerdrop = decoder_layerdrop
+ self.use_cache = use_cache
+ self.scale_embedding = scale_embedding
+ self.use_learned_position_embeddings = use_learned_position_embeddings
+ self.layernorm_embedding = layernorm_embedding
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ decoder_start_token_id=decoder_start_token_id,
+ **kwargs,
+ )
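Because of the `attribute_map` above, the TrOCR-specific hyperparameter names can also be read through the generic `PretrainedConfig` aliases. A small sketch (the sizes are arbitrary):

```python
from transformers import TrOCRConfig

# The generic names resolve to the TrOCR-specific attributes via attribute_map.
config = TrOCRConfig(d_model=256, decoder_layers=6, decoder_attention_heads=8)
assert config.hidden_size == config.d_model == 256
assert config.num_hidden_layers == config.decoder_layers == 6
assert config.num_attention_heads == config.decoder_attention_heads == 8
```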
diff --git a/venv/lib/python3.10/site-packages/transformers/models/trocr/convert_trocr_unilm_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/trocr/convert_trocr_unilm_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..428406d82c685fdb7a8da76fb096d607adb9f21c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/trocr/convert_trocr_unilm_to_pytorch.py
@@ -0,0 +1,238 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert TrOCR checkpoints from the unilm repository."""
+
+
+import argparse
+from pathlib import Path
+
+import requests
+import torch
+from PIL import Image
+
+from transformers import (
+ RobertaTokenizer,
+ TrOCRConfig,
+ TrOCRForCausalLM,
+ TrOCRProcessor,
+ VisionEncoderDecoderModel,
+ ViTConfig,
+ ViTImageProcessor,
+ ViTModel,
+)
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+# here we list all keys to be renamed (original name on the left, our name on the right)
+def create_rename_keys(encoder_config, decoder_config):
+ rename_keys = []
+ for i in range(encoder_config.num_hidden_layers):
+ # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
+ rename_keys.append(
+ (f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight")
+ )
+ rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias"))
+ rename_keys.append(
+ (f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight")
+ )
+ rename_keys.append(
+ (f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias")
+ )
+ rename_keys.append(
+ (f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight")
+ )
+ rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias"))
+ rename_keys.append(
+ (f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight")
+ )
+ rename_keys.append(
+ (f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias")
+ )
+ rename_keys.append(
+ (f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight")
+ )
+ rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias"))
+
+ # cls token, position embeddings and patch embeddings of encoder
+ rename_keys.extend(
+ [
+ ("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
+ ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
+ ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
+ ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
+ ("encoder.deit.norm.weight", "encoder.layernorm.weight"),
+ ("encoder.deit.norm.bias", "encoder.layernorm.bias"),
+ ]
+ )
+
+ return rename_keys
+
+
+# we split up the matrix of each encoder layer into queries, keys and values
+def read_in_q_k_v(state_dict, encoder_config):
+ for i in range(encoder_config.num_hidden_layers):
+ # queries, keys and values (only weights, no biases)
+ in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")
+
+ state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
+ : encoder_config.hidden_size, :
+ ]
+ state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
+ encoder_config.hidden_size : encoder_config.hidden_size * 2, :
+ ]
+ state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
+ -encoder_config.hidden_size :, :
+ ]
+
+
+def rename_key(dct, old, new):
+ val = dct.pop(old)
+ dct[new] = val
+
+
+# We will verify our results on an image of the IAM Handwriting Database
+def prepare_img(checkpoint_url):
+ if "handwritten" in checkpoint_url:
+ url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry
+ # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
+ # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
+ # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
+ # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
+ elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
+ url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
+ im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
+ return im
+
+
+@torch.no_grad()
+def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
+ """
+ Copy/paste/tweak model's weights to our VisionEncoderDecoderModel structure.
+ """
+ # define encoder and decoder configs based on checkpoint_url
+ encoder_config = ViTConfig(image_size=384, qkv_bias=False)
+ decoder_config = TrOCRConfig()
+
+ # size of the architecture
+ if "base" in checkpoint_url:
+ decoder_config.encoder_hidden_size = 768
+ elif "large" in checkpoint_url:
+ # use ViT-large encoder
+ encoder_config.hidden_size = 1024
+ encoder_config.intermediate_size = 4096
+ encoder_config.num_hidden_layers = 24
+ encoder_config.num_attention_heads = 16
+ decoder_config.encoder_hidden_size = 1024
+ else:
+ raise ValueError("Should either find 'base' or 'large' in checkpoint URL")
+
+ # the large-printed + stage1 checkpoints use sinusoidal position embeddings and no layernorm afterwards
+ if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
+ decoder_config.tie_word_embeddings = False
+ decoder_config.activation_function = "relu"
+ decoder_config.max_position_embeddings = 1024
+ decoder_config.scale_embedding = True
+ decoder_config.use_learned_position_embeddings = False
+ decoder_config.layernorm_embedding = False
+
+ # load HuggingFace model
+ encoder = ViTModel(encoder_config, add_pooling_layer=False)
+ decoder = TrOCRForCausalLM(decoder_config)
+ model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
+ model.eval()
+
+ # load state_dict of original model, rename some keys
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]
+
+ rename_keys = create_rename_keys(encoder_config, decoder_config)
+ for src, dest in rename_keys:
+ rename_key(state_dict, src, dest)
+ read_in_q_k_v(state_dict, encoder_config)
+
+ # remove parameters we don't need
+ del state_dict["encoder.deit.head.weight"]
+ del state_dict["encoder.deit.head.bias"]
+ del state_dict["decoder.version"]
+
+ # add prefix to decoder keys
+ for key, val in state_dict.copy().items():
+ val = state_dict.pop(key)
+ if key.startswith("decoder") and "output_projection" not in key:
+ state_dict["decoder.model." + key] = val
+ else:
+ state_dict[key] = val
+
+ # load state dict
+ model.load_state_dict(state_dict)
+
+ # Check outputs on an image
+ image_processor = ViTImageProcessor(size=encoder_config.image_size)
+ tokenizer = RobertaTokenizer.from_pretrained("FacebookAI/roberta-large")
+ processor = TrOCRProcessor(image_processor, tokenizer)
+
+ pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values
+
+ # verify logits
+ decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
+ outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
+ logits = outputs.logits
+
+ expected_shape = torch.Size([1, 1, 50265])
+ if "trocr-base-handwritten" in checkpoint_url:
+ expected_slice = torch.tensor(
+ [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
+ )
+ elif "trocr-large-handwritten" in checkpoint_url:
+ expected_slice = torch.tensor(
+ [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
+ )
+ elif "trocr-base-printed" in checkpoint_url:
+ expected_slice = torch.tensor(
+ [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
+ )
+ elif "trocr-large-printed" in checkpoint_url:
+ expected_slice = torch.tensor(
+ [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
+ )
+
+ if "stage1" not in checkpoint_url:
+ assert logits.shape == expected_shape, "Shape of logits not as expected"
+ assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"
+
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+ print(f"Saving model to {pytorch_dump_folder_path}")
+ model.save_pretrained(pytorch_dump_folder_path)
+ print(f"Saving processor to {pytorch_dump_folder_path}")
+ processor.save_pretrained(pytorch_dump_folder_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument(
+ "--checkpoint_url",
+ default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
+ type=str,
+ help="URL to the original PyTorch checkpoint (.pt file).",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
+ )
+ args = parser.parse_args()
+ convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
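The only non-trivial tensor surgery in the script is the fused-qkv split in `read_in_q_k_v`. A toy version with a tiny hidden size (the values are random and purely illustrative) shows how the slices tile the fused matrix:

```python
import torch

# Toy version of the qkv split performed in read_in_q_k_v, with hidden_size=4.
hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused [q; k; v] rows

q = in_proj_weight[:hidden_size, :]
k = in_proj_weight[hidden_size : 2 * hidden_size, :]
v = in_proj_weight[-hidden_size:, :]

# The three slices exactly reconstruct the fused matrix.
assert torch.equal(torch.cat([q, k, v], dim=0), in_proj_weight)
```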
diff --git a/venv/lib/python3.10/site-packages/transformers/models/trocr/modeling_trocr.py b/venv/lib/python3.10/site-packages/transformers/models/trocr/modeling_trocr.py
new file mode 100644
index 0000000000000000000000000000000000000000..72ead7143ad4928bb93d24e52d7c7b936ce1c02d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/trocr/modeling_trocr.py
@@ -0,0 +1,967 @@
+# coding=utf-8
+# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch TrOCR decoder model (based on RoBERTa)."""
+
+
+import copy
+import math
+from typing import Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
+from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions
+from ...modeling_utils import PreTrainedModel
+from ...utils import add_start_docstrings, logging, replace_return_docstrings
+from .configuration_trocr import TrOCRConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "TrOCRConfig"
+_CHECKPOINT_FOR_DOC = "microsoft/trocr-base-handwritten"
+
+
+from ..deprecated._archive_maps import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# Copied from transformers.models.bart.modeling_bart.BartLearnedPositionalEmbedding with Bart->TrOCR
+class TrOCRLearnedPositionalEmbedding(nn.Embedding):
+ """
+ This module learns positional embeddings up to a fixed maximum size.
+ """
+
+ def __init__(self, num_embeddings: int, embedding_dim: int):
+ # TrOCR is set up so that if padding_idx is specified then offset the embedding ids by 2
+ # and adjust num_embeddings appropriately. Other models don't have this hack
+ self.offset = 2
+ super().__init__(num_embeddings + self.offset, embedding_dim)
+
+ def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
+ """The shape of `input_ids` is expected to be `[bsz x seqlen]`."""
+
+ bsz, seq_len = input_ids.shape[:2]
+ positions = torch.arange(
+ past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
+ ).expand(bsz, -1)
+
+ return super().forward(positions + self.offset)
+
+
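A minimal shape sketch of the learned positional embedding defined above (the sizes are arbitrary; only the shape of `input_ids` matters, since positions are derived from it):

```python
import torch
from transformers.models.trocr.modeling_trocr import TrOCRLearnedPositionalEmbedding

# Positions 0..seq_len-1 are shifted by the hard-coded offset of 2 before lookup,
# so the table holds num_embeddings + 2 rows.
emb = TrOCRLearnedPositionalEmbedding(num_embeddings=512, embedding_dim=16)
input_ids = torch.zeros(1, 5, dtype=torch.long)  # only the shape is used
print(emb.weight.shape)      # torch.Size([514, 16])
print(emb(input_ids).shape)  # torch.Size([1, 5, 16])
```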
+class TrOCRSinusoidalPositionalEmbedding(nn.Module):
+ """This module produces sinusoidal positional embeddings of any length."""
+
+ def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
+ super().__init__()
+ self.offset = 2
+ self.embedding_dim = embedding_dim
+ self.padding_idx = padding_idx
+ self.weights = self.get_embedding(num_positions, embedding_dim, padding_idx)
+ self.register_buffer("_float_tensor", torch.FloatTensor(1))
+
+ @staticmethod
+ def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
+ """
+ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the
+ description in Section 3.5 of "Attention Is All You Need".
+ """
+ half_dim = embedding_dim // 2
+ emb = math.log(10000) / (half_dim - 1)
+ emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
+ emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
+ if embedding_dim % 2 == 1:
+ # zero pad
+ emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
+ if padding_idx is not None:
+ emb[padding_idx, :] = 0
+
+ return emb.to(torch.get_default_dtype())
+
+ @torch.no_grad()
+ def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
+ bsz, seq_len = input_ids.size()
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(
+ input_ids.device
+ )
+
+ # expand embeddings if needed
+ max_pos = self.padding_idx + 1 + seq_len
+ if self.weights is None or max_pos > self.weights.size(0):
+ # recompute/expand embeddings if needed
+ self.weights = self.get_embedding(max_pos, self.embedding_dim, self.padding_idx)
+ self.weights = self.weights.to(self._float_tensor)
+
+ x = self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach()
+
+ return x
+
+ def create_position_ids_from_input_ids(
+ self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0
+ ):
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
+ symbols are ignored. This is modified from fairseq's `utils.make_positions`.
+ """
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
+ mask = input_ids.ne(padding_idx).int()
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
+ return incremental_indices.long() + padding_idx
+
+
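`create_position_ids_from_input_ids` above keeps padded positions at `padding_idx` and numbers real tokens from `padding_idx + 1`. The same arithmetic, written standalone (with `past_key_values_length=0`):

```python
import torch

padding_idx = 1
input_ids = torch.tensor([[5, 6, 7, 1, 1]])  # two trailing pad tokens

mask = input_ids.ne(padding_idx).int()
position_ids = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
print(position_ids)  # tensor([[2, 3, 4, 1, 1]])
```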
+class TrOCRAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper."""
+
+ def __init__(
+ self,
+ config,
+ embed_dim: int,
+ num_heads: int,
+ kdim: int = None,
+ vdim: int = None,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ is_cross_attention: bool = False,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.kdim = kdim if kdim is not None else embed_dim
+ self.vdim = vdim if vdim is not None else embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ if not (self.head_dim * num_heads == self.embed_dim):
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
+ f" {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+
+ self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+ bsz, tgt_len, embed_dim = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_states = value_states.view(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.size() != (self.num_heads,):
+ raise ValueError(
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+ attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
+
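A quick shape check of the self-attention path above, with random weights and arbitrary sizes; `is_decoder=True` makes the module return its key/value cache:

```python
import torch
from transformers import TrOCRConfig
from transformers.models.trocr.modeling_trocr import TrOCRAttention

config = TrOCRConfig(d_model=64, decoder_attention_heads=4)
attn = TrOCRAttention(config, embed_dim=64, num_heads=4, is_decoder=True)

hidden = torch.randn(2, 7, 64)  # (batch, tgt_len, embed_dim)
out, weights, past = attn(hidden, output_attentions=True)
print(out.shape)      # torch.Size([2, 7, 64])
print(weights.shape)  # torch.Size([2, 4, 7, 7])
print(past[0].shape)  # torch.Size([2, 4, 7, 16]) -> cached keys per head
```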
+class TrOCRDecoderLayer(nn.Module):
+ def __init__(self, config: TrOCRConfig):
+ super().__init__()
+ self.embed_dim = config.hidden_size
+
+ self.self_attn = TrOCRAttention(
+ config,
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ )
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ if config.is_decoder:
+ self.encoder_attn = TrOCRAttention(
+ config,
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ kdim=config.cross_attention_hidden_size,
+ vdim=config.cross_attention_hidden_size,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ is_cross_attention=True,
+ )
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = True,
+ ):
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ encoder_hidden_states (`torch.FloatTensor`):
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(decoder_attention_heads,)`.
+ cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
+ size *(decoder_attention_heads,)*.
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
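When `use_cache=True` and encoder states are passed, the decoder layer above returns its hidden states plus a 4-tuple cache (self-attention key/value followed by cross-attention key/value). A small sketch with random weights and arbitrary sizes:

```python
import torch
from transformers import TrOCRConfig
from transformers.models.trocr.modeling_trocr import TrOCRDecoderLayer

config = TrOCRConfig(d_model=64, decoder_attention_heads=4, decoder_ffn_dim=128)
config.is_decoder = True  # needed so the layer builds its cross-attention block

layer = TrOCRDecoderLayer(config)
hidden = torch.randn(1, 5, 64)
encoder_states = torch.randn(1, 9, 64)

hidden_out, present = layer(hidden, encoder_hidden_states=encoder_states, use_cache=True)
print(hidden_out.shape)  # torch.Size([1, 5, 64])
print(len(present))      # 4 -> (self_k, self_v, cross_k, cross_v)
```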
+class TrOCRPreTrainedModel(PreTrainedModel):
+ config_class = TrOCRConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ std = self.config.init_std
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+TROCR_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`TrOCRConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+class TrOCRDecoder(TrOCRPreTrainedModel):
+ """
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TrOCRDecoderLayer`]
+
+ Args:
+ config: TrOCRConfig
+ """
+
+ def __init__(self, config: TrOCRConfig):
+ super().__init__(config)
+ self.dropout = config.dropout
+ self.layerdrop = config.decoder_layerdrop
+ self.padding_idx = config.pad_token_id
+ self.embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+
+ if config.use_learned_position_embeddings:
+ self.embed_positions = TrOCRLearnedPositionalEmbedding(config.max_position_embeddings, config.hidden_size)
+ else:
+ self.embed_positions = TrOCRSinusoidalPositionalEmbedding(
+ config.max_position_embeddings + self.padding_idx + 1,
+ config.hidden_size,
+ self.padding_idx,
+ )
+
+ if config.layernorm_embedding:
+ self.layernorm_embedding = nn.LayerNorm(config.hidden_size)
+ else:
+ self.layernorm_embedding = None
+
+ self.layers = nn.ModuleList([TrOCRDecoderLayer(config) for _ in range(config.decoder_layers)])
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ def forward(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ head_mask=None,
+ cross_attn_head_mask=None,
+ past_key_values=None,
+ inputs_embeds=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ ):
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
+ on hidden heads. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ input = input_ids
+ input_ids = input_ids.view(-1, input.shape[-1])
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ input = inputs_embeds[:, :, -1]
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ if self.config.use_learned_position_embeddings:
+ embed_pos = self.embed_positions(input, past_key_values_length=past_key_values_length)
+ else:
+ embed_pos = self.embed_positions(input_ids, past_key_values_length=past_key_values_length)
+
+ hidden_states = inputs_embeds + embed_pos
+
+ if self.layernorm_embedding is not None:
+ hidden_states = self.layernorm_embedding(hidden_states)
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ input_shape = input.shape
+
+ attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
+ )
+
+ # expand encoder attention mask
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _prepare_4d_attention_mask(
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
+ )
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache = False`..."
+ )
+ use_cache = False
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+ next_decoder_cache = () if use_cache else None
+
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
+ if attn_mask is not None:
+ if attn_mask.size()[0] != (len(self.layers)):
+ raise ValueError(
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+ f" {attn_mask.size()[0]}."
+ )
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop:
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
+ None,
+ output_attentions,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ cross_attn_layer_head_mask=(
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
+ ),
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[2],)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ )
+
+
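Putting the pieces together, the decoder above runs standalone with random weights; a tiny configuration (all sizes arbitrary) keeps the sketch fast:

```python
import torch
from transformers import TrOCRConfig
from transformers.models.trocr.modeling_trocr import TrOCRDecoder

config = TrOCRConfig(
    vocab_size=100, d_model=64, decoder_layers=2, decoder_attention_heads=4, decoder_ffn_dim=128
)
config.is_decoder = True

decoder = TrOCRDecoder(config)
input_ids = torch.randint(0, config.vocab_size, (1, 6))
outputs = decoder(input_ids=input_ids)

print(outputs.last_hidden_state.shape)  # torch.Size([1, 6, 64])
print(len(outputs.past_key_values))     # 2 -> one cache entry per decoder layer
```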
+@add_start_docstrings(
+ "The TrOCR decoder wrapper, a helper used to correctly load pretrained checkpoints within the [`EncoderDecoderModel`] framework.",
+ TROCR_START_DOCSTRING,
+)
+class TrOCRDecoderWrapper(TrOCRPreTrainedModel):
+ """
+ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
+ used in combination with the [`EncoderDecoderModel`] framework.
+ """
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.decoder = TrOCRDecoder(config)
+
+ def forward(self, *args, **kwargs):
+ return self.decoder(*args, **kwargs)
+
+
+@add_start_docstrings(
+ "The TrOCR Decoder with a language modeling head. Can be used as the decoder part of [`EncoderDecoderModel`] and"
+ " [`VisionEncoderDecoderModel`].",
+ TROCR_START_DOCSTRING,
+)
+class TrOCRForCausalLM(TrOCRPreTrainedModel):
+ _tied_weights_keys = ["output_projection.weight"]
+
+ def __init__(self, config):
+ config = copy.deepcopy(config)
+ config.is_decoder = True
+ config.is_encoder_decoder = False
+ super().__init__(config)
+ self.model = TrOCRDecoderWrapper(config)
+
+ self.output_projection = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.decoder.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.decoder.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.output_projection
+
+ def set_output_embeddings(self, new_embeddings):
+ self.output_projection = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.model.decoder = decoder
+
+ def get_decoder(self):
+ return self.model.decoder
+
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ if the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
+ in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
+ tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import (
+ ... TrOCRConfig,
+ ... TrOCRProcessor,
+ ... TrOCRForCausalLM,
+ ... ViTConfig,
+ ... ViTModel,
+ ... VisionEncoderDecoderModel,
+ ... )
+ >>> import requests
+ >>> from PIL import Image
+
+ >>> # TrOCR is a decoder model and should be used within a VisionEncoderDecoderModel
+ >>> # init vision2text model with random weights
+ >>> encoder = ViTModel(ViTConfig())
+ >>> decoder = TrOCRForCausalLM(TrOCRConfig())
+ >>> model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
+
+ >>> # If you want to start from the pretrained model, load the checkpoint with `VisionEncoderDecoderModel`
+ >>> processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
+ >>> model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")
+
+ >>> # load image from the IAM dataset
+ >>> url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
+ >>> pixel_values = processor(image, return_tensors="pt").pixel_values
+ >>> text = "industry, ' Mr. Brown commented icily. ' Let us have a"
+
+ >>> # training
+ >>> model.config.decoder_start_token_id = processor.tokenizer.eos_token_id
+ >>> model.config.pad_token_id = processor.tokenizer.pad_token_id
+ >>> model.config.vocab_size = model.config.decoder.vocab_size
+
+ >>> labels = processor.tokenizer(text, return_tensors="pt").input_ids
+ >>> outputs = model(pixel_values, labels=labels)
+ >>> loss = outputs.loss
+ >>> round(loss.item(), 2)
+ 5.30
+
+ >>> # inference
+ >>> generated_ids = model.generate(pixel_values)
+ >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+ >>> generated_text
+ 'industry, " Mr. Brown commented icily. " Let us have a'
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model.decoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ head_mask=head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ logits = self.output_projection(outputs[0])
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return CausalLMOutputWithCrossAttentions(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
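+
+    # For illustration: the loss above is plain token-level cross-entropy. `logits` has shape
+    # (batch_size, sequence_length, vocab_size); `.view(-1, self.config.vocab_size)` flattens batch and time so
+    # every position is scored against its label, and positions labelled `-100` are ignored by
+    # `CrossEntropyLoss` (its default `ignore_index`). Shifting labels to build `decoder_input_ids` is expected
+    # to happen in the caller (e.g. `VisionEncoderDecoderModel`), not here.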
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
+ ):
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
+ if attention_mask is None:
+ attention_mask = input_ids.new_ones(input_ids.shape)
+
+ if past_key_values:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = input_ids.shape[1] - 1
+
+ input_ids = input_ids[:, remove_prefix_length:]
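+                # e.g. if the cache already covers 5 tokens (past_length == 5) and `input_ids` has shape
+                # (batch_size, 6), only the not-yet-cached column `input_ids[:, 5:]` is kept, so the decoder
+                # only processes one new token per generation step.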
+ # first step, decoder_cached_states are empty
+ return {
+            "input_ids": input_ids,  # possibly trimmed above to just the tokens that are not yet in the cache
+ "attention_mask": attention_mask,
+ "past_key_values": past_key_values,
+ "use_cache": use_cache,
+ }
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
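+
+    # Sketch of what `_reorder_cache` does during beam search: every cached tensor has the beam-expanded batch
+    # dimension first, e.g. shape (batch_size * num_beams, num_heads, seq_len, head_dim). When beams are
+    # re-ranked, `beam_idx` selects the rows whose histories survive, for each layer:
+    #
+    #     layer_past[0].index_select(0, beam_idx)  # keys, reordered to follow the chosen beams
+    #     layer_past[1].index_select(0, beam_idx)  # values, reordered the same way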
diff --git a/venv/lib/python3.10/site-packages/transformers/models/trocr/processing_trocr.py b/venv/lib/python3.10/site-packages/transformers/models/trocr/processing_trocr.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7ce7362d49afa57c475cd87f79d91caa4b17680
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/trocr/processing_trocr.py
@@ -0,0 +1,140 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Processor class for TrOCR.
+"""
+import warnings
+from contextlib import contextmanager
+
+from ...processing_utils import ProcessorMixin
+
+
+class TrOCRProcessor(ProcessorMixin):
+ r"""
+ Constructs a TrOCR processor which wraps a vision image processor and a TrOCR tokenizer into a single processor.
+
+ [`TrOCRProcessor`] offers all the functionalities of [`ViTImageProcessor`/`DeiTImageProcessor`] and
+ [`RobertaTokenizer`/`XLMRobertaTokenizer`]. See the [`~TrOCRProcessor.__call__`] and [`~TrOCRProcessor.decode`] for
+ more information.
+
+ Args:
+ image_processor ([`ViTImageProcessor`/`DeiTImageProcessor`], *optional*):
+ An instance of [`ViTImageProcessor`/`DeiTImageProcessor`]. The image processor is a required input.
+ tokenizer ([`RobertaTokenizer`/`XLMRobertaTokenizer`], *optional*):
+ An instance of [`RobertaTokenizer`/`XLMRobertaTokenizer`]. The tokenizer is a required input.
+ """
+
+ attributes = ["image_processor", "tokenizer"]
+ image_processor_class = "AutoImageProcessor"
+ tokenizer_class = "AutoTokenizer"
+
+ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
+ feature_extractor = None
+ if "feature_extractor" in kwargs:
+ warnings.warn(
+ "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
+ " instead.",
+ FutureWarning,
+ )
+ feature_extractor = kwargs.pop("feature_extractor")
+
+ image_processor = image_processor if image_processor is not None else feature_extractor
+ if image_processor is None:
+ raise ValueError("You need to specify an `image_processor`.")
+ if tokenizer is None:
+ raise ValueError("You need to specify a `tokenizer`.")
+
+ super().__init__(image_processor, tokenizer)
+ self.current_processor = self.image_processor
+ self._in_target_context_manager = False
+
+ def __call__(self, *args, **kwargs):
+ """
+ When used in normal mode, this method forwards all its arguments to AutoImageProcessor's
+ [`~AutoImageProcessor.__call__`] and returns its output. If used in the context
+ [`~TrOCRProcessor.as_target_processor`] this method forwards all its arguments to TrOCRTokenizer's
+        [`~TrOCRTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information.
+ """
+ # For backward compatibility
+ if self._in_target_context_manager:
+ return self.current_processor(*args, **kwargs)
+
+ images = kwargs.pop("images", None)
+ text = kwargs.pop("text", None)
+ if len(args) > 0:
+ images = args[0]
+ args = args[1:]
+
+ if images is None and text is None:
+ raise ValueError("You need to specify either an `images` or `text` input to process.")
+
+ if images is not None:
+ inputs = self.image_processor(images, *args, **kwargs)
+ if text is not None:
+ encodings = self.tokenizer(text, **kwargs)
+
+ if text is None:
+ return inputs
+ elif images is None:
+ return encodings
+ else:
+ inputs["labels"] = encodings["input_ids"]
+ return inputs
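+
+    # Minimal usage sketch for `__call__` above (assuming a processor loaded e.g. via
+    # `TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")`):
+    #
+    #     batch = processor(images=image, text="a line of text", return_tensors="pt")
+    #     batch["pixel_values"]  # produced by the image processor
+    #     batch["labels"]        # token ids from the tokenizer, usable as labels for training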
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to TrOCRTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer
+ to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to TrOCRTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the
+ docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @contextmanager
+ def as_target_processor(self):
+ """
+ Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning TrOCR.
+ """
+ warnings.warn(
+ "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
+ "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
+            "your image inputs, or in a separate call)."
+ )
+ self._in_target_context_manager = True
+ self.current_processor = self.tokenizer
+ yield
+ self.current_processor = self.image_processor
+ self._in_target_context_manager = False
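+
+    # The deprecated pattern handled above and its replacement, side by side (illustrative only):
+    #
+    #     with processor.as_target_processor():       # old
+    #         labels = processor(text).input_ids
+    #
+    #     labels = processor(text=text).input_ids      # new, equivalent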
+
+ @property
+ def feature_extractor_class(self):
+ warnings.warn(
+ "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
+ FutureWarning,
+ )
+ return self.image_processor_class
+
+ @property
+ def feature_extractor(self):
+ warnings.warn(
+ "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
+ FutureWarning,
+ )
+ return self.image_processor
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vivit/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/vivit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec446b79707255023729510b0cbf3b3ac5801862
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/vivit/__init__.py
@@ -0,0 +1,78 @@
+# flake8: noqa
+# There's no way to ignore "F401 '...' imported but unused" warnings in this
+# module, but to preserve other warnings. So, don't check this module at all.
+
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+# rely on isort to merge the imports
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+_import_structure = {
+ "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
+}
+try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
+
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_vivit"] = [
+ "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "VivitModel",
+ "VivitPreTrainedModel",
+ "VivitForVideoClassification",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
+
+ try:
+ if not is_vision_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .image_processing_vivit import VivitImageProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_vivit import (
+ VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ VivitForVideoClassification,
+ VivitModel,
+ VivitPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
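+
+# Illustrative note on the `_LazyModule` indirection above: the submodules listed in `_import_structure` are only
+# imported when one of their attributes is first accessed (e.g. `transformers.models.vivit.VivitModel`), so the
+# torch- and vision-guarded entries stay untouched and the package can be imported without those optional
+# dependencies installed.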
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..17c20387db72b2fa74015f4fa8cabaabb5875eb7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/convert_vivit_flax_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/convert_vivit_flax_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..38f692ef0d7eeb6fee7dff2cfef1cee464c7baca
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/convert_vivit_flax_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/image_processing_vivit.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/image_processing_vivit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e09f8dad833552a742dd713b03cbc77b1530870d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/image_processing_vivit.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vivit/configuration_vivit.py b/venv/lib/python3.10/site-packages/transformers/models/vivit/configuration_vivit.py
new file mode 100644
index 0000000000000000000000000000000000000000..28ac13496f82f83a68e08982775d19787cb44485
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/vivit/configuration_vivit.py
@@ -0,0 +1,119 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" ViViT model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class VivitConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`VivitModel`]. It is used to instantiate a ViViT
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the ViViT
+ [google/vivit-b-16x2-kinetics400](https://huggingface.co/google/vivit-b-16x2-kinetics400) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ image_size (`int`, *optional*, defaults to 224):
+ The size (resolution) of each image.
+ num_frames (`int`, *optional*, defaults to 32):
+ The number of frames in each video.
+ tubelet_size (`List[int]`, *optional*, defaults to `[2, 16, 16]`):
+ The size (resolution) of each tubelet.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu_fast"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"`, `"gelu_fast"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
+ The epsilon used by the layer normalization layers.
+ qkv_bias (`bool`, *optional*, defaults to `True`):
+ Whether to add a bias to the queries, keys and values.
+
+ Example:
+
+ ```python
+ >>> from transformers import VivitConfig, VivitModel
+
+ >>> # Initializing a ViViT google/vivit-b-16x2-kinetics400 style configuration
+ >>> configuration = VivitConfig()
+
+ >>> # Initializing a model (with random weights) from the google/vivit-b-16x2-kinetics400 style configuration
+ >>> model = VivitModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "vivit"
+
+ def __init__(
+ self,
+ image_size=224,
+ num_frames=32,
+ tubelet_size=[2, 16, 16],
+ num_channels=3,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu_fast",
+ hidden_dropout_prob=0.0,
+ attention_probs_dropout_prob=0.0,
+ initializer_range=0.02,
+ layer_norm_eps=1e-06,
+ qkv_bias=True,
+ **kwargs,
+ ):
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.intermediate_size = intermediate_size
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+
+ self.image_size = image_size
+ self.num_frames = num_frames
+ self.tubelet_size = tubelet_size
+ self.num_channels = num_channels
+ self.qkv_bias = qkv_bias
+
+ super().__init__(**kwargs)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vivit/convert_vivit_flax_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/vivit/convert_vivit_flax_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..571dfe896ef5f630ed0cf7abcadcde6dc63a457c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/vivit/convert_vivit_flax_to_pytorch.py
@@ -0,0 +1,230 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert Flax ViViT checkpoints from the original repository to PyTorch. URL:
+https://github.com/google-research/scenic/tree/main/scenic/projects/vivit
+"""
+import argparse
+import json
+import os.path
+from collections import OrderedDict
+
+import numpy as np
+import requests
+import torch
+from flax.training.checkpoints import restore_checkpoint
+from huggingface_hub import hf_hub_download
+
+from transformers import VivitConfig, VivitForVideoClassification, VivitImageProcessor
+from transformers.image_utils import PILImageResampling
+
+
+def download_checkpoint(path):
+ url = "https://storage.googleapis.com/scenic-bucket/vivit/kinetics_400/vivit_base_16x2_unfactorized/checkpoint"
+
+ with open(path, "wb") as f:
+ with requests.get(url, stream=True) as req:
+ for chunk in req.iter_content(chunk_size=2048):
+ f.write(chunk)
+
+
+def get_vivit_config() -> VivitConfig:
+ config = VivitConfig()
+
+ config.num_labels = 400
+ repo_id = "huggingface/label-files"
+ filename = "kinetics400-id2label.json"
+
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+ id2label = {int(k): v for k, v in id2label.items()}
+ config.id2label = id2label
+ config.label2id = {v: k for k, v in id2label.items()}
+ return config
+
+
+# We will verify our results on a video of eating spaghetti
+# Frame indices used: [ 47, 51, 55, 59, 63, 67, 71, 75, 80, 84, 88, 92, 96, 100, 104, 108, 113, 117,
+# 121, 125, 129, 133, 137, 141, 146, 150, 154, 158, 162, 166, 170, 174]
+def prepare_video():
+ file = hf_hub_download(
+ repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti_32_frames.npy", repo_type="dataset"
+ )
+ video = np.load(file)
+ return list(video)
+
+
+def transform_attention(current: np.ndarray):
+ if np.ndim(current) == 2:
+ return transform_attention_bias(current)
+
+ elif np.ndim(current) == 3:
+ return transform_attention_kernel(current)
+
+ else:
+        raise Exception(f"Invalid number of dimensions: {np.ndim(current)}")
+
+
+def transform_attention_bias(current: np.ndarray):
+ return current.flatten()
+
+
+def transform_attention_kernel(current: np.ndarray):
+ return np.reshape(current, (current.shape[0], current.shape[1] * current.shape[2])).T
+
+
+def transform_attention_output_weight(current: np.ndarray):
+ return np.reshape(current, (current.shape[0] * current.shape[1], current.shape[2])).T
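+
+
+# Shape sketch for the two attention reshapes above (base config: hidden_size=768, 12 heads of size 64):
+#   - query/key/value kernels come from Flax as (hidden, heads, head_dim) = (768, 12, 64); flattening the last two
+#     axes and transposing yields the (out_features, in_features) = (768, 768) weight expected by nn.Linear.
+#   - the attention output ("out") kernel is (heads, head_dim, hidden) = (12, 64, 768); flattening the first two
+#     axes and transposing again yields (768, 768).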
+
+
+def transform_state_encoder_block(state_dict, i):
+ state = state_dict["optimizer"]["target"]["Transformer"][f"encoderblock_{i}"]
+
+ prefix = f"encoder.layer.{i}."
+ new_state = {
+ prefix + "intermediate.dense.bias": state["MlpBlock_0"]["Dense_0"]["bias"],
+ prefix + "intermediate.dense.weight": np.transpose(state["MlpBlock_0"]["Dense_0"]["kernel"]),
+ prefix + "output.dense.bias": state["MlpBlock_0"]["Dense_1"]["bias"],
+ prefix + "output.dense.weight": np.transpose(state["MlpBlock_0"]["Dense_1"]["kernel"]),
+ prefix + "layernorm_before.bias": state["LayerNorm_0"]["bias"],
+ prefix + "layernorm_before.weight": state["LayerNorm_0"]["scale"],
+ prefix + "layernorm_after.bias": state["LayerNorm_1"]["bias"],
+ prefix + "layernorm_after.weight": state["LayerNorm_1"]["scale"],
+ prefix + "attention.attention.query.bias": transform_attention(
+ state["MultiHeadDotProductAttention_0"]["query"]["bias"]
+ ),
+ prefix + "attention.attention.query.weight": transform_attention(
+ state["MultiHeadDotProductAttention_0"]["query"]["kernel"]
+ ),
+ prefix + "attention.attention.key.bias": transform_attention(
+ state["MultiHeadDotProductAttention_0"]["key"]["bias"]
+ ),
+ prefix + "attention.attention.key.weight": transform_attention(
+ state["MultiHeadDotProductAttention_0"]["key"]["kernel"]
+ ),
+ prefix + "attention.attention.value.bias": transform_attention(
+ state["MultiHeadDotProductAttention_0"]["value"]["bias"]
+ ),
+ prefix + "attention.attention.value.weight": transform_attention(
+ state["MultiHeadDotProductAttention_0"]["value"]["kernel"]
+ ),
+ prefix + "attention.output.dense.bias": state["MultiHeadDotProductAttention_0"]["out"]["bias"],
+ prefix + "attention.output.dense.weight": transform_attention_output_weight(
+ state["MultiHeadDotProductAttention_0"]["out"]["kernel"]
+ ),
+ }
+
+ return new_state
+
+
+def get_n_layers(state_dict):
+ return sum([1 if "encoderblock_" in k else 0 for k in state_dict["optimizer"]["target"]["Transformer"].keys()])
+
+
+def transform_state(state_dict, classification_head=False):
+ transformer_layers = get_n_layers(state_dict)
+
+ new_state = OrderedDict()
+
+ new_state["layernorm.bias"] = state_dict["optimizer"]["target"]["Transformer"]["encoder_norm"]["bias"]
+ new_state["layernorm.weight"] = state_dict["optimizer"]["target"]["Transformer"]["encoder_norm"]["scale"]
+
+ new_state["embeddings.patch_embeddings.projection.weight"] = np.transpose(
+ state_dict["optimizer"]["target"]["embedding"]["kernel"], (4, 3, 0, 1, 2)
+ )
+ new_state["embeddings.patch_embeddings.projection.bias"] = state_dict["optimizer"]["target"]["embedding"]["bias"]
+
+ new_state["embeddings.cls_token"] = state_dict["optimizer"]["target"]["cls"]
+ new_state["embeddings.position_embeddings"] = state_dict["optimizer"]["target"]["Transformer"]["posembed_input"][
+ "pos_embedding"
+ ]
+
+ for i in range(transformer_layers):
+ new_state.update(transform_state_encoder_block(state_dict, i))
+
+ if classification_head:
+ new_state = {"vivit." + k: v for k, v in new_state.items()}
+ new_state["classifier.weight"] = np.transpose(state_dict["optimizer"]["target"]["output_projection"]["kernel"])
+ new_state["classifier.bias"] = np.transpose(state_dict["optimizer"]["target"]["output_projection"]["bias"])
+
+ return {k: torch.tensor(v) for k, v in new_state.items()}
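+
+
+# Note on the embedding transpose in `transform_state` above (a sketch of the layouts involved): the Flax 3D conv
+# kernel is stored as (time, height, width, in_channels, out_channels), while torch.nn.Conv3d expects
+# (out_channels, in_channels, time, height, width); `np.transpose(kernel, (4, 3, 0, 1, 2))` maps one onto the other.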
+
+
+# checks that image processor settings are the same as in the original implementation
+# original: https://github.com/google-research/scenic/blob/main/scenic/projects/vivit/data/video_tfrecord_dataset.py
+# dataset specific config:
+# https://github.com/google-research/scenic/blob/main/scenic/projects/vivit/configs/kinetics400/vivit_base_k400.py
+def get_processor() -> VivitImageProcessor:
+ extractor = VivitImageProcessor()
+
+ assert extractor.do_resize is True
+ assert extractor.size == {"shortest_edge": 256}
+ assert extractor.do_center_crop is True
+ assert extractor.crop_size == {"width": 224, "height": 224}
+ assert extractor.resample == PILImageResampling.BILINEAR
+
+ # here: https://github.com/deepmind/dmvr/blob/master/dmvr/modalities.py
+    # one can see that `add_image` has default values for normalization_mean and normalization_std set to 0 and 1
+ # which effectively means no normalization (and ViViT does not overwrite those when calling this func)
+ assert extractor.do_normalize is False
+ assert extractor.do_rescale is True
+ assert extractor.rescale_factor == 1 / 255
+
+ # zero-centering = True in original implementation
+ assert extractor.do_zero_centering is True
+
+ return extractor
+
+
+def convert(output_path: str):
+ flax_model_path = "checkpoint"
+
+ if not os.path.exists(flax_model_path):
+ download_checkpoint(flax_model_path)
+
+ state_dict = restore_checkpoint(flax_model_path, None)
+ new_state = transform_state(state_dict, classification_head=True)
+
+ config = get_vivit_config()
+
+ assert config.image_size == 224
+ assert config.num_frames == 32
+
+ model = VivitForVideoClassification(config)
+ model.load_state_dict(new_state)
+ model.eval()
+
+ extractor = get_processor()
+
+ video = prepare_video()
+ inputs = extractor(video, return_tensors="pt")
+
+ outputs = model(**inputs)
+
+ expected_shape = torch.Size([1, 400])
+ expected_slice = torch.tensor([-1.0543, 2.0764, -0.2104, 0.4439, -0.9658])
+
+ assert outputs.logits.shape == expected_shape
+ assert torch.allclose(outputs.logits[0, :5], expected_slice, atol=1e-4), outputs.logits[0, :5]
+
+ model.save_pretrained(output_path)
+ extractor.save_pretrained(output_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument("--output_model_name", "-o", type=str, help="Output path for the converted HuggingFace model")
+
+ args = parser.parse_args()
+ convert(args.output_model_name)
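+
+# Example invocation of this script (the output directory name is arbitrary):
+#
+#     python convert_vivit_flax_to_pytorch.py --output_model_name ./vivit-b-16x2-kinetics400
+#
+# On first run it downloads the Flax checkpoint, converts the weights, sanity-checks a forward pass on the
+# 32-frame test video, and writes the model and image processor to the given directory.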
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vivit/image_processing_vivit.py b/venv/lib/python3.10/site-packages/transformers/models/vivit/image_processing_vivit.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b62aedc234e88513d0d7b475939fdc9a766ec8c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/vivit/image_processing_vivit.py
@@ -0,0 +1,423 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Image processor class for Vivit."""
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from transformers.utils import is_vision_available
+from transformers.utils.generic import TensorType
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
+ get_resize_output_image_size,
+ rescale,
+ resize,
+ to_channel_dimension_format,
+)
+from ...image_utils import (
+ IMAGENET_STANDARD_MEAN,
+ IMAGENET_STANDARD_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ is_valid_image,
+ to_numpy_array,
+ valid_images,
+ validate_kwargs,
+ validate_preprocess_arguments,
+)
+from ...utils import logging
+
+
+if is_vision_available():
+ import PIL
+
+logger = logging.get_logger(__name__)
+
+
+def make_batched(videos) -> List[List[ImageInput]]:
+ if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
+ return videos
+
+ elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
+ return [videos]
+
+ elif is_valid_image(videos):
+ return [[videos]]
+
+ raise ValueError(f"Could not make batched video from {videos}")
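+
+
+# Examples of what `make_batched` accepts (illustrative):
+#   a single image/array `img`                 -> [[img]]           (one video with one frame)
+#   a list of frames [f1, f2, ...]             -> [[f1, f2, ...]]   (one video)
+#   a list of videos [[f1, ...], [g1, ...]]    -> returned unchanged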
+
+
+class VivitImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs a Vivit image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
+ `do_resize` parameter in the `preprocess` method.
+        size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 256}`):
+            Size of the output image after resizing. The shortest edge of the image will be resized to
+            `size["shortest_edge"]` while maintaining the aspect ratio of the original image. Can be overridden by
+ `size` in the `preprocess` method.
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
+ Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
+ `preprocess` method.
+ do_center_crop (`bool`, *optional*, defaults to `True`):
+ Whether to center crop the image to the specified `crop_size`. Can be overridden by the `do_center_crop`
+ parameter in the `preprocess` method.
+ crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
+ Size of the image after applying the center crop. Can be overridden by the `crop_size` parameter in the
+ `preprocess` method.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
+ parameter in the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/127.5`):
+ Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter
+ in the `preprocess` method.
+ offset (`bool`, *optional*, defaults to `True`):
+            Whether to scale the image in both negative and positive directions. Can be overridden by the `offset` in
+ the `preprocess` method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+ method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ do_center_crop: bool = True,
+ crop_size: Dict[str, int] = None,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 127.5,
+ offset: bool = True,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ size = size if size is not None else {"shortest_edge": 256}
+ size = get_size_dict(size, default_to_square=False)
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
+
+ self.do_resize = do_resize
+ self.size = size
+ self.do_center_crop = do_center_crop
+ self.crop_size = crop_size
+ self.resample = resample
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.offset = offset
+ self.do_normalize = do_normalize
+ self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
+ self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
+ self._valid_processor_keys = [
+ "videos",
+ "do_resize",
+ "size",
+ "resample",
+ "do_center_crop",
+ "crop_size",
+ "do_rescale",
+ "rescale_factor",
+ "offset",
+ "do_normalize",
+ "image_mean",
+ "image_std",
+ "return_tensors",
+ "data_format",
+ "input_data_format",
+ ]
+
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize an image.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Size of the output image. If `size` is of the form `{"height": h, "width": w}`, the output image will
+ have the size `(h, w)`. If `size` is of the form `{"shortest_edge": s}`, the output image will have its
+ shortest edge of length `s` while keeping the aspect ratio of the original image.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
+                Resampling filter to use when resizing the image.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ size = get_size_dict(size, default_to_square=False)
+ if "shortest_edge" in size:
+ output_size = get_resize_output_image_size(
+ image, size["shortest_edge"], default_to_square=False, input_data_format=input_data_format
+ )
+ elif "height" in size and "width" in size:
+ output_size = (size["height"], size["width"])
+ else:
+ raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
+ return resize(
+ image,
+ size=output_size,
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+
+ # Copied from transformers.models.efficientnet.image_processing_efficientnet.EfficientNetImageProcessor.rescale
+ def rescale(
+ self,
+ image: np.ndarray,
+ scale: Union[int, float],
+ offset: bool = True,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ):
+ """
+ Rescale an image by a scale factor.
+
+ If `offset` is `True`, the image has its values rescaled by `scale` and then offset by 1. If `scale` is
+ 1/127.5, the image is rescaled between [-1, 1].
+ image = image * scale - 1
+
+ If `offset` is `False`, and `scale` is 1/255, the image is rescaled between [0, 1].
+ image = image * scale
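+
+        For example, with the default `scale = 1/127.5` and `offset=True`, a uint8 value of 0 maps to
+        0 * (1/127.5) - 1 = -1.0 and 255 maps to 255 * (1/127.5) - 1 = 1.0, so pixel values end up in `[-1, 1]`.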
+
+ Args:
+ image (`np.ndarray`):
+ Image to rescale.
+ scale (`int` or `float`):
+ Scale to apply to the image.
+ offset (`bool`, *optional*):
+ Whether to scale the image in both negative and positive directions.
+ data_format (`str` or `ChannelDimension`, *optional*):
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format of the input image. If not provided, it will be inferred.
+ """
+ rescaled_image = rescale(
+ image, scale=scale, data_format=data_format, input_data_format=input_data_format, **kwargs
+ )
+
+ if offset:
+ rescaled_image = rescaled_image - 1
+
+ return rescaled_image
+
+ def _preprocess_image(
+ self,
+ image: ImageInput,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_center_crop: bool = None,
+ crop_size: Dict[str, int] = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ offset: bool = None,
+ do_normalize: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ """Preprocesses a single image."""
+
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+
+ if offset and not do_rescale:
+ raise ValueError("For offset, do_rescale must also be set to True.")
+
+ # All transformations expect numpy arrays.
+ image = to_numpy_array(image)
+
+ if is_scaled_image(image) and do_rescale:
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled images. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ input_data_format = infer_channel_dimension_format(image)
+
+ if do_resize:
+ image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
+
+ if do_center_crop:
+ image = self.center_crop(image, size=crop_size, input_data_format=input_data_format)
+
+ if do_rescale:
+ image = self.rescale(image=image, scale=rescale_factor, offset=offset, input_data_format=input_data_format)
+
+ if do_normalize:
+ image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
+ return image
+
+ def preprocess(
+ self,
+ videos: ImageInput,
+ do_resize: bool = None,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = None,
+ do_center_crop: bool = None,
+ crop_size: Dict[str, int] = None,
+ do_rescale: bool = None,
+ rescale_factor: float = None,
+ offset: bool = None,
+ do_normalize: bool = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ data_format: ChannelDimension = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+    ) -> BatchFeature:
+ """
+ Preprocess an image or batch of images.
+
+ Args:
+ videos (`ImageInput`):
+ Video frames to preprocess. Expects a single or batch of video frames with pixel values ranging from 0
+ to 255. If passing in frames with pixel values between 0 and 1, set `do_rescale=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the image.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Size of the image after applying resize.
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
+                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
+                has an effect if `do_resize` is set to `True`.
+            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
+                Whether to center crop the image.
+            crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
+                Size of the image after applying the center crop.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+                Whether to rescale the image values between `[-1, 1]` if `offset` is `True`, `[0, 1]` otherwise.
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
+ offset (`bool`, *optional*, defaults to `self.offset`):
+ Whether to scale the image in both negative and positive directions.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the image.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: Use the inferred channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ resample = resample if resample is not None else self.resample
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ offset = offset if offset is not None else self.offset
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+
+ size = size if size is not None else self.size
+ size = get_size_dict(size, default_to_square=False)
+ crop_size = crop_size if crop_size is not None else self.crop_size
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
+
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
+
+ if not valid_images(videos):
+ raise ValueError(
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ videos = make_batched(videos)
+
+ videos = [
+ [
+ self._preprocess_image(
+ image=img,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ do_center_crop=do_center_crop,
+ crop_size=crop_size,
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ offset=offset,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ )
+ for img in video
+ ]
+ for video in videos
+ ]
+
+ data = {"pixel_values": videos}
+ return BatchFeature(data=data, tensor_type=return_tensors)
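+
+    # Usage sketch (shapes assume the defaults above: 224x224 center crops, channels-first output):
+    #
+    #     processor = VivitImageProcessor()
+    #     video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(32)]
+    #     batch = processor(video, return_tensors="pt")
+    #     batch["pixel_values"].shape  # torch.Size([1, 32, 3, 224, 224])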
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vivit/modeling_vivit.py b/venv/lib/python3.10/site-packages/transformers/models/vivit/modeling_vivit.py
new file mode 100644
index 0000000000000000000000000000000000000000..08efb85e1f0254c9ab87f75fb514d6c5079d3141
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/vivit/modeling_vivit.py
@@ -0,0 +1,743 @@
+# coding=utf-8
+# Copyright 2023 Google AI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch ViViT model."""
+
+
+import math
+from typing import Optional, Set, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_vivit import VivitConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "google/vivit-b-16x2-kinetics400"
+_CONFIG_FOR_DOC = "VivitConfig"
+
+
+from ..deprecated._archive_maps import VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+class VivitTubeletEmbeddings(nn.Module):
+ """
+ Construct Vivit Tubelet embeddings.
+
+ This module turns a batch of videos of shape (batch_size, num_frames, num_channels, height, width) into a tensor of
+ shape (batch_size, seq_len, hidden_size) to be consumed by a Transformer encoder.
+
+ The seq_len (the number of patches) equals (number of frames // tubelet_size[0]) * (height // tubelet_size[1]) *
+ (width // tubelet_size[2]).
+ """
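+
+    # With the default configuration (num_frames=32, image_size=224, tubelet_size=[2, 16, 16]) the formula above
+    # gives (32 // 2) * (224 // 16) * (224 // 16) = 16 * 14 * 14 = 3136 patches per video.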
+
+ def __init__(self, config):
+ super().__init__()
+ self.num_frames = config.num_frames
+ self.image_size = config.image_size
+ self.patch_size = config.tubelet_size
+ self.num_patches = (
+ (self.image_size // self.patch_size[2])
+ * (self.image_size // self.patch_size[1])
+ * (self.num_frames // self.patch_size[0])
+ )
+ self.embed_dim = config.hidden_size
+
+ self.projection = nn.Conv3d(
+ config.num_channels, config.hidden_size, kernel_size=config.tubelet_size, stride=config.tubelet_size
+ )
+
+ def forward(self, pixel_values):
+ batch_size, num_frames, num_channels, height, width = pixel_values.shape
+ if height != self.image_size or width != self.image_size:
+ raise ValueError(
+ f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})."
+ )
+
+ # permute to (batch_size, num_channels, num_frames, height, width)
+ pixel_values = pixel_values.permute(0, 2, 1, 3, 4)
+
+        x = self.projection(pixel_values)
+        # x has shape (batch_size, hidden_size, num_frames / t, height / h, width / w) for tubelet size (t, h, w);
+        # flatten the spatio-temporal grid and move the channel dim last: (batch_size, seq_len, hidden_size)
+        x = x.flatten(2).transpose(1, 2)
+        return x
+
+
+class VivitEmbeddings(nn.Module):
+ """
+ Vivit Embeddings.
+
+ Creates embeddings from a video using VivitTubeletEmbeddings, adds CLS token and positional embeddings.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
+ self.patch_embeddings = VivitTubeletEmbeddings(config)
+
+ self.position_embeddings = nn.Parameter(
+ torch.zeros(1, self.patch_embeddings.num_patches + 1, config.hidden_size)
+ )
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.config = config
+
+ def forward(self, pixel_values):
+ batch_size = pixel_values.shape[0]
+ embeddings = self.patch_embeddings(pixel_values)
+
+ cls_tokens = self.cls_token.tile([batch_size, 1, 1])
+
+ embeddings = torch.cat((cls_tokens, embeddings), dim=1)
+
+ # add positional encoding to each token
+ embeddings = embeddings + self.position_embeddings
+
+ embeddings = self.dropout(embeddings)
+
+ return embeddings
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->Vivit
+class VivitSelfAttention(nn.Module):
+ def __init__(self, config: VivitConfig) -> None:
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
+ f"heads {config.num_attention_heads}."
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ mixed_query_layer = self.query(hidden_states)
+
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
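+
+    # Shape walk-through for the attention above (base config: hidden_size=768, 12 heads, head_dim=64), with
+    # batch size B and sequence length S: query/key/value are projected to (B, S, 768) and reshaped to
+    # (B, 12, S, 64); `attention_scores` are (B, 12, S, S); the weighted values are merged back into
+    # `context_layer` of shape (B, S, 768).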
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->Vivit
+class VivitSelfOutput(nn.Module):
+ """
+ The residual connection is defined in VivitLayer instead of here (as is the case with other models), due to the
+ layernorm applied before each block.
+ """
+
+ def __init__(self, config: VivitConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ return hidden_states
+
+
+# Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->Vivit
+class VivitAttention(nn.Module):
+ def __init__(self, config: VivitConfig) -> None:
+ super().__init__()
+ self.attention = VivitSelfAttention(config)
+ self.output = VivitSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads: Set[int]) -> None:
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.attention.query = prune_linear_layer(self.attention.query, index)
+ self.attention.key = prune_linear_layer(self.attention.key, index)
+ self.attention.value = prune_linear_layer(self.attention.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
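+
+    # Illustration of the bookkeeping above: pruning 2 of the 12 heads in the base config shrinks
+    # `all_head_size` from 768 to 10 * 64 = 640; query/key/value become nn.Linear(768, 640) and `output.dense`
+    # becomes nn.Linear(640, 768) (pruned along dim=1).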
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ self_outputs = self.attention(hidden_states, head_mask, output_attentions)
+
+ attention_output = self.output(self_outputs[0], hidden_states)
+
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+class VivitIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ return hidden_states
+
+
+class VivitOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states, input_tensor):
+ hidden_states = self.dense(hidden_states)
+
+ hidden_states = self.dropout(hidden_states)
+
+ hidden_states = hidden_states + input_tensor
+
+ return hidden_states
+
+
+class VivitLayer(nn.Module):
+ """This corresponds to the EncoderBlock class in the scenic/vivit implementation."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = VivitAttention(config)
+ self.intermediate = VivitIntermediate(config)
+ self.output = VivitOutput(config)
+ self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_states, head_mask=None, output_attentions=False):
+ self_attention_outputs = self.attention(
+ # in Vivit, layernorm is applied before self-attention
+ self.layernorm_before(hidden_states),
+ head_mask,
+ output_attentions=output_attentions,
+ )
+ attention_output = self_attention_outputs[0]
+ # add self attentions if we output attention weights
+ outputs = self_attention_outputs[1:]
+
+ # first residual connection
+ hidden_states = attention_output + hidden_states
+
+ # in Vivit, layernorm is also applied after self-attention
+ layer_output = self.layernorm_after(hidden_states)
+ layer_output = self.intermediate(layer_output)
+
+ # second residual connection is done here
+ layer_output = self.output(layer_output, hidden_states)
+
+ outputs = (layer_output,) + outputs
+
+ return outputs
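+
+# Dataflow of the pre-norm block above, in shorthand (dropouts elided); this is a descriptive
+# summary of the code, not executable:
+#
+#   x = x + attention(layernorm_before(x))                             # first residual
+#   x = x + output.dense(act(intermediate.dense(layernorm_after(x))))  # second residual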
+
+
+class VivitEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([VivitLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states,
+ head_mask=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ layer_head_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
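+
+# The encoder supports gradient checkpointing during training to trade compute for memory.
+# A minimal sketch using the standard `PreTrainedModel` helper (checkpoint name as in the
+# examples below):
+#
+#   model = VivitModel.from_pretrained("google/vivit-b-16x2-kinetics400")
+#   model.gradient_checkpointing_enable()  # enables checkpointing on supporting submodules
+#   model.train()                          # checkpointing is only applied in training mode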
+
+
+class VivitPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states):
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
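+
+# Shape walkthrough for the pooler (descriptive only): hidden_states has shape
+# (batch_size, sequence_length, hidden_size); slicing `[:, 0]` keeps the [CLS] token,
+# giving (batch_size, hidden_size), which the dense + tanh map to a pooled output of
+# the same shape.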
+
+
+class VivitPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = VivitConfig
+ base_model_prefix = "vivit"
+ main_input_name = "pixel_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv3d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, nn.Parameter):
+ module.data.normal_(mean=0.0, std=self.config.initializer_range)
+
+
+VIVIT_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+    as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`VivitConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+VIVIT_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`VivitImageProcessor`]. See
+ [`VivitImageProcessor.preprocess`] for details.
+
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare ViViT Transformer model outputting raw hidden-states without any specific head on top.",
+ VIVIT_START_DOCSTRING,
+)
+class VivitModel(VivitPreTrainedModel):
+ def __init__(self, config, add_pooling_layer=True):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = VivitEmbeddings(config)
+ self.encoder = VivitEncoder(config)
+
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.pooler = VivitPooler(config) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.patch_embeddings
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model.
+
+ Args:
+ heads_to_prune:
+ dict of {layer_num: list of heads to prune in this layer}
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
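+
+    # Hypothetical example of the `heads_to_prune` mapping documented above:
+    # `model.prune_heads({1: [0, 2], 4: [5]})` prunes heads 0 and 2 of layer 1 and head 5
+    # of layer 4 (the public `PreTrainedModel.prune_heads` helper delegates to this method).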
+
+ @add_start_docstrings_to_model_forward(VIVIT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], BaseModelOutputWithPooling]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> import av
+ >>> import numpy as np
+
+ >>> from transformers import VivitImageProcessor, VivitModel
+ >>> from huggingface_hub import hf_hub_download
+
+ >>> np.random.seed(0)
+
+
+ >>> def read_video_pyav(container, indices):
+ ... '''
+ ... Decode the video with PyAV decoder.
+ ... Args:
+ ... container (`av.container.input.InputContainer`): PyAV container.
+ ... indices (`List[int]`): List of frame indices to decode.
+ ... Returns:
+ ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
+ ... '''
+ ... frames = []
+ ... container.seek(0)
+ ... start_index = indices[0]
+ ... end_index = indices[-1]
+ ... for i, frame in enumerate(container.decode(video=0)):
+ ... if i > end_index:
+ ... break
+ ... if i >= start_index and i in indices:
+ ... frames.append(frame)
+ ... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
+
+
+ >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
+ ... '''
+ ... Sample a given number of frame indices from the video.
+ ... Args:
+ ... clip_len (`int`): Total number of frames to sample.
+ ... frame_sample_rate (`int`): Sample every n-th frame.
+ ... seg_len (`int`): Maximum allowed index of sample's last frame.
+ ... Returns:
+    ...     indices (`List[int]`): List of sampled frame indices.
+ ... '''
+ ... converted_len = int(clip_len * frame_sample_rate)
+ ... end_idx = np.random.randint(converted_len, seg_len)
+ ... start_idx = end_idx - converted_len
+ ... indices = np.linspace(start_idx, end_idx, num=clip_len)
+ ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
+ ... return indices
+
+
+ >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
+ >>> file_path = hf_hub_download(
+ ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
+ ... )
+ >>> container = av.open(file_path)
+
+ >>> # sample 32 frames
+ >>> indices = sample_frame_indices(clip_len=32, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
+ >>> video = read_video_pyav(container=container, indices=indices)
+
+ >>> image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
+ >>> model = VivitModel.from_pretrained("google/vivit-b-16x2-kinetics400")
+
+ >>> # prepare video for the model
+ >>> inputs = image_processor(list(video), return_tensors="pt")
+
+ >>> # forward pass
+ >>> outputs = model(**inputs)
+ >>> last_hidden_states = outputs.last_hidden_state
+ >>> list(last_hidden_states.shape)
+ [1, 3137, 768]
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(pixel_values)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ sequence_output = self.layernorm(sequence_output)
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """ViViT Transformer model with a video classification head on top (a linear layer on top of the final hidden state of the
+[CLS] token), e.g. for Kinetics-400.""",
+ VIVIT_START_DOCSTRING,
+)
+class VivitForVideoClassification(VivitPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.vivit = VivitModel(config, add_pooling_layer=False)
+
+ # Classifier head
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(VIVIT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], ImageClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for computing the video classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> import av
+ >>> import numpy as np
+ >>> import torch
+
+ >>> from transformers import VivitImageProcessor, VivitForVideoClassification
+ >>> from huggingface_hub import hf_hub_download
+
+ >>> np.random.seed(0)
+
+
+ >>> def read_video_pyav(container, indices):
+ ... '''
+ ... Decode the video with PyAV decoder.
+ ... Args:
+ ... container (`av.container.input.InputContainer`): PyAV container.
+ ... indices (`List[int]`): List of frame indices to decode.
+ ... Returns:
+ ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
+ ... '''
+ ... frames = []
+ ... container.seek(0)
+ ... start_index = indices[0]
+ ... end_index = indices[-1]
+ ... for i, frame in enumerate(container.decode(video=0)):
+ ... if i > end_index:
+ ... break
+ ... if i >= start_index and i in indices:
+ ... frames.append(frame)
+ ... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
+
+
+ >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
+ ... '''
+ ... Sample a given number of frame indices from the video.
+ ... Args:
+ ... clip_len (`int`): Total number of frames to sample.
+ ... frame_sample_rate (`int`): Sample every n-th frame.
+ ... seg_len (`int`): Maximum allowed index of sample's last frame.
+ ... Returns:
+    ...     indices (`List[int]`): List of sampled frame indices.
+ ... '''
+ ... converted_len = int(clip_len * frame_sample_rate)
+ ... end_idx = np.random.randint(converted_len, seg_len)
+ ... start_idx = end_idx - converted_len
+ ... indices = np.linspace(start_idx, end_idx, num=clip_len)
+ ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
+ ... return indices
+
+
+ >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
+ >>> file_path = hf_hub_download(
+ ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
+ ... )
+ >>> container = av.open(file_path)
+
+ >>> # sample 32 frames
+ >>> indices = sample_frame_indices(clip_len=32, frame_sample_rate=4, seg_len=container.streams.video[0].frames)
+ >>> video = read_video_pyav(container=container, indices=indices)
+
+ >>> image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
+ >>> model = VivitForVideoClassification.from_pretrained("google/vivit-b-16x2-kinetics400")
+
+ >>> inputs = image_processor(list(video), return_tensors="pt")
+
+ >>> with torch.no_grad():
+ ... outputs = model(**inputs)
+ ... logits = outputs.logits
+
+ >>> # model predicts one of the 400 Kinetics-400 classes
+ >>> predicted_label = logits.argmax(-1).item()
+ >>> print(model.config.id2label[predicted_label])
+ LABEL_116
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.vivit(
+ pixel_values,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
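+        # the classification head reads the final hidden state of the [CLS] token
+        # (position 0), as described in the class docstring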
+ logits = self.classifier(sequence_output[:, 0, :])
+
+ loss = None
+ if labels is not None:
+ if self.num_labels == 1:
+ # We are doing regression
+ loss_fct = MSELoss()
+ loss = loss_fct(logits.view(-1), labels.view(-1))
+ else:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return ImageClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
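+
+# A minimal fine-tuning sketch for the loss path above (label value and exact inputs are
+# illustrative; any integer label in [0, config.num_labels - 1] selects cross-entropy, while
+# config.num_labels == 1 switches to mean-squared-error regression):
+#
+#   labels = torch.tensor([116])              # hypothetical Kinetics-400 class index
+#   outputs = model(**inputs, labels=labels)  # `inputs` as produced by VivitImageProcessor
+#   outputs.loss.backward()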
diff --git a/venv/lib/python3.10/site-packages/transformers/models/wavlm/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/wavlm/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9025532515b72dc48023a5e0262d3f8f825ad021
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/wavlm/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/wavlm/__pycache__/convert_wavlm_original_s3prl_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/wavlm/__pycache__/convert_wavlm_original_s3prl_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..17785cd33483f7abbaeae14004a6f220ce339ff6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/wavlm/__pycache__/convert_wavlm_original_s3prl_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/wavlm/__pycache__/modeling_wavlm.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/wavlm/__pycache__/modeling_wavlm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2f4041aa127c700fe509454090034ee8bc59bf26
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/wavlm/__pycache__/modeling_wavlm.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/triton/third_party/cuda/bin/nvdisasm b/venv/lib/python3.10/site-packages/triton/third_party/cuda/bin/nvdisasm
new file mode 100644
index 0000000000000000000000000000000000000000..b92fb738f44d43605bbae877a1c06617dbaf96a6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/triton/third_party/cuda/bin/nvdisasm
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf1ae1c2e724d4f238fd143696277385a20aab12ea3c107fd5b8749cfd95484b
+size 50678760