diff --git a/ckpts/universal/global_step20/zero/18.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/18.mlp.dense_h_to_4h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..1859286e4a2152f34abec52051854c66fdfcd83b --- /dev/null +++ b/ckpts/universal/global_step20/zero/18.mlp.dense_h_to_4h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88b4e64ca27c88184597a7e9749498dc005de5e5037ce4d97f04fdb8b4548ac4 +size 33555627 diff --git a/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/config.yaml b/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c11007c507a23756378d6ea75939d792ccfd0c21 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.1 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716439886 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.1 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/output.log b/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..0b245d7f9604ef6094451afe5f1cec858ffea259 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/output.log @@ -0,0 +1,34 @@ + +2024-05-23:04:51:27,146 INFO [__main__.py:251] Verbosity set to INFO +2024-05-23:04:51:36,123 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-23:04:51:36,124 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-23:04:51:36,125 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100'} +2024-05-23:04:51:38,444 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File 
"/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100/tree/main' for available files. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..8150356038c46ec25f623f6e945d6dcb66a2e717 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 
+pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.2.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.1 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..3e663756347179f1e0bba352b66c9149d87874c3 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-23T04:51:26.939641", + "startedAt": "2024-05-23T04:51:26.427629", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-debug-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2334.34245625, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3399.997, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3341.627, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, 
+ "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3399.997, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3336.69, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3335.088, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 212.20449829101562 + } + }, + "memory": { + "total": 1007.43798828125 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..9c8bc84b50f55b7655f72c31d4c00a85872741c6 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/logs/debug-internal.log @@ -0,0 +1,183 @@ +2024-05-23 04:51:26,452 INFO StreamThr :811 [internal.py:wandb_internal():85] W&B internal server running at pid: 811, started at: 2024-05-23 04:51:26.450632 +2024-05-23 04:51:26,457 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: status +2024-05-23 04:51:26,458 INFO WriterThread:811 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/run-zatipfuv.wandb +2024-05-23 04:51:26,459 DEBUG SenderThread:811 [sender.py:send():378] send: header +2024-05-23 04:51:26,462 DEBUG SenderThread:811 [sender.py:send():378] send: run +2024-05-23 04:51:26,718 INFO SenderThread:811 [dir_watcher.py:__init__():211] watching files in: 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files +2024-05-23 04:51:26,718 INFO SenderThread:811 [sender.py:_start_run_threads():1123] run started: zatipfuv with start time 1716439886.450711 +2024-05-23 04:51:26,725 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: check_version +2024-05-23 04:51:26,725 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: check_version +2024-05-23 04:51:26,844 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 04:51:26,847 DEBUG HandlerThread:811 [system_info.py:__init__():26] System info init +2024-05-23 04:51:26,847 DEBUG HandlerThread:811 [system_info.py:__init__():41] System info init done +2024-05-23 04:51:26,847 INFO HandlerThread:811 [system_monitor.py:start():194] Starting system monitor +2024-05-23 04:51:26,847 INFO SystemMonitor:811 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 04:51:26,847 INFO HandlerThread:811 [system_monitor.py:probe():214] Collecting system info +2024-05-23 04:51:26,854 INFO SystemMonitor:811 [interfaces.py:start():188] Started cpu monitoring +2024-05-23 04:51:26,854 INFO SystemMonitor:811 [interfaces.py:start():188] Started disk monitoring +2024-05-23 04:51:26,855 INFO SystemMonitor:811 [interfaces.py:start():188] Started memory monitoring +2024-05-23 04:51:26,855 INFO SystemMonitor:811 [interfaces.py:start():188] Started network monitoring +2024-05-23 04:51:26,939 DEBUG HandlerThread:811 [system_info.py:probe():150] Probing system +2024-05-23 04:51:26,942 DEBUG HandlerThread:811 [system_info.py:_probe_git():135] Probing git +2024-05-23 04:51:26,952 ERROR HandlerThread:811 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 04:51:26,952 DEBUG HandlerThread:811 [system_info.py:_probe_git():143] Probing git done +2024-05-23 04:51:26,952 DEBUG HandlerThread:811 [system_info.py:probe():198] Probing system done +2024-05-23 04:51:26,952 DEBUG HandlerThread:811 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T04:51:26.939641', 'startedAt': '2024-05-23T04:51:26.427629', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-debug-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2334.34245625, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3341.627, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3336.69, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3335.088, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 212.20449829101562}}, 'memory': {'total': 1007.43798828125}} +2024-05-23 04:51:26,953 INFO HandlerThread:811 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 04:51:26,953 INFO HandlerThread:811 [system_monitor.py:probe():227] Publishing system info +2024-05-23 04:51:26,956 INFO HandlerThread:811 [system_monitor.py:probe():229] Finished publishing system info +2024-05-23 04:51:26,961 DEBUG SenderThread:811 [sender.py:send():378] send: files +2024-05-23 04:51:26,961 INFO SenderThread:811 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 04:51:27,141 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 04:51:27,141 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: python_packages +2024-05-23 04:51:27,143 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 04:51:27,143 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: stop_status +2024-05-23 04:51:27,249 DEBUG SenderThread:811 [sender.py:send():378] send: telemetry +2024-05-23 04:51:27,597 INFO wandb-upload_0:811 [upload_job.py:push():130] Uploaded file /tmp/tmp_076n2_ewandb/d4n2m7i8-wandb-metadata.json +2024-05-23 04:51:27,720 INFO Thread-12 :811 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/wandb-metadata.json +2024-05-23 04:51:27,720 INFO Thread-12 :811 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/requirements.txt +2024-05-23 04:51:27,720 INFO Thread-12 :811 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/output.log +2024-05-23 04:51:29,720 INFO Thread-12 :811 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/output.log +2024-05-23 04:51:32,250 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 04:51:37,726 INFO Thread-12 :811 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/output.log +2024-05-23 04:51:38,126 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 04:51:38,457 DEBUG SenderThread:811 [sender.py:send():378] send: exit +2024-05-23 04:51:38,457 INFO SenderThread:811 [sender.py:send_exit():585] handling exit code: 1 +2024-05-23 04:51:38,457 INFO SenderThread:811 
[sender.py:send_exit():587] handling runtime: 11 +2024-05-23 04:51:38,459 INFO SenderThread:811 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 04:51:38,459 INFO SenderThread:811 [sender.py:send_exit():593] send defer +2024-05-23 04:51:38,459 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:51:38,459 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 04:51:38,459 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer +2024-05-23 04:51:38,459 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 04:51:38,459 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 1 +2024-05-23 04:51:38,459 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:51:38,459 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 04:51:38,459 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer +2024-05-23 04:51:38,459 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 04:51:38,460 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 2 +2024-05-23 04:51:38,460 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:51:38,460 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 04:51:38,460 INFO HandlerThread:811 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 04:51:38,460 DEBUG SystemMonitor:811 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 04:51:38,460 DEBUG SystemMonitor:811 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 04:51:38,460 DEBUG SystemMonitor:811 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-23 04:51:38,460 INFO HandlerThread:811 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 04:51:38,462 INFO HandlerThread:811 [interfaces.py:finish():200] Joined disk monitor +2024-05-23 04:51:38,462 INFO HandlerThread:811 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 04:51:38,462 INFO HandlerThread:811 [interfaces.py:finish():200] Joined network monitor +2024-05-23 04:51:38,462 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer +2024-05-23 04:51:38,463 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 04:51:38,463 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 3 +2024-05-23 04:51:38,463 DEBUG SenderThread:811 [sender.py:send():378] send: stats +2024-05-23 04:51:38,463 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:51:38,464 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 04:51:38,464 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer +2024-05-23 04:51:38,464 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 04:51:38,464 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 4 +2024-05-23 04:51:38,464 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:51:38,464 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 04:51:38,464 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer 
+2024-05-23 04:51:38,464 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 04:51:38,464 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 5 +2024-05-23 04:51:38,464 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:51:38,464 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 04:51:38,465 DEBUG SenderThread:811 [sender.py:send():378] send: summary +2024-05-23 04:51:38,465 INFO SenderThread:811 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 04:51:38,466 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer +2024-05-23 04:51:38,466 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 04:51:38,466 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 6 +2024-05-23 04:51:38,466 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:51:38,466 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 04:51:38,466 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer +2024-05-23 04:51:38,466 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 04:51:38,470 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 04:51:38,560 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 7 +2024-05-23 04:51:38,560 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:51:38,560 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 04:51:38,560 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer +2024-05-23 04:51:38,560 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 04:51:38,728 INFO Thread-12 :811 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/config.yaml +2024-05-23 04:51:38,728 INFO Thread-12 :811 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/wandb-summary.json +2024-05-23 04:51:39,274 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 8 +2024-05-23 04:51:39,275 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:51:39,275 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 04:51:39,275 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer +2024-05-23 04:51:39,275 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 04:51:39,275 INFO SenderThread:811 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 04:51:39,275 INFO SenderThread:811 [job_builder.py:_get_source_type():576] no source found +2024-05-23 04:51:39,276 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 9 +2024-05-23 04:51:39,276 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:51:39,276 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 04:51:39,276 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer +2024-05-23 04:51:39,276 INFO 
SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 04:51:39,276 INFO SenderThread:811 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 04:51:39,457 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 04:51:39,729 INFO SenderThread:811 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/output.log +2024-05-23 04:51:39,729 INFO SenderThread:811 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files +2024-05-23 04:51:39,729 INFO SenderThread:811 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/output.log output.log +2024-05-23 04:51:39,729 INFO SenderThread:811 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/requirements.txt requirements.txt +2024-05-23 04:51:39,732 INFO SenderThread:811 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/wandb-summary.json wandb-summary.json +2024-05-23 04:51:39,732 INFO SenderThread:811 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/wandb-metadata.json wandb-metadata.json +2024-05-23 04:51:39,732 INFO SenderThread:811 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/config.yaml config.yaml +2024-05-23 04:51:39,732 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 10 +2024-05-23 04:51:39,732 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 04:51:39,732 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:51:39,734 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 04:51:39,735 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer +2024-05-23 04:51:39,737 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 04:51:39,737 INFO SenderThread:811 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 04:51:39,965 INFO wandb-upload_0:811 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/output.log +2024-05-23 04:51:40,294 INFO wandb-upload_2:811 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/wandb-summary.json +2024-05-23 04:51:40,329 INFO wandb-upload_1:811 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/requirements.txt +2024-05-23 04:51:40,330 INFO wandb-upload_3:811 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/config.yaml +2024-05-23 04:51:40,458 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 04:51:40,458 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 04:51:40,530 INFO Thread-11 
(_thread_body):811 [sender.py:transition_state():613] send defer: 11 +2024-05-23 04:51:40,530 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:51:40,530 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 04:51:40,531 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer +2024-05-23 04:51:40,531 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 04:51:40,531 INFO SenderThread:811 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 04:51:40,531 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 12 +2024-05-23 04:51:40,531 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:51:40,531 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 04:51:40,531 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer +2024-05-23 04:51:40,531 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-23 04:51:40,531 INFO SenderThread:811 [file_stream.py:finish():601] file stream finish called +2024-05-23 04:51:40,591 INFO SenderThread:811 [file_stream.py:finish():605] file stream finish is done +2024-05-23 04:51:40,591 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 13 +2024-05-23 04:51:40,591 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:51:40,591 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 04:51:40,591 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer +2024-05-23 04:51:40,591 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 04:51:40,591 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 14 +2024-05-23 04:51:40,591 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:51:40,591 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 04:51:40,591 DEBUG SenderThread:811 [sender.py:send():378] send: final +2024-05-23 04:51:40,591 DEBUG SenderThread:811 [sender.py:send():378] send: footer +2024-05-23 04:51:40,592 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer +2024-05-23 04:51:40,592 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 04:51:40,592 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 04:51:40,592 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 04:51:40,593 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 04:51:40,593 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: server_info +2024-05-23 04:51:40,593 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: get_summary +2024-05-23 04:51:40,593 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 04:51:40,593 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-23 04:51:40,593 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 04:51:40,593 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: server_info +2024-05-23 04:51:40,654 INFO MainThread:811 
[wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 04:51:40,654 INFO MainThread:811 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 04:51:40,654 INFO MainThread:811 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 04:51:40,654 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: shutdown +2024-05-23 04:51:40,655 INFO HandlerThread:811 [handler.py:finish():882] shutting down handler +2024-05-23 04:51:41,593 INFO WriterThread:811 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/run-zatipfuv.wandb +2024-05-23 04:51:41,654 INFO SenderThread:811 [sender.py:finish():1545] shutting down sender +2024-05-23 04:51:41,654 INFO SenderThread:811 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 04:51:41,654 INFO SenderThread:811 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/logs/debug.log b/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..04ed270cd0301467b4127af7856a5fe1c30a5190 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_setup.py:_flush():76] Configure stats pid to 655 +2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 04:51:26,445 WARNING MainThread:655 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/logs/debug.log +2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/logs/debug-internal.log +2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_init.py:init():560] calling init triggers +2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_init.py:init():610] starting backend +2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_init.py:init():614] setting up manager +2024-05-23 04:51:26,449 INFO MainThread:655 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 04:51:26,450 INFO MainThread:655 [wandb_init.py:init():622] 
backend started and connected +2024-05-23 04:51:26,453 INFO MainThread:655 [wandb_init.py:init():711] updated telemetry +2024-05-23 04:51:26,461 INFO MainThread:655 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 04:51:26,725 INFO MainThread:655 [wandb_run.py:_on_init():2396] communicating current version +2024-05-23 04:51:26,838 INFO MainThread:655 [wandb_run.py:_on_init():2405] got version response +2024-05-23 04:51:26,838 INFO MainThread:655 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 04:51:27,142 INFO MainThread:655 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 04:51:27,142 INFO MainThread:655 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-23 04:51:27,142 INFO MainThread:655 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 04:51:27,142 INFO MainThread:655 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-23 04:51:27,144 INFO MainThread:655 [wandb_init.py:init():838] run started, returning control to user process +2024-05-23 04:51:41,655 WARNING MsgRouterThr:655 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/run-zatipfuv.wandb b/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/run-zatipfuv.wandb new file mode 100644 index 0000000000000000000000000000000000000000..e737016c3cdd5fab7691b9b784565ff9979a11f9 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/run-zatipfuv.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..afccca3fe6e55f393d4def4be06a9ef74903e321 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/logs/debug-internal.log @@ -0,0 +1,182 @@ +2024-05-23 04:55:44,631 INFO StreamThr :1763 [internal.py:wandb_internal():85] W&B internal server running at pid: 1763, started at: 2024-05-23 04:55:44.626261 +2024-05-23 04:55:44,633 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: status +2024-05-23 04:55:44,633 INFO WriterThread:1763 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/run-cegnkp0e.wandb +2024-05-23 04:55:44,635 DEBUG SenderThread:1763 [sender.py:send():378] send: header +2024-05-23 04:55:44,640 DEBUG SenderThread:1763 [sender.py:send():378] send: run +2024-05-23 04:55:44,913 INFO SenderThread:1763 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files +2024-05-23 04:55:44,913 INFO SenderThread:1763 [sender.py:_start_run_threads():1123] run started: cegnkp0e with start time 1716440144.626722 +2024-05-23 04:55:44,916 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: check_version +2024-05-23 04:55:44,917 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: check_version +2024-05-23 04:55:45,037 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 04:55:45,039 DEBUG HandlerThread:1763 [system_info.py:__init__():26] System info init +2024-05-23 04:55:45,039 DEBUG HandlerThread:1763 [system_info.py:__init__():41] System info init done +2024-05-23 04:55:45,039 INFO HandlerThread:1763 [system_monitor.py:start():194] 
Starting system monitor +2024-05-23 04:55:45,039 INFO SystemMonitor:1763 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 04:55:45,039 INFO HandlerThread:1763 [system_monitor.py:probe():214] Collecting system info +2024-05-23 04:55:45,046 INFO SystemMonitor:1763 [interfaces.py:start():188] Started cpu monitoring +2024-05-23 04:55:45,046 INFO SystemMonitor:1763 [interfaces.py:start():188] Started disk monitoring +2024-05-23 04:55:45,048 INFO SystemMonitor:1763 [interfaces.py:start():188] Started memory monitoring +2024-05-23 04:55:45,049 INFO SystemMonitor:1763 [interfaces.py:start():188] Started network monitoring +2024-05-23 04:55:45,111 DEBUG HandlerThread:1763 [system_info.py:probe():150] Probing system +2024-05-23 04:55:45,115 DEBUG HandlerThread:1763 [system_info.py:_probe_git():135] Probing git +2024-05-23 04:55:45,125 ERROR HandlerThread:1763 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 04:55:45,125 DEBUG HandlerThread:1763 [system_info.py:_probe_git():143] Probing git done +2024-05-23 04:55:45,125 DEBUG HandlerThread:1763 [system_info.py:probe():198] Probing system done +2024-05-23 04:55:45,125 DEBUG HandlerThread:1763 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T04:55:45.111786', 'startedAt': '2024-05-23T04:55:44.608684', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-debug-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.4999875000003, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3392.442, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3368.04, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3117.704, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 212.19972229003906}}, 'memory': {'total': 1007.43798828125}} 
+2024-05-23 04:55:45,125 INFO HandlerThread:1763 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 04:55:45,125 INFO HandlerThread:1763 [system_monitor.py:probe():227] Publishing system info +2024-05-23 04:55:45,128 INFO HandlerThread:1763 [system_monitor.py:probe():229] Finished publishing system info +2024-05-23 04:55:45,133 DEBUG SenderThread:1763 [sender.py:send():378] send: files +2024-05-23 04:55:45,133 INFO SenderThread:1763 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 04:55:45,305 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 04:55:45,305 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: python_packages +2024-05-23 04:55:45,306 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 04:55:45,307 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: stop_status +2024-05-23 04:55:45,432 DEBUG SenderThread:1763 [sender.py:send():378] send: telemetry +2024-05-23 04:55:45,708 INFO wandb-upload_0:1763 [upload_job.py:push():130] Uploaded file /tmp/tmpmuiczo1owandb/lvic4v80-wandb-metadata.json +2024-05-23 04:55:45,915 INFO Thread-12 :1763 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/requirements.txt +2024-05-23 04:55:45,916 INFO Thread-12 :1763 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/output.log +2024-05-23 04:55:45,916 INFO Thread-12 :1763 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/wandb-metadata.json +2024-05-23 04:55:47,915 INFO Thread-12 :1763 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/output.log +2024-05-23 04:55:50,438 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 04:55:55,768 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 04:55:55,922 INFO Thread-12 :1763 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/output.log +2024-05-23 04:55:56,082 DEBUG SenderThread:1763 [sender.py:send():378] send: exit +2024-05-23 04:55:56,082 INFO SenderThread:1763 [sender.py:send_exit():585] handling exit code: 1 +2024-05-23 04:55:56,082 INFO SenderThread:1763 [sender.py:send_exit():587] handling runtime: 11 +2024-05-23 04:55:56,084 INFO SenderThread:1763 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 04:55:56,084 INFO SenderThread:1763 [sender.py:send_exit():593] send defer +2024-05-23 04:55:56,084 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:55:56,084 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 04:55:56,084 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer +2024-05-23 04:55:56,084 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 04:55:56,084 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 1 +2024-05-23 04:55:56,084 DEBUG 
HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:55:56,084 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 04:55:56,084 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer +2024-05-23 04:55:56,084 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 04:55:56,084 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 2 +2024-05-23 04:55:56,085 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:55:56,085 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 04:55:56,085 INFO HandlerThread:1763 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 04:55:56,085 DEBUG SystemMonitor:1763 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 04:55:56,085 DEBUG SystemMonitor:1763 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 04:55:56,085 DEBUG SystemMonitor:1763 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-23 04:55:56,086 INFO HandlerThread:1763 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 04:55:56,086 INFO HandlerThread:1763 [interfaces.py:finish():200] Joined disk monitor +2024-05-23 04:55:56,086 INFO HandlerThread:1763 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 04:55:56,086 INFO HandlerThread:1763 [interfaces.py:finish():200] Joined network monitor +2024-05-23 04:55:56,086 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer +2024-05-23 04:55:56,086 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 04:55:56,086 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 3 +2024-05-23 04:55:56,086 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:55:56,086 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 04:55:56,086 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer +2024-05-23 04:55:56,086 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 04:55:56,086 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 4 +2024-05-23 04:55:56,086 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:55:56,087 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 04:55:56,087 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer +2024-05-23 04:55:56,087 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 04:55:56,087 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 5 +2024-05-23 04:55:56,087 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:55:56,087 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 04:55:56,087 DEBUG SenderThread:1763 [sender.py:send():378] send: summary +2024-05-23 04:55:56,088 INFO SenderThread:1763 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 04:55:56,088 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer +2024-05-23 04:55:56,088 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 04:55:56,088 
INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 6 +2024-05-23 04:55:56,088 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:55:56,088 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 04:55:56,088 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer +2024-05-23 04:55:56,088 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 04:55:56,093 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 04:55:56,177 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 7 +2024-05-23 04:55:56,177 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:55:56,177 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 04:55:56,177 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer +2024-05-23 04:55:56,177 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 04:55:56,923 INFO Thread-12 :1763 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/config.yaml +2024-05-23 04:55:56,924 INFO Thread-12 :1763 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/wandb-summary.json +2024-05-23 04:55:57,082 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 04:55:57,460 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 8 +2024-05-23 04:55:57,460 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 04:55:57,460 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:55:57,460 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 04:55:57,460 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer +2024-05-23 04:55:57,460 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 04:55:57,460 INFO SenderThread:1763 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 04:55:57,461 INFO SenderThread:1763 [job_builder.py:_get_source_type():576] no source found +2024-05-23 04:55:57,461 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 9 +2024-05-23 04:55:57,461 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:55:57,461 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 04:55:57,461 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer +2024-05-23 04:55:57,461 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 04:55:57,461 INFO SenderThread:1763 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 04:55:57,925 INFO SenderThread:1763 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/output.log +2024-05-23 04:55:57,925 INFO SenderThread:1763 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files +2024-05-23 04:55:57,925 INFO 
SenderThread:1763 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/output.log output.log +2024-05-23 04:55:57,925 INFO SenderThread:1763 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/requirements.txt requirements.txt +2024-05-23 04:55:57,928 INFO SenderThread:1763 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/wandb-summary.json wandb-summary.json +2024-05-23 04:55:57,930 INFO SenderThread:1763 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/wandb-metadata.json wandb-metadata.json +2024-05-23 04:55:57,930 INFO SenderThread:1763 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/config.yaml config.yaml +2024-05-23 04:55:57,932 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 10 +2024-05-23 04:55:57,932 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:55:57,932 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 04:55:57,933 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer +2024-05-23 04:55:57,935 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 04:55:57,935 INFO SenderThread:1763 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 04:55:58,082 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 04:55:58,083 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 04:55:58,178 INFO wandb-upload_0:1763 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/output.log +2024-05-23 04:55:58,492 INFO wandb-upload_1:1763 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/requirements.txt +2024-05-23 04:55:58,533 INFO wandb-upload_2:1763 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/wandb-summary.json +2024-05-23 04:55:58,544 INFO wandb-upload_3:1763 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/config.yaml +2024-05-23 04:55:58,744 INFO Thread-11 (_thread_body):1763 [sender.py:transition_state():613] send defer: 11 +2024-05-23 04:55:58,744 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:55:58,744 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 04:55:58,744 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer +2024-05-23 04:55:58,744 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 04:55:58,744 INFO SenderThread:1763 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 04:55:58,745 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 12 +2024-05-23 04:55:58,745 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer +2024-05-23 
04:55:58,745 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 04:55:58,745 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer +2024-05-23 04:55:58,745 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-23 04:55:58,745 INFO SenderThread:1763 [file_stream.py:finish():601] file stream finish called +2024-05-23 04:55:58,805 INFO SenderThread:1763 [file_stream.py:finish():605] file stream finish is done +2024-05-23 04:55:58,805 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 13 +2024-05-23 04:55:58,805 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:55:58,805 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 04:55:58,805 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer +2024-05-23 04:55:58,805 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 04:55:58,805 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 14 +2024-05-23 04:55:58,805 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer +2024-05-23 04:55:58,805 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 04:55:58,805 DEBUG SenderThread:1763 [sender.py:send():378] send: final +2024-05-23 04:55:58,805 DEBUG SenderThread:1763 [sender.py:send():378] send: footer +2024-05-23 04:55:58,805 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer +2024-05-23 04:55:58,806 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 04:55:58,806 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 04:55:58,806 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 04:55:58,806 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: server_info +2024-05-23 04:55:58,806 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: get_summary +2024-05-23 04:55:58,806 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 04:55:58,806 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-23 04:55:58,807 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 04:55:58,807 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 04:55:58,807 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: server_info +2024-05-23 04:55:58,870 INFO MainThread:1763 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 04:55:58,870 INFO MainThread:1763 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 04:55:58,870 INFO MainThread:1763 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 04:55:58,870 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: shutdown +2024-05-23 04:55:58,870 INFO HandlerThread:1763 [handler.py:finish():882] shutting down handler +2024-05-23 04:55:59,807 INFO WriterThread:1763 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/run-cegnkp0e.wandb +2024-05-23 04:55:59,870 INFO SenderThread:1763 [sender.py:finish():1545] shutting down sender +2024-05-23 
04:55:59,870 INFO SenderThread:1763 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 04:55:59,870 INFO SenderThread:1763 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/logs/debug.log b/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..29e02d99b321df3881895623dc0a1d7473ce3719 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-23 04:55:44,620 INFO MainThread:1608 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 04:55:44,620 INFO MainThread:1608 [wandb_setup.py:_flush():76] Configure stats pid to 1608 +2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 04:55:44,621 WARNING MainThread:1608 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/logs/debug.log +2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/logs/debug-internal.log +2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_init.py:init():560] calling init triggers +2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_init.py:init():610] starting backend +2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_init.py:init():614] setting up manager +2024-05-23 04:55:44,625 INFO MainThread:1608 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 04:55:44,626 INFO MainThread:1608 [wandb_init.py:init():622] backend started and connected +2024-05-23 04:55:44,629 INFO MainThread:1608 [wandb_init.py:init():711] updated telemetry +2024-05-23 04:55:44,639 INFO MainThread:1608 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 04:55:44,916 INFO MainThread:1608 [wandb_run.py:_on_init():2396] communicating current version +2024-05-23 04:55:45,031 INFO MainThread:1608 [wandb_run.py:_on_init():2405] got version response +2024-05-23 04:55:45,031 INFO MainThread:1608 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 04:55:45,306 INFO MainThread:1608 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 04:55:45,306 INFO MainThread:1608 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-23 
04:55:45,307 INFO MainThread:1608 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 04:55:45,307 INFO MainThread:1608 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-23 04:55:45,308 INFO MainThread:1608 [wandb_init.py:init():838] run started, returning control to user process +2024-05-23 04:55:59,871 WARNING MsgRouterThr:1608 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/run-cegnkp0e.wandb b/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/run-cegnkp0e.wandb new file mode 100644 index 0000000000000000000000000000000000000000..5ead0bc3651f725c7ebcb60f3094ca3ddd14d7a6 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/run-cegnkp0e.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/config.yaml b/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..96b93e36f9bc3e195cc3771b752031238ee808ea --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/config.yaml @@ -0,0 +1,33 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.36.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1717058855 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 13 + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.36.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/output.log b/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..e68a9253e7b64ad1661fcf783e4e09f69e429a31 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/output.log @@ -0,0 +1,5 @@ + +2024-05-30:08:47:36,676 INFO [__main__.py:251] Verbosity set to INFO +2024-05-30:08:47:45,851 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'boolq', 'copa', 'mrpc', 'piqa', 'sst2', 'winogrande'] +2024-05-30:08:47:45,852 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-30:08:47:45,852 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step10000', 'tokenizer': '/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/'} diff --git a/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..1b21c96113204af90a7f13a98ab7ccbc9f5fa95a --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/requirements.txt @@ -0,0 +1,153 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 
+exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.2 +identify==2.5.35 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.3.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.15.2 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.36.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..5f9ed49c5ea8db84ce740ca66b5b03b49e7c20c6 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-30T08:47:36.464779", + "startedAt": "2024-05-30T08:47:35.929688", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step10000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/", + "--tasks", + "winogrande,sst2,mrpc,arc_easy,copa,piqa,boolq", + "--batch_size", + "auto", + "--wandb_args", + "project=english-eval,group=exp2,name=global_step10000" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": 
"https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-debug-5-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2327.43190625, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3399.997, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 172.39404296875 + } + }, + "memory": { + "total": 1007.4379730224609 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/logs/debug-internal.log 
b/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..28172d02a7bffcb287fc0ef00c1a47c1e66762cb --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/logs/debug-internal.log @@ -0,0 +1,49 @@ +2024-05-30 08:47:35,952 INFO StreamThr :904 [internal.py:wandb_internal():85] W&B internal server running at pid: 904, started at: 2024-05-30 08:47:35.950613 +2024-05-30 08:47:35,956 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: status +2024-05-30 08:47:35,958 INFO WriterThread:904 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/run-wtsee6ig.wandb +2024-05-30 08:47:35,960 DEBUG SenderThread:904 [sender.py:send():378] send: header +2024-05-30 08:47:35,963 DEBUG SenderThread:904 [sender.py:send():378] send: run +2024-05-30 08:47:36,265 INFO SenderThread:904 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files +2024-05-30 08:47:36,265 INFO SenderThread:904 [sender.py:_start_run_threads():1123] run started: wtsee6ig with start time 1717058855.951258 +2024-05-30 08:47:36,269 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: check_version +2024-05-30 08:47:36,269 DEBUG SenderThread:904 [sender.py:send_request():405] send_request: check_version +2024-05-30 08:47:36,386 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: run_start +2024-05-30 08:47:36,389 DEBUG HandlerThread:904 [system_info.py:__init__():26] System info init +2024-05-30 08:47:36,389 DEBUG HandlerThread:904 [system_info.py:__init__():41] System info init done +2024-05-30 08:47:36,389 INFO HandlerThread:904 [system_monitor.py:start():194] Starting system monitor +2024-05-30 08:47:36,389 INFO SystemMonitor:904 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-30 08:47:36,389 INFO HandlerThread:904 [system_monitor.py:probe():214] Collecting system info +2024-05-30 08:47:36,396 INFO SystemMonitor:904 [interfaces.py:start():188] Started cpu monitoring +2024-05-30 08:47:36,397 INFO SystemMonitor:904 [interfaces.py:start():188] Started disk monitoring +2024-05-30 08:47:36,402 INFO SystemMonitor:904 [interfaces.py:start():188] Started memory monitoring +2024-05-30 08:47:36,403 INFO SystemMonitor:904 [interfaces.py:start():188] Started network monitoring +2024-05-30 08:47:36,464 DEBUG HandlerThread:904 [system_info.py:probe():150] Probing system +2024-05-30 08:47:36,468 DEBUG HandlerThread:904 [system_info.py:_probe_git():135] Probing git +2024-05-30 08:47:36,477 ERROR HandlerThread:904 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-30 08:47:36,477 DEBUG HandlerThread:904 [system_info.py:_probe_git():143] Probing git done +2024-05-30 08:47:36,477 DEBUG HandlerThread:904 [system_info.py:probe():198] Probing system done +2024-05-30 08:47:36,478 DEBUG HandlerThread:904 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': 
'2024-05-30T08:47:36.464779', 'startedAt': '2024-05-30T08:47:35.929688', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step10000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/', '--tasks', 'winogrande,sst2,mrpc,arc_easy,copa,piqa,boolq', '--batch_size', 'auto', '--wandb_args', 'project=english-eval,group=exp2,name=global_step10000'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-debug-5-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.43190625, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 
'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 
'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 172.39404296875}}, 'memory': {'total': 1007.4379730224609}} +2024-05-30 08:47:36,478 INFO HandlerThread:904 [system_monitor.py:probe():224] Finished collecting system info +2024-05-30 08:47:36,478 INFO HandlerThread:904 [system_monitor.py:probe():227] Publishing system info +2024-05-30 08:47:36,482 INFO HandlerThread:904 [system_monitor.py:probe():229] Finished publishing system info +2024-05-30 08:47:36,489 DEBUG SenderThread:904 [sender.py:send():378] send: files +2024-05-30 08:47:36,489 INFO SenderThread:904 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-30 08:47:36,671 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: python_packages +2024-05-30 08:47:36,671 DEBUG SenderThread:904 [sender.py:send_request():405] send_request: python_packages +2024-05-30 08:47:36,673 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: stop_status +2024-05-30 08:47:36,673 DEBUG SenderThread:904 [sender.py:send_request():405] send_request: stop_status +2024-05-30 08:47:36,785 DEBUG SenderThread:904 [sender.py:send():378] send: telemetry +2024-05-30 08:47:37,119 INFO wandb-upload_0:904 [upload_job.py:push():130] Uploaded file /tmp/tmp7qct_09uwandb/yu5q9u35-wandb-metadata.json +2024-05-30 08:47:37,267 INFO Thread-12 :904 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/wandb-metadata.json +2024-05-30 08:47:37,267 INFO Thread-12 :904 [dir_watcher.py:_on_file_created():271] file/dir created: 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/output.log +2024-05-30 08:47:37,268 INFO Thread-12 :904 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/requirements.txt +2024-05-30 08:47:39,267 INFO Thread-12 :904 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/output.log +2024-05-30 08:47:41,809 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 08:47:46,853 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 08:47:47,274 INFO Thread-12 :904 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/output.log +2024-05-30 08:47:49,283 INFO Thread-12 :904 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/output.log diff --git a/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/logs/debug.log b/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..3243794ae63e7534af4408daf9ebc4ee7ab6f9b3 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/logs/debug.log @@ -0,0 +1,28 @@ +2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_setup.py:_flush():76] Configure stats pid to 748 +2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-30 08:47:35,945 WARNING MainThread:748 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/logs/debug.log +2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/logs/debug-internal.log +2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_init.py:init():560] calling init triggers +2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_init.py:init():610] starting backend +2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_init.py:init():614] setting up manager +2024-05-30 
08:47:35,949 INFO MainThread:748 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-30 08:47:35,951 INFO MainThread:748 [wandb_init.py:init():622] backend started and connected +2024-05-30 08:47:35,954 INFO MainThread:748 [wandb_init.py:init():711] updated telemetry +2024-05-30 08:47:35,962 INFO MainThread:748 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-30 08:47:36,268 INFO MainThread:748 [wandb_run.py:_on_init():2396] communicating current version +2024-05-30 08:47:36,380 INFO MainThread:748 [wandb_run.py:_on_init():2405] got version response +2024-05-30 08:47:36,380 INFO MainThread:748 [wandb_init.py:init():795] starting run threads in backend +2024-05-30 08:47:36,672 INFO MainThread:748 [wandb_run.py:_console_start():2374] atexit reg +2024-05-30 08:47:36,672 INFO MainThread:748 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-30 08:47:36,672 INFO MainThread:748 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-30 08:47:36,672 INFO MainThread:748 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-30 08:47:36,674 INFO MainThread:748 [wandb_init.py:init():838] run started, returning control to user process diff --git a/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/run-wtsee6ig.wandb b/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/run-wtsee6ig.wandb new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py34_np19.gz b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py34_np19.gz new file mode 100644 index 0000000000000000000000000000000000000000..7cc0ad77a184883b0268bffd1e2f3d195ae99f6b --- /dev/null +++ b/venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py34_np19.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1abdb3ff5b555831f51f7ff00951e66a49277fc2aa787293f18cf8775be65023 +size 794 diff --git a/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2b48fe07311c1ef814c506db38693e22e71b704f --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__init__.py @@ -0,0 +1,65 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
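The lazy import table that follows mirrors the rest of the library: the AST config and feature-extractor symbols are always importable, while the torch-backed modeling classes are only registered when torch is available. A minimal sketch of what that gives the end user, assuming `transformers` is installed with torch (the checkpoint name is only illustrative and is the one referenced in the docstrings further down):

```python
from transformers import ASTConfig, ASTModel

# Building from a config never touches the Hub and yields randomly initialized weights.
config = ASTConfig(num_mel_bins=128, max_length=1024)
model = ASTModel(config)

# Pretrained weights resolve through the same lazily registered classes.
model = ASTModel.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
```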
+from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available + + +_import_structure = { + "configuration_audio_spectrogram_transformer": [ + "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "ASTConfig", + ], + "feature_extraction_audio_spectrogram_transformer": ["ASTFeatureExtractor"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_audio_spectrogram_transformer"] = [ + "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "ASTForAudioClassification", + "ASTModel", + "ASTPreTrainedModel", + ] + + +if TYPE_CHECKING: + from .configuration_audio_spectrogram_transformer import ( + AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, + ASTConfig, + ) + from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_audio_spectrogram_transformer import ( + AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + ASTForAudioClassification, + ASTModel, + ASTPreTrainedModel, + ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7494ab480cd3c3e175c5e8333877161df9902e64 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/configuration_audio_spectrogram_transformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/configuration_audio_spectrogram_transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5472a4e96676fb2b5aaeca3ad0040f245d110a98 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/configuration_audio_spectrogram_transformer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/convert_audio_spectrogram_transformer_original_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/convert_audio_spectrogram_transformer_original_to_pytorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41d90302edbdd516e03bb6c323b5f6faf066ba55 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/convert_audio_spectrogram_transformer_original_to_pytorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/feature_extraction_audio_spectrogram_transformer.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/feature_extraction_audio_spectrogram_transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c272c7ea3d67518cdf484fdfc260d9cf79bba405 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/feature_extraction_audio_spectrogram_transformer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/modeling_audio_spectrogram_transformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/modeling_audio_spectrogram_transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..122973312fc2cc9c0d4e179b98dd645498ea00cd Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/modeling_audio_spectrogram_transformer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py b/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..94a7af6006fd7ddcdcb2819ea1dab321696198da --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py @@ -0,0 +1,124 @@ +# coding=utf-8 +# Copyright 2022 Google AI and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Audio Spectrogram Transformer (AST) model configuration""" + + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class ASTConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of an [`ASTModel`]. It is used to instantiate an AST + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the AST + [MIT/ast-finetuned-audioset-10-10-0.4593](https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593) + architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. 
+ num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + patch_size (`int`, *optional*, defaults to 16): + The size (resolution) of each patch. + qkv_bias (`bool`, *optional*, defaults to `True`): + Whether to add a bias to the queries, keys and values. + frequency_stride (`int`, *optional*, defaults to 10): + Frequency stride to use when patchifying the spectrograms. + time_stride (`int`, *optional*, defaults to 10): + Temporal stride to use when patchifying the spectrograms. + max_length (`int`, *optional*, defaults to 1024): + Temporal dimension of the spectrograms. + num_mel_bins (`int`, *optional*, defaults to 128): + Frequency dimension of the spectrograms (number of Mel-frequency bins). + + Example: + + ```python + >>> from transformers import ASTConfig, ASTModel + + >>> # Initializing a AST MIT/ast-finetuned-audioset-10-10-0.4593 style configuration + >>> configuration = ASTConfig() + + >>> # Initializing a model (with random weights) from the MIT/ast-finetuned-audioset-10-10-0.4593 style configuration + >>> model = ASTModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "audio-spectrogram-transformer" + + def __init__( + self, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.0, + attention_probs_dropout_prob=0.0, + initializer_range=0.02, + layer_norm_eps=1e-12, + patch_size=16, + qkv_bias=True, + frequency_stride=10, + time_stride=10, + max_length=1024, + num_mel_bins=128, + **kwargs, + ): + super().__init__(**kwargs) + + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.patch_size = patch_size + self.qkv_bias = qkv_bias + self.frequency_stride = frequency_stride + self.time_stride = time_stride + self.max_length = max_length + self.num_mel_bins = num_mel_bins diff --git a/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/convert_audio_spectrogram_transformer_original_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/convert_audio_spectrogram_transformer_original_to_pytorch.py new 
file mode 100644 index 0000000000000000000000000000000000000000..32e0f33d04fdb242bc4fc37c3906dee90510ebc4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/convert_audio_spectrogram_transformer_original_to_pytorch.py @@ -0,0 +1,279 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert Audio Spectrogram Transformer checkpoints from the original repository. URL: https://github.com/YuanGongND/ast""" + + +import argparse +import json +from pathlib import Path + +import torch +import torchaudio +from datasets import load_dataset +from huggingface_hub import hf_hub_download + +from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification +from transformers.utils import logging + + +logging.set_verbosity_info() +logger = logging.get_logger(__name__) + + +def get_audio_spectrogram_transformer_config(model_name): + config = ASTConfig() + + if "10-10" in model_name: + pass + elif "speech-commands" in model_name: + config.max_length = 128 + elif "12-12" in model_name: + config.time_stride = 12 + config.frequency_stride = 12 + elif "14-14" in model_name: + config.time_stride = 14 + config.frequency_stride = 14 + elif "16-16" in model_name: + config.time_stride = 16 + config.frequency_stride = 16 + else: + raise ValueError("Model not supported") + + repo_id = "huggingface/label-files" + if "speech-commands" in model_name: + config.num_labels = 35 + filename = "speech-commands-v2-id2label.json" + else: + config.num_labels = 527 + filename = "audioset-id2label.json" + + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) + id2label = {int(k): v for k, v in id2label.items()} + config.id2label = id2label + config.label2id = {v: k for k, v in id2label.items()} + + return config + + +def rename_key(name): + if "module.v" in name: + name = name.replace("module.v", "audio_spectrogram_transformer") + if "cls_token" in name: + name = name.replace("cls_token", "embeddings.cls_token") + if "dist_token" in name: + name = name.replace("dist_token", "embeddings.distillation_token") + if "pos_embed" in name: + name = name.replace("pos_embed", "embeddings.position_embeddings") + if "patch_embed.proj" in name: + name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection") + # transformer blocks + if "blocks" in name: + name = name.replace("blocks", "encoder.layer") + if "attn.proj" in name: + name = name.replace("attn.proj", "attention.output.dense") + if "attn" in name: + name = name.replace("attn", "attention.self") + if "norm1" in name: + name = name.replace("norm1", "layernorm_before") + if "norm2" in name: + name = name.replace("norm2", "layernorm_after") + if "mlp.fc1" in name: + name = name.replace("mlp.fc1", "intermediate.dense") + if "mlp.fc2" in name: + name = name.replace("mlp.fc2", "output.dense") + # final layernorm + if "audio_spectrogram_transformer.norm" in name: + name = 
name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm") + # classifier head + if "module.mlp_head.0" in name: + name = name.replace("module.mlp_head.0", "classifier.layernorm") + if "module.mlp_head.1" in name: + name = name.replace("module.mlp_head.1", "classifier.dense") + + return name + + +def convert_state_dict(orig_state_dict, config): + for key in orig_state_dict.copy().keys(): + val = orig_state_dict.pop(key) + + if "qkv" in key: + key_split = key.split(".") + layer_num = int(key_split[3]) + dim = config.hidden_size + if "weight" in key: + orig_state_dict[ + f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight" + ] = val[:dim, :] + orig_state_dict[ + f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight" + ] = val[dim : dim * 2, :] + orig_state_dict[ + f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight" + ] = val[-dim:, :] + else: + orig_state_dict[ + f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias" + ] = val[:dim] + orig_state_dict[ + f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias" + ] = val[dim : dim * 2] + orig_state_dict[ + f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias" + ] = val[-dim:] + else: + orig_state_dict[rename_key(key)] = val + + return orig_state_dict + + +def remove_keys(state_dict): + ignore_keys = [ + "module.v.head.weight", + "module.v.head.bias", + "module.v.head_dist.weight", + "module.v.head_dist.bias", + ] + for k in ignore_keys: + state_dict.pop(k, None) + + +@torch.no_grad() +def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False): + """ + Copy/paste/tweak model's weights to our Audio Spectrogram Transformer structure. 
+ """ + config = get_audio_spectrogram_transformer_config(model_name) + + model_name_to_url = { + "ast-finetuned-audioset-10-10-0.4593": ( + "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1" + ), + "ast-finetuned-audioset-10-10-0.450": ( + "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1" + ), + "ast-finetuned-audioset-10-10-0.448": ( + "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1" + ), + "ast-finetuned-audioset-10-10-0.448-v2": ( + "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1" + ), + "ast-finetuned-audioset-12-12-0.447": ( + "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1" + ), + "ast-finetuned-audioset-14-14-0.443": ( + "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1" + ), + "ast-finetuned-audioset-16-16-0.442": ( + "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1" + ), + "ast-finetuned-speech-commands-v2": ( + "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1" + ), + } + + # load original state_dict + checkpoint_url = model_name_to_url[model_name] + state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") + # remove some keys + remove_keys(state_dict) + # rename some keys + new_state_dict = convert_state_dict(state_dict, config) + + # load 🤗 model + model = ASTForAudioClassification(config) + model.eval() + + model.load_state_dict(new_state_dict) + + # verify outputs on dummy input + # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 + mean = -4.2677393 if "speech-commands" not in model_name else -6.845978 + std = 4.5689974 if "speech-commands" not in model_name else 5.5654526 + max_length = 1024 if "speech-commands" not in model_name else 128 + feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length) + + if "speech-commands" in model_name: + dataset = load_dataset("speech_commands", "v0.02", split="validation") + waveform = dataset[0]["audio"]["array"] + else: + filepath = hf_hub_download( + repo_id="nielsr/audio-spectogram-transformer-checkpoint", + filename="sample_audio.flac", + repo_type="dataset", + ) + + waveform, _ = torchaudio.load(filepath) + waveform = waveform.squeeze().numpy() + + inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt") + + # forward pass + outputs = model(**inputs) + logits = outputs.logits + + if model_name == "ast-finetuned-audioset-10-10-0.4593": + expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]) + elif model_name == "ast-finetuned-audioset-10-10-0.450": + expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718]) + elif model_name == "ast-finetuned-audioset-10-10-0.448": + expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344]) + elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": + expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917]) + elif model_name == "ast-finetuned-audioset-12-12-0.447": + expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843]) + elif model_name == "ast-finetuned-audioset-14-14-0.443": + expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413]) + elif model_name == "ast-finetuned-audioset-16-16-0.442": + expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470]) + elif model_name == "ast-finetuned-speech-commands-v2": + expected_slice = torch.tensor([6.1589, -8.0566, -8.7984]) + else: + raise ValueError("Unknown model name") + if not torch.allclose(logits[0, :3], 
expected_slice, atol=1e-4): + raise ValueError("Logits don't match") + print("Looks ok!") + + if pytorch_dump_folder_path is not None: + Path(pytorch_dump_folder_path).mkdir(exist_ok=True) + print(f"Saving model {model_name} to {pytorch_dump_folder_path}") + model.save_pretrained(pytorch_dump_folder_path) + print(f"Saving feature extractor to {pytorch_dump_folder_path}") + feature_extractor.save_pretrained(pytorch_dump_folder_path) + + if push_to_hub: + print("Pushing model and feature extractor to the hub...") + model.push_to_hub(f"MIT/{model_name}") + feature_extractor.push_to_hub(f"MIT/{model_name}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--model_name", + default="ast-finetuned-audioset-10-10-0.4593", + type=str, + help="Name of the Audio Spectrogram Transformer model you'd like to convert.", + ) + parser.add_argument( + "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." + ) + parser.add_argument( + "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." + ) + + args = parser.parse_args() + convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) diff --git a/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py b/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..2bd122b4098c360a04e96531bcf8f68c8a4d980f --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py @@ -0,0 +1,236 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Feature extractor class for Audio Spectrogram Transformer. +""" + +from typing import List, Optional, Union + +import numpy as np + +from ...audio_utils import mel_filter_bank, spectrogram, window_function +from ...feature_extraction_sequence_utils import SequenceFeatureExtractor +from ...feature_extraction_utils import BatchFeature +from ...utils import TensorType, is_speech_available, is_torch_available, logging + + +if is_speech_available(): + import torchaudio.compliance.kaldi as ta_kaldi + +if is_torch_available(): + import torch + + +logger = logging.get_logger(__name__) + + +class ASTFeatureExtractor(SequenceFeatureExtractor): + r""" + Constructs a Audio Spectrogram Transformer (AST) feature extractor. + + This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains + most of the main methods. Users should refer to this superclass for more information regarding those methods. 
+ + This class extracts mel-filter bank features from raw speech using TorchAudio if installed or using numpy + otherwise, pads/truncates them to a fixed length and normalizes them using a mean and standard deviation. + + Args: + feature_size (`int`, *optional*, defaults to 1): + The feature dimension of the extracted features. + sampling_rate (`int`, *optional*, defaults to 16000): + The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). + num_mel_bins (`int`, *optional*, defaults to 128): + Number of Mel-frequency bins. + max_length (`int`, *optional*, defaults to 1024): + Maximum length to which to pad/truncate the extracted features. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether or not to normalize the log-Mel features using `mean` and `std`. + mean (`float`, *optional*, defaults to -4.2677393): + The mean value used to normalize the log-Mel features. Uses the AudioSet mean by default. + std (`float`, *optional*, defaults to 4.5689974): + The standard deviation value used to normalize the log-Mel features. Uses the AudioSet standard deviation + by default. + return_attention_mask (`bool`, *optional*, defaults to `False`): + Whether or not [`~ASTFeatureExtractor.__call__`] should return `attention_mask`. + """ + + model_input_names = ["input_values", "attention_mask"] + + def __init__( + self, + feature_size=1, + sampling_rate=16000, + num_mel_bins=128, + max_length=1024, + padding_value=0.0, + do_normalize=True, + mean=-4.2677393, + std=4.5689974, + return_attention_mask=False, + **kwargs, + ): + super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs) + self.num_mel_bins = num_mel_bins + self.max_length = max_length + self.do_normalize = do_normalize + self.mean = mean + self.std = std + self.return_attention_mask = return_attention_mask + + if not is_speech_available(): + mel_filters = mel_filter_bank( + num_frequency_bins=256, + num_mel_filters=self.num_mel_bins, + min_frequency=20, + max_frequency=sampling_rate // 2, + sampling_rate=sampling_rate, + norm=None, + mel_scale="kaldi", + triangularize_in_mel_space=True, + ) + + self.mel_filters = np.pad(mel_filters, ((0, 1), (0, 0))) + self.window = window_function(400, "hann", periodic=False) + + def _extract_fbank_features( + self, + waveform: np.ndarray, + max_length: int, + ) -> np.ndarray: + """ + Get mel-filter bank features using TorchAudio. Note that TorchAudio requires 16-bit signed integers as inputs + and hence the waveform should not be normalized before feature extraction. 
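        A rough shape sketch with the default settings (the silent one-second clip is purely illustrative):

        ```python
        >>> import numpy as np
        >>> extractor = ASTFeatureExtractor()  # num_mel_bins=128, max_length=1024
        >>> waveform = np.zeros(16000, dtype=np.float32)  # 1 s of mono audio at 16 kHz
        >>> fbank = extractor._extract_fbank_features(waveform, max_length=extractor.max_length)
        >>> fbank.shape  # (max_length, num_mel_bins) after padding the ~100 real frames
        (1024, 128)
        ```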
+ """ + # waveform = waveform * (2**15) # Kaldi compliance: 16-bit signed integers + if is_speech_available(): + waveform = torch.from_numpy(waveform).unsqueeze(0) + fbank = ta_kaldi.fbank( + waveform, + sample_frequency=self.sampling_rate, + window_type="hanning", + num_mel_bins=self.num_mel_bins, + ) + else: + waveform = np.squeeze(waveform) + fbank = spectrogram( + waveform, + self.window, + frame_length=400, + hop_length=160, + fft_length=512, + power=2.0, + center=False, + preemphasis=0.97, + mel_filters=self.mel_filters, + log_mel="log", + mel_floor=1.192092955078125e-07, + remove_dc_offset=True, + ).T + + fbank = torch.from_numpy(fbank) + + n_frames = fbank.shape[0] + difference = max_length - n_frames + + # pad or truncate, depending on difference + if difference > 0: + pad_module = torch.nn.ZeroPad2d((0, 0, 0, difference)) + fbank = pad_module(fbank) + elif difference < 0: + fbank = fbank[0:max_length, :] + + fbank = fbank.numpy() + + return fbank + + def normalize(self, input_values: np.ndarray) -> np.ndarray: + return (input_values - (self.mean)) / (self.std * 2) + + def __call__( + self, + raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], + sampling_rate: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + **kwargs, + ) -> BatchFeature: + """ + Main method to featurize and prepare for the model one or several sequence(s). + + Args: + raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`): + The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float + values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not + stereo, i.e. single float per timestep. + sampling_rate (`int`, *optional*): + The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass + `sampling_rate` at the forward call to prevent silent errors. + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors instead of list of python integers. Acceptable values are: + + - `'tf'`: Return TensorFlow `tf.constant` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return Numpy `np.ndarray` objects. + """ + + if sampling_rate is not None: + if sampling_rate != self.sampling_rate: + raise ValueError( + f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" + f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with" + f" {self.sampling_rate} and not {sampling_rate}." + ) + else: + logger.warning( + "It is strongly recommended to pass the `sampling_rate` argument to this function. " + "Failing to do so can result in silent errors that might be hard to debug." 
+ ) + + is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1 + if is_batched_numpy and len(raw_speech.shape) > 2: + raise ValueError(f"Only mono-channel audio is supported for input to {self}") + is_batched = is_batched_numpy or ( + isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list))) + ) + + if is_batched: + raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech] + elif not is_batched and not isinstance(raw_speech, np.ndarray): + raw_speech = np.asarray(raw_speech, dtype=np.float32) + elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64): + raw_speech = raw_speech.astype(np.float32) + + # always return batch + if not is_batched: + raw_speech = [raw_speech] + + # extract fbank features and pad/truncate to max_length + features = [self._extract_fbank_features(waveform, max_length=self.max_length) for waveform in raw_speech] + + # convert into BatchFeature + padded_inputs = BatchFeature({"input_values": features}) + + # make sure list is in array format + input_values = padded_inputs.get("input_values") + if isinstance(input_values[0], list): + padded_inputs["input_values"] = [np.asarray(feature, dtype=np.float32) for feature in input_values] + + # normalization + if self.do_normalize: + padded_inputs["input_values"] = [self.normalize(feature) for feature in input_values] + + if return_tensors is not None: + padded_inputs = padded_inputs.convert_to_tensors(return_tensors) + + return padded_inputs diff --git a/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py b/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..5ec18e2c7f16b2065f3af3dc9fba51d89881f85a --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py @@ -0,0 +1,613 @@ +# coding=utf-8 +# Copyright 2022 MIT and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
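Before the modeling code itself, here is a minimal end-to-end sketch of how the feature extractor above and the classification model defined below fit together (the checkpoint name is the one used for the doc examples in this file; the random waveform is purely illustrative, any 16 kHz mono array works):

```python
import numpy as np
import torch
from transformers import ASTFeatureExtractor, ASTForAudioClassification

checkpoint = "MIT/ast-finetuned-audioset-10-10-0.4593"  # AudioSet-finetuned AST
feature_extractor = ASTFeatureExtractor.from_pretrained(checkpoint)
model = ASTForAudioClassification.from_pretrained(checkpoint)

waveform = np.random.randn(16000).astype(np.float32)  # 1 s of fake mono audio at 16 kHz
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")  # input_values: (1, 1024, 128)

with torch.no_grad():
    logits = model(**inputs).logits  # (1, 527) for the AudioSet label set

predicted_label = model.config.id2label[logits.argmax(-1).item()]
```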
+""" PyTorch Audio Spectrogram Transformer (AST) model.""" + +import math +from typing import Dict, List, Optional, Set, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, SequenceClassifierOutput +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer +from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging +from .configuration_audio_spectrogram_transformer import ASTConfig + + +logger = logging.get_logger(__name__) + +# General docstring +_CONFIG_FOR_DOC = "ASTConfig" + +# Base docstring +_CHECKPOINT_FOR_DOC = "MIT/ast-finetuned-audioset-10-10-0.4593" +_EXPECTED_OUTPUT_SHAPE = [1, 1214, 768] + +# Audio classification docstring +_SEQ_CLASS_CHECKPOINT = "MIT/ast-finetuned-audioset-10-10-0.4593" +_SEQ_CLASS_EXPECTED_OUTPUT = "'Speech'" +_SEQ_CLASS_EXPECTED_LOSS = 0.17 + + +from ..deprecated._archive_maps import AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +class ASTEmbeddings(nn.Module): + """ + Construct the CLS token, position and patch embeddings. + """ + + def __init__(self, config: ASTConfig) -> None: + super().__init__() + + self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) + self.distillation_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) + self.patch_embeddings = ASTPatchEmbeddings(config) + + frequency_out_dimension, time_out_dimension = self.get_shape(config) + num_patches = frequency_out_dimension * time_out_dimension + self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 2, config.hidden_size)) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.config = config + + def get_shape(self, config): + # see Karpathy's cs231n blog on how to calculate the output dimensions + # https://cs231n.github.io/convolutional-networks/#conv + frequency_out_dimension = (config.num_mel_bins - config.patch_size) // config.frequency_stride + 1 + time_out_dimension = (config.max_length - config.patch_size) // config.time_stride + 1 + + return frequency_out_dimension, time_out_dimension + + def forward(self, input_values: torch.Tensor) -> torch.Tensor: + batch_size = input_values.shape[0] + embeddings = self.patch_embeddings(input_values) + + cls_tokens = self.cls_token.expand(batch_size, -1, -1) + distillation_tokens = self.distillation_token.expand(batch_size, -1, -1) + embeddings = torch.cat((cls_tokens, distillation_tokens, embeddings), dim=1) + embeddings = embeddings + self.position_embeddings + embeddings = self.dropout(embeddings) + + return embeddings + + +class ASTPatchEmbeddings(nn.Module): + """ + This class turns `input_values` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, + seq_length, hidden_size)` to be consumed by a Transformer. 
+ """ + + def __init__(self, config): + super().__init__() + + patch_size = config.patch_size + frequency_stride = config.frequency_stride + time_stride = config.time_stride + + self.projection = nn.Conv2d( + 1, config.hidden_size, kernel_size=(patch_size, patch_size), stride=(frequency_stride, time_stride) + ) + + def forward(self, input_values: torch.Tensor) -> torch.Tensor: + input_values = input_values.unsqueeze(1) + input_values = input_values.transpose(2, 3) + embeddings = self.projection(input_values).flatten(2).transpose(1, 2) + return embeddings + + +# Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->AST +class ASTSelfAttention(nn.Module): + def __init__(self, config: ASTConfig) -> None: + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size {config.hidden_size,} is not a multiple of the number of attention " + f"heads {config.num_attention_heads}." + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) + self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) + self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False + ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: + mixed_query_layer = self.query(hidden_states) + + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + query_layer = self.transpose_for_scores(mixed_query_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + return outputs + + +# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->AST +class ASTSelfOutput(nn.Module): + """ + The residual connection is defined in ASTLayer instead of here (as is the case with other models), due to the + layernorm applied before each block. 
+ """ + + def __init__(self, config: ASTConfig) -> None: + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + + return hidden_states + + +# Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->AST +class ASTAttention(nn.Module): + def __init__(self, config: ASTConfig) -> None: + super().__init__() + self.attention = ASTSelfAttention(config) + self.output = ASTSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads: Set[int]) -> None: + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.attention.query = prune_linear_layer(self.attention.query, index) + self.attention.key = prune_linear_layer(self.attention.key, index) + self.attention.value = prune_linear_layer(self.attention.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) + self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: + self_outputs = self.attention(hidden_states, head_mask, output_attentions) + + attention_output = self.output(self_outputs[0], hidden_states) + + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +# Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViT->AST +class ASTIntermediate(nn.Module): + def __init__(self, config: ASTConfig) -> None: + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + + return hidden_states + + +# Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->AST +class ASTOutput(nn.Module): + def __init__(self, config: ASTConfig) -> None: + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + + hidden_states = hidden_states + input_tensor + + return hidden_states + + +# Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->AST +class ASTLayer(nn.Module): + """This corresponds to the Block class in the timm implementation.""" + + def __init__(self, config: ASTConfig) -> None: + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = 
ASTAttention(config) + self.intermediate = ASTIntermediate(config) + self.output = ASTOutput(config) + self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: + self_attention_outputs = self.attention( + self.layernorm_before(hidden_states), # in AST, layernorm is applied before self-attention + head_mask, + output_attentions=output_attentions, + ) + attention_output = self_attention_outputs[0] + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + # first residual connection + hidden_states = attention_output + hidden_states + + # in AST, layernorm is also applied after self-attention + layer_output = self.layernorm_after(hidden_states) + layer_output = self.intermediate(layer_output) + + # second residual connection is done here + layer_output = self.output(layer_output, hidden_states) + + outputs = (layer_output,) + outputs + + return outputs + + +# Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->AST +class ASTEncoder(nn.Module): + def __init__(self, config: ASTConfig) -> None: + super().__init__() + self.config = config + self.layer = nn.ModuleList([ASTLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ) -> Union[tuple, BaseModelOutput]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + layer_module.__call__, + hidden_states, + layer_head_mask, + output_attentions, + ) + else: + layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) + + +class ASTPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
+ """ + + config_class = ASTConfig + base_model_prefix = "audio_spectrogram_transformer" + main_input_name = "input_values" + supports_gradient_checkpointing = True + + # Copied from transformers.models.deit.modeling_deit.DeiTPreTrainedModel._init_weights + def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: + """Initialize the weights""" + if isinstance(module, (nn.Linear, nn.Conv2d)): + # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid + # `trunc_normal_cpu` not implemented in `half` issues + module.weight.data = nn.init.trunc_normal_( + module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range + ).to(module.weight.dtype) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + +AUDIO_SPECTROGRAM_TRANSFORMER_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it + as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`ASTConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +AUDIO_SPECTROGRAM_TRANSFORMER_INPUTS_DOCSTRING = r""" + Args: + input_values (`torch.FloatTensor` of shape `(batch_size, max_length, num_mel_bins)`): + Float values mel features extracted from the raw audio waveform. Raw audio waveform can be obtained by + loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via + the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the + [`AutoFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a + tensor of type `torch.FloatTensor`. See [`~ASTFeatureExtractor.__call__`] + + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+""" + + +@add_start_docstrings( + "The bare AST Model transformer outputting raw hidden-states without any specific head on top.", + AUDIO_SPECTROGRAM_TRANSFORMER_START_DOCSTRING, +) +class ASTModel(ASTPreTrainedModel): + def __init__(self, config: ASTConfig) -> None: + super().__init__(config) + self.config = config + + self.embeddings = ASTEmbeddings(config) + self.encoder = ASTEncoder(config) + + self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> ASTPatchEmbeddings: + return self.embeddings.patch_embeddings + + def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(AUDIO_SPECTROGRAM_TRANSFORMER_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPooling, + config_class=_CONFIG_FOR_DOC, + modality="audio", + expected_output=_EXPECTED_OUTPUT_SHAPE, + ) + def forward( + self, + input_values: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_values is None: + raise ValueError("You have to specify input_values") + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + embedding_output = self.embeddings(input_values) + + encoder_outputs = self.encoder( + embedding_output, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + sequence_output = self.layernorm(sequence_output) + + pooled_output = (sequence_output[:, 0] + sequence_output[:, 1]) / 2 + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPooling( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +class ASTMLPHead(nn.Module): + def __init__(self, config: ASTConfig): + super().__init__() + self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dense = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() + + def forward(self, hidden_state): + hidden_state = self.layernorm(hidden_state) + hidden_state = self.dense(hidden_state) + return hidden_state + + +@add_start_docstrings( + """ + Audio Spectrogram 
Transformer model with an audio classification head on top (a linear layer on top of the pooled + output) e.g. for datasets like AudioSet, Speech Commands v2. + """, + AUDIO_SPECTROGRAM_TRANSFORMER_START_DOCSTRING, +) +class ASTForAudioClassification(ASTPreTrainedModel): + def __init__(self, config: ASTConfig) -> None: + super().__init__(config) + + self.num_labels = config.num_labels + self.audio_spectrogram_transformer = ASTModel(config) + + # Classifier head + self.classifier = ASTMLPHead(config) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(AUDIO_SPECTROGRAM_TRANSFORMER_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_SEQ_CLASS_CHECKPOINT, + output_type=SequenceClassifierOutput, + config_class=_CONFIG_FOR_DOC, + modality="audio", + expected_output=_SEQ_CLASS_EXPECTED_OUTPUT, + expected_loss=_SEQ_CLASS_EXPECTED_LOSS, + ) + def forward( + self, + input_values: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[tuple, SequenceClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the audio classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.audio_spectrogram_transformer( + input_values, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + logits = self.classifier(pooled_output) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/clip/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/clip/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..868c46616e9b330ebbf9dcdff44ead1795edf4d0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/clip/__init__.py @@ -0,0 +1,183 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. 
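The classification model above pools the CLS and distillation tokens produced by `ASTModel` (their mean) and feeds the result through `ASTMLPHead`, choosing MSE, cross-entropy, or BCE loss from `config.problem_type`. Below is a minimal sketch of exercising that path with a randomly initialized model rather than a released checkpoint; the label count and tensor shapes are illustrative assumptions, not values taken from this diff.

```python
import torch
from transformers import ASTConfig, ASTForAudioClassification

# Randomly initialized AST with 5 hypothetical classes (no checkpoint download).
config = ASTConfig(num_labels=5)
model = ASTForAudioClassification(config).eval()

# input_values: (batch_size, max_length, num_mel_bins) mel features, normally
# produced by ASTFeatureExtractor from raw audio.
input_values = torch.randn(1, config.max_length, config.num_mel_bins)
labels = torch.tensor([3])  # integer labels -> single_label_classification -> CrossEntropyLoss

with torch.no_grad():
    outputs = model(input_values=input_values, labels=labels)

print(outputs.logits.shape)  # torch.Size([1, 5])
print(outputs.loss)          # scalar cross-entropy loss
```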
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_flax_available, + is_tf_available, + is_tokenizers_available, + is_torch_available, + is_vision_available, +) + + +_import_structure = { + "configuration_clip": [ + "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "CLIPConfig", + "CLIPOnnxConfig", + "CLIPTextConfig", + "CLIPVisionConfig", + ], + "processing_clip": ["CLIPProcessor"], + "tokenization_clip": ["CLIPTokenizer"], +} + +try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"] + +try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"] + _import_structure["image_processing_clip"] = ["CLIPImageProcessor"] + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_clip"] = [ + "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", + "CLIPModel", + "CLIPPreTrainedModel", + "CLIPTextModel", + "CLIPTextModelWithProjection", + "CLIPVisionModel", + "CLIPVisionModelWithProjection", + "CLIPForImageClassification", + ] + +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tf_clip"] = [ + "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFCLIPModel", + "TFCLIPPreTrainedModel", + "TFCLIPTextModel", + "TFCLIPVisionModel", + ] + +try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_flax_clip"] = [ + "FlaxCLIPModel", + "FlaxCLIPPreTrainedModel", + "FlaxCLIPTextModel", + "FlaxCLIPTextPreTrainedModel", + "FlaxCLIPTextModelWithProjection", + "FlaxCLIPVisionModel", + "FlaxCLIPVisionPreTrainedModel", + ] + + +if TYPE_CHECKING: + from .configuration_clip import ( + CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, + CLIPConfig, + CLIPOnnxConfig, + CLIPTextConfig, + CLIPVisionConfig, + ) + from .processing_clip import CLIPProcessor + from .tokenization_clip import CLIPTokenizer + + try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .tokenization_clip_fast import CLIPTokenizerFast + + try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .feature_extraction_clip import CLIPFeatureExtractor + from .image_processing_clip import CLIPImageProcessor + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_clip import ( + CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, + 
CLIPForImageClassification, + CLIPModel, + CLIPPreTrainedModel, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPVisionModel, + CLIPVisionModelWithProjection, + ) + + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tf_clip import ( + TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, + TFCLIPModel, + TFCLIPPreTrainedModel, + TFCLIPTextModel, + TFCLIPVisionModel, + ) + + try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_flax_clip import ( + FlaxCLIPModel, + FlaxCLIPPreTrainedModel, + FlaxCLIPTextModel, + FlaxCLIPTextModelWithProjection, + FlaxCLIPTextPreTrainedModel, + FlaxCLIPVisionModel, + FlaxCLIPVisionPreTrainedModel, + ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7c0932b241584a3752a94f011d7cd31528dcfb0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/configuration_clip.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/configuration_clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbf5bb7afd712ad2bd74bb8c354b102f7241c6c5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/configuration_clip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/convert_clip_original_pytorch_to_hf.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/convert_clip_original_pytorch_to_hf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3076e06f58e00489f34977dbde8489da4a892ae Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/convert_clip_original_pytorch_to_hf.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/feature_extraction_clip.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/feature_extraction_clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66cb29f401913b4f02732c21c13678a87a4677df Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/feature_extraction_clip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/image_processing_clip.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/image_processing_clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f85d3c170ca6bc8bd917f58d6135ef8e88074239 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/image_processing_clip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_clip.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f15f7cb478340b2fdd4368c201ec87b7d635da8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_clip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_flax_clip.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_flax_clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03947f2c9a74062adf01000a9ad9b76f37793eb6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_flax_clip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_tf_clip.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_tf_clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..421e1d6104b058cc836baf19cc743bddd239b7a6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_tf_clip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/processing_clip.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/processing_clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02ee1678662b334870f3a8c822bb53c68d70ae09 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/processing_clip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/tokenization_clip_fast.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/tokenization_clip_fast.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d02f429d17260b4e43507eee3e22c7354aeabe53 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/tokenization_clip_fast.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/clip/configuration_clip.py b/venv/lib/python3.10/site-packages/transformers/models/clip/configuration_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..a48cb73a9715ba85f4081df6d15be3c2a5a9c9b3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/clip/configuration_clip.py @@ -0,0 +1,456 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
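For context on the CLIP `__init__.py` shown earlier: public names are declared in `_import_structure`, each backend-specific group is added only if its optional dependency check passes, and `_LazyModule` defers the real imports until attribute access. The snippet below is a stripped-down illustration of that gating idea, not the transformers implementation.

```python
# Illustrative sketch only: gate optional, backend-specific exports on importability.
import importlib.util


def _is_available(package: str) -> bool:
    # transformers uses more careful checks (version pins, env flags); this just probes the package.
    return importlib.util.find_spec(package) is not None


_import_structure = {"configuration_clip": ["CLIPConfig", "CLIPTextConfig", "CLIPVisionConfig"]}

if _is_available("torch"):
    _import_structure["modeling_clip"] = ["CLIPModel", "CLIPTextModel", "CLIPVisionModel"]
if _is_available("tensorflow"):
    _import_structure["modeling_tf_clip"] = ["TFCLIPModel"]

print(_import_structure)  # only backends present in this environment are advertised
```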
+""" CLIP model configuration""" + +import os +from collections import OrderedDict +from typing import TYPE_CHECKING, Any, Mapping, Optional, Union + + +if TYPE_CHECKING: + from ...processing_utils import ProcessorMixin + from ...utils import TensorType + +from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class CLIPTextConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`CLIPTextModel`]. It is used to instantiate a CLIP + text encoder according to the specified arguments, defining the model architecture. Instantiating a configuration + with the defaults will yield a similar configuration to that of the text encoder of the CLIP + [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + vocab_size (`int`, *optional*, defaults to 49408): + Vocabulary size of the CLIP text model. Defines the number of different tokens that can be represented by + the `inputs_ids` passed when calling [`CLIPModel`]. + hidden_size (`int`, *optional*, defaults to 512): + Dimensionality of the encoder layers and the pooler layer. + intermediate_size (`int`, *optional*, defaults to 2048): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + projection_dim (`int`, *optional*, defaults to 512): + Dimentionality of text and vision projection layers. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 8): + Number of attention heads for each attention layer in the Transformer encoder. + max_position_embeddings (`int`, *optional*, defaults to 77): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. + layer_norm_eps (`float`, *optional*, defaults to 1e-05): + The epsilon used by the layer normalization layers. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + initializer_factor (`float`, *optional*, defaults to 1.0): + A factor for initializing all weight matrices (should be kept to 1, used internally for initialization + testing). + pad_token_id (`int`, *optional*, defaults to 1): + Padding token id. + bos_token_id (`int`, *optional*, defaults to 49406): + Beginning of stream token id. + eos_token_id (`int`, *optional*, defaults to 49407): + End of stream token id. 
+ + Example: + + ```python + >>> from transformers import CLIPTextConfig, CLIPTextModel + + >>> # Initializing a CLIPTextConfig with openai/clip-vit-base-patch32 style configuration + >>> configuration = CLIPTextConfig() + + >>> # Initializing a CLIPTextModel (with random weights) from the openai/clip-vit-base-patch32 style configuration + >>> model = CLIPTextModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "clip_text_model" + + def __init__( + self, + vocab_size=49408, + hidden_size=512, + intermediate_size=2048, + projection_dim=512, + num_hidden_layers=12, + num_attention_heads=8, + max_position_embeddings=77, + hidden_act="quick_gelu", + layer_norm_eps=1e-5, + attention_dropout=0.0, + initializer_range=0.02, + initializer_factor=1.0, + # This differs from `CLIPTokenizer`'s default and from openai/clip + # See https://github.com/huggingface/transformers/pull/24773#issuecomment-1632287538 + pad_token_id=1, + bos_token_id=49406, + eos_token_id=49407, + **kwargs, + ): + super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.projection_dim = projection_dim + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.max_position_embeddings = max_position_embeddings + self.layer_norm_eps = layer_norm_eps + self.hidden_act = hidden_act + self.initializer_range = initializer_range + self.initializer_factor = initializer_factor + self.attention_dropout = attention_dropout + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": + cls._set_token_in_kwargs(kwargs) + + config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) + + # get the text config dict if we are loading from CLIPConfig + if config_dict.get("model_type") == "clip": + config_dict = config_dict["text_config"] + + if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: + logger.warning( + f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." + ) + + return cls.from_dict(config_dict, **kwargs) + + +class CLIPVisionConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`CLIPVisionModel`]. It is used to instantiate a + CLIP vision encoder according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the vision encoder of the CLIP + [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + projection_dim (`int`, *optional*, defaults to 512): + Dimentionality of text and vision projection layers. 
+ num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + image_size (`int`, *optional*, defaults to 224): + The size (resolution) of each image. + patch_size (`int`, *optional*, defaults to 32): + The size (resolution) of each patch. + hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported. + layer_norm_eps (`float`, *optional*, defaults to 1e-05): + The epsilon used by the layer normalization layers. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + initializer_factor (`float`, *optional*, defaults to 1.0): + A factor for initializing all weight matrices (should be kept to 1, used internally for initialization + testing). + + Example: + + ```python + >>> from transformers import CLIPVisionConfig, CLIPVisionModel + + >>> # Initializing a CLIPVisionConfig with openai/clip-vit-base-patch32 style configuration + >>> configuration = CLIPVisionConfig() + + >>> # Initializing a CLIPVisionModel (with random weights) from the openai/clip-vit-base-patch32 style configuration + >>> model = CLIPVisionModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "clip_vision_model" + + def __init__( + self, + hidden_size=768, + intermediate_size=3072, + projection_dim=512, + num_hidden_layers=12, + num_attention_heads=12, + num_channels=3, + image_size=224, + patch_size=32, + hidden_act="quick_gelu", + layer_norm_eps=1e-5, + attention_dropout=0.0, + initializer_range=0.02, + initializer_factor=1.0, + **kwargs, + ): + super().__init__(**kwargs) + + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.projection_dim = projection_dim + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.num_channels = num_channels + self.patch_size = patch_size + self.image_size = image_size + self.initializer_range = initializer_range + self.initializer_factor = initializer_factor + self.attention_dropout = attention_dropout + self.layer_norm_eps = layer_norm_eps + self.hidden_act = hidden_act + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": + cls._set_token_in_kwargs(kwargs) + + config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) + + # get the vision config dict if we are loading from CLIPConfig + if config_dict.get("model_type") == "clip": + config_dict = config_dict["vision_config"] + + if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: + logger.warning( + f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
+ ) + + return cls.from_dict(config_dict, **kwargs) + + +class CLIPConfig(PretrainedConfig): + r""" + [`CLIPConfig`] is the configuration class to store the configuration of a [`CLIPModel`]. It is used to instantiate + a CLIP model according to the specified arguments, defining the text model and vision model configs. Instantiating + a configuration with the defaults will yield a similar configuration to that of the CLIP + [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + text_config (`dict`, *optional*): + Dictionary of configuration options used to initialize [`CLIPTextConfig`]. + vision_config (`dict`, *optional*): + Dictionary of configuration options used to initialize [`CLIPVisionConfig`]. + projection_dim (`int`, *optional*, defaults to 512): + Dimensionality of text and vision projection layers. + logit_scale_init_value (`float`, *optional*, defaults to 2.6592): + The initial value of the *logit_scale* parameter. Default is used as per the original CLIP implementation. + kwargs (*optional*): + Dictionary of keyword arguments. + + Example: + + ```python + >>> from transformers import CLIPConfig, CLIPModel + + >>> # Initializing a CLIPConfig with openai/clip-vit-base-patch32 style configuration + >>> configuration = CLIPConfig() + + >>> # Initializing a CLIPModel (with random weights) from the openai/clip-vit-base-patch32 style configuration + >>> model = CLIPModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + + >>> # We can also initialize a CLIPConfig from a CLIPTextConfig and a CLIPVisionConfig + >>> from transformers import CLIPTextConfig, CLIPVisionConfig + + >>> # Initializing a CLIPText and CLIPVision configuration + >>> config_text = CLIPTextConfig() + >>> config_vision = CLIPVisionConfig() + + >>> config = CLIPConfig.from_text_vision_configs(config_text, config_vision) + ```""" + + model_type = "clip" + + def __init__( + self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs + ): + # If `_config_dict` kwargs exist, we use them for backward compatibility. + # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot + # of confusion!). + text_config_dict = kwargs.pop("text_config_dict", None) + vision_config_dict = kwargs.pop("vision_config_dict", None) + + super().__init__(**kwargs) + + # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in + # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be the same in most + # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. + if text_config_dict is not None: + if text_config is None: + text_config = {} + + # This is the complete result when using `text_config_dict`. + _text_config_dict = CLIPTextConfig(**text_config_dict).to_dict() + + # Give a warning if the values exist in both `_text_config_dict` and `text_config` but are different.
+ for key, value in _text_config_dict.items(): + if key in text_config and value != text_config[key] and key not in ["transformers_version"]: + # If specified in `text_config_dict` + if key in text_config_dict: + message = ( + f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. " + f'The value `text_config_dict["{key}"]` will be used instead.' + ) + # If inferred from default argument values (just to be super careful) + else: + message = ( + f"`text_config_dict` is provided which will be used to initialize `CLIPTextConfig`. The " + f'value `text_config["{key}"]` will be overriden.' + ) + logger.info(message) + + # Update all values in `text_config` with the ones in `_text_config_dict`. + text_config.update(_text_config_dict) + + if vision_config_dict is not None: + if vision_config is None: + vision_config = {} + + # This is the complete result when using `vision_config_dict`. + _vision_config_dict = CLIPVisionConfig(**vision_config_dict).to_dict() + # convert keys to string instead of integer + if "id2label" in _vision_config_dict: + _vision_config_dict["id2label"] = { + str(key): value for key, value in _vision_config_dict["id2label"].items() + } + + # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. + for key, value in _vision_config_dict.items(): + if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: + # If specified in `vision_config_dict` + if key in vision_config_dict: + message = ( + f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different " + f'values. The value `vision_config_dict["{key}"]` will be used instead.' + ) + # If inferred from default argument values (just to be super careful) + else: + message = ( + f"`vision_config_dict` is provided which will be used to initialize `CLIPVisionConfig`. " + f'The value `vision_config["{key}"]` will be overriden.' + ) + logger.info(message) + + # Update all values in `vision_config` with the ones in `_vision_config_dict`. + vision_config.update(_vision_config_dict) + + if text_config is None: + text_config = {} + logger.info("`text_config` is `None`. Initializing the `CLIPTextConfig` with default values.") + + if vision_config is None: + vision_config = {} + logger.info("`vision_config` is `None`. initializing the `CLIPVisionConfig` with default values.") + + self.text_config = CLIPTextConfig(**text_config) + self.vision_config = CLIPVisionConfig(**vision_config) + + self.projection_dim = projection_dim + self.logit_scale_init_value = logit_scale_init_value + self.initializer_factor = 1.0 + + @classmethod + def from_text_vision_configs(cls, text_config: CLIPTextConfig, vision_config: CLIPVisionConfig, **kwargs): + r""" + Instantiate a [`CLIPConfig`] (or a derived class) from clip text model configuration and clip vision model + configuration. 
+ + Returns: + [`CLIPConfig`]: An instance of a configuration object + """ + + return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) + + +class CLIPOnnxConfig(OnnxConfig): + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict( + [ + ("input_ids", {0: "batch", 1: "sequence"}), + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), + ("attention_mask", {0: "batch", 1: "sequence"}), + ] + ) + + @property + def outputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict( + [ + ("logits_per_image", {0: "batch"}), + ("logits_per_text", {0: "batch"}), + ("text_embeds", {0: "batch"}), + ("image_embeds", {0: "batch"}), + ] + ) + + @property + def atol_for_validation(self) -> float: + return 1e-4 + + def generate_dummy_inputs( + self, + processor: "ProcessorMixin", + batch_size: int = -1, + seq_length: int = -1, + framework: Optional["TensorType"] = None, + ) -> Mapping[str, Any]: + text_input_dict = super().generate_dummy_inputs( + processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework + ) + image_input_dict = super().generate_dummy_inputs( + processor.image_processor, batch_size=batch_size, framework=framework + ) + return {**text_input_dict, **image_input_dict} + + @property + def default_onnx_opset(self) -> int: + return 14 diff --git a/venv/lib/python3.10/site-packages/transformers/models/clip/convert_clip_original_pytorch_to_hf.py b/venv/lib/python3.10/site-packages/transformers/models/clip/convert_clip_original_pytorch_to_hf.py new file mode 100644 index 0000000000000000000000000000000000000000..2127da4f6cf90274b76b20ec6c6c3d6247538cd2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/clip/convert_clip_original_pytorch_to_hf.py @@ -0,0 +1,148 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
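The configuration module above defines `CLIPTextConfig`, `CLIPVisionConfig`, the composite `CLIPConfig` (assembled by `from_text_vision_configs`), and an ONNX export config. Here is a minimal sketch of how the pieces compose, assuming only that `transformers` is installed; the printed values are the documented defaults, and the commented-out checkpoint load is shown only as an example of the sub-config extraction path.

```python
from transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig

text_config = CLIPTextConfig(hidden_size=512, num_attention_heads=8)
vision_config = CLIPVisionConfig(hidden_size=768, patch_size=32)

# Composite config, as assembled by `from_text_vision_configs` above.
config = CLIPConfig.from_text_vision_configs(text_config, vision_config, projection_dim=512)
print(type(config.text_config).__name__)  # CLIPTextConfig
print(config.logit_scale_init_value)      # 2.6592 (default)

# Loading a sub-config from a full CLIP checkpoint extracts just its section
# (the `model_type == "clip"` branch in `from_pretrained` above), e.g.:
# text_only = CLIPTextConfig.from_pretrained("openai/clip-vit-base-patch32")
```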
+ +import argparse + +import torch +from clip import load + +from transformers import CLIPConfig, CLIPModel + + +def copy_attn_layer(hf_attn_layer, pt_attn_layer): + q_proj, k_proj, v_proj = pt_attn_layer.in_proj_weight.chunk(3, dim=0) + q_proj_bias, k_proj_bias, v_proj_bias = pt_attn_layer.in_proj_bias.chunk(3, dim=0) + + out_proj_weights = pt_attn_layer.out_proj.weight + out_proj_bias = pt_attn_layer.out_proj.bias + + hf_attn_layer.q_proj.weight.data = q_proj + hf_attn_layer.q_proj.bias.data = q_proj_bias + + hf_attn_layer.k_proj.weight.data = k_proj + hf_attn_layer.k_proj.bias.data = k_proj_bias + + hf_attn_layer.v_proj.weight.data = v_proj + hf_attn_layer.v_proj.bias.data = v_proj_bias + + hf_attn_layer.out_proj.weight = out_proj_weights + hf_attn_layer.out_proj.bias = out_proj_bias + + +def copy_mlp(hf_mlp, pt_mlp): + copy_linear(hf_mlp.fc1, pt_mlp.c_fc) + copy_linear(hf_mlp.fc2, pt_mlp.c_proj) + + +def copy_linear(hf_linear, pt_linear): + hf_linear.weight = pt_linear.weight + hf_linear.bias = pt_linear.bias + + +def copy_layer(hf_layer, pt_layer): + # copy layer norms + copy_linear(hf_layer.layer_norm1, pt_layer.ln_1) + copy_linear(hf_layer.layer_norm2, pt_layer.ln_2) + + # copy MLP + copy_mlp(hf_layer.mlp, pt_layer.mlp) + + # copy attn + copy_attn_layer(hf_layer.self_attn, pt_layer.attn) + + +def copy_layers(hf_layers, pt_layers): + for hf_layer, pt_layer in zip(hf_layers, pt_layers): + copy_layer(hf_layer, pt_layer) + + +def copy_encoder(hf_encoder, pt_model): + # copy embeds + hf_encoder.embeddings.token_embedding.weight = pt_model.token_embedding.weight + hf_encoder.embeddings.position_embedding.weight.data = pt_model.positional_embedding + + # copy layer norm + copy_linear(hf_encoder.final_layer_norm, pt_model.ln_final) + + # copy hidden layers + copy_layers(hf_encoder.encoder.layers, pt_model.transformer.resblocks) + + +def copy_text_model_and_projection(hf_model, pt_model): + # copy projection + hf_model.text_projection.weight.data = pt_model.text_projection.data.T + + # copy text encoder + copy_encoder(hf_model.text_model, pt_model) + + +def copy_vison_model_and_projection(hf_model, pt_model): + # copy projection + hf_model.visual_projection.weight.data = pt_model.visual.proj.data.T + + # copy layer norms + copy_linear(hf_model.vision_model.pre_layrnorm, pt_model.visual.ln_pre) + copy_linear(hf_model.vision_model.post_layernorm, pt_model.visual.ln_post) + + # copy embeds + hf_model.vision_model.embeddings.patch_embedding.weight.data = pt_model.visual.conv1.weight.data + hf_model.vision_model.embeddings.class_embedding = pt_model.visual.class_embedding + hf_model.vision_model.embeddings.position_embedding.weight.data = pt_model.visual.positional_embedding.data + + # copy encoder + copy_layers(hf_model.vision_model.encoder.layers, pt_model.visual.transformer.resblocks) + + +@torch.no_grad() +def convert_clip_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None): + """ + Copy/paste/tweak model's weights to transformers design. 
+ """ + if config_path is not None: + config = CLIPConfig.from_pretrained(config_path) + else: + config = CLIPConfig(projection_dim=512, text_config={}, vision_config={}) + + hf_model = CLIPModel(config).eval() + + pt_model, _ = load(checkpoint_path, device="cpu", jit=False) + pt_model = pt_model.eval() + + copy_text_model_and_projection(hf_model, pt_model) + copy_vison_model_and_projection(hf_model, pt_model) + hf_model.logit_scale = pt_model.logit_scale + + input_ids = torch.arange(0, 77).unsqueeze(0) + pixel_values = torch.randn(1, 3, 224, 224) + + hf_outputs = hf_model(input_ids=input_ids, pixel_values=pixel_values, return_dict=True) + hf_logits_per_image = hf_outputs.logits_per_image + hf_logits_per_text = hf_outputs.logits_per_text + pt_logits_per_image, pt_logits_per_text = pt_model(pixel_values, input_ids) + + assert torch.allclose(hf_logits_per_image, pt_logits_per_image, atol=1e-3) + assert torch.allclose(hf_logits_per_text, pt_logits_per_text, atol=1e-3) + + hf_model.save_pretrained(pytorch_dump_folder_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") + parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") + parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") + args = parser.parse_args() + + convert_clip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) diff --git a/venv/lib/python3.10/site-packages/transformers/models/clip/feature_extraction_clip.py b/venv/lib/python3.10/site-packages/transformers/models/clip/feature_extraction_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..5696a63abe621e360b7e681b86454faa302c4a78 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/clip/feature_extraction_clip.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Feature extractor class for CLIP.""" + +import warnings + +from ...utils import logging +from .image_processing_clip import CLIPImageProcessor + + +logger = logging.get_logger(__name__) + + +class CLIPFeatureExtractor(CLIPImageProcessor): + def __init__(self, *args, **kwargs) -> None: + warnings.warn( + "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. 
Please" + " use CLIPImageProcessor instead.", + FutureWarning, + ) + super().__init__(*args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/transformers/models/clip/image_processing_clip.py b/venv/lib/python3.10/site-packages/transformers/models/clip/image_processing_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..fd2f8b3d532bbbc5a6b5c385b33d0c0b67518b98 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/clip/image_processing_clip.py @@ -0,0 +1,346 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Image processor class for CLIP.""" + +from typing import Dict, List, Optional, Union + +import numpy as np + +from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict +from ...image_transforms import ( + convert_to_rgb, + get_resize_output_image_size, + resize, + to_channel_dimension_format, +) +from ...image_utils import ( + OPENAI_CLIP_MEAN, + OPENAI_CLIP_STD, + ChannelDimension, + ImageInput, + PILImageResampling, + infer_channel_dimension_format, + is_scaled_image, + make_list_of_images, + to_numpy_array, + valid_images, + validate_kwargs, + validate_preprocess_arguments, +) +from ...utils import TensorType, is_vision_available, logging + + +logger = logging.get_logger(__name__) + + +if is_vision_available(): + import PIL + + +class CLIPImageProcessor(BaseImageProcessor): + r""" + Constructs a CLIP image processor. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by + `do_resize` in the `preprocess` method. + size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`): + Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with + the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess` + method. + resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): + Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. + do_center_crop (`bool`, *optional*, defaults to `True`): + Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the + `preprocess` method. + crop_size (`Dict[str, int]` *optional*, defaults to 224): + Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess` + method. + do_rescale (`bool`, *optional*, defaults to `True`): + Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in + the `preprocess` method. + rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): + Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` + method. 
+ do_normalize (`bool`, *optional*, defaults to `True`): + Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. + image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`): + Mean to use if normalizing the image. This is a float or list of floats the length of the number of + channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. + image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`): + Standard deviation to use if normalizing the image. This is a float or list of floats the length of the + number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. + do_convert_rgb (`bool`, *optional*, defaults to `True`): + Whether to convert the image to RGB. + """ + + model_input_names = ["pixel_values"] + + def __init__( + self, + do_resize: bool = True, + size: Dict[str, int] = None, + resample: PILImageResampling = PILImageResampling.BICUBIC, + do_center_crop: bool = True, + crop_size: Dict[str, int] = None, + do_rescale: bool = True, + rescale_factor: Union[int, float] = 1 / 255, + do_normalize: bool = True, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + do_convert_rgb: bool = True, + **kwargs, + ) -> None: + super().__init__(**kwargs) + size = size if size is not None else {"shortest_edge": 224} + size = get_size_dict(size, default_to_square=False) + crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224} + crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size") + + self.do_resize = do_resize + self.size = size + self.resample = resample + self.do_center_crop = do_center_crop + self.crop_size = crop_size + self.do_rescale = do_rescale + self.rescale_factor = rescale_factor + self.do_normalize = do_normalize + self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN + self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD + self.do_convert_rgb = do_convert_rgb + self._valid_processor_keys = [ + "images", + "do_resize", + "size", + "resample", + "do_center_crop", + "crop_size", + "do_rescale", + "rescale_factor", + "do_normalize", + "image_mean", + "image_std", + "do_convert_rgb", + "return_tensors", + "data_format", + "input_data_format", + ] + + # for backwards compatibility of KOSMOS-2 + if "use_square_size" in kwargs: + self.size = {"height": size["shortest_edge"], "width": size["shortest_edge"]} + + def resize( + self, + image: np.ndarray, + size: Dict[str, int], + resample: PILImageResampling = PILImageResampling.BICUBIC, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge + resized to keep the input aspect ratio. + + Args: + image (`np.ndarray`): + Image to resize. + size (`Dict[str, int]`): + Size of the output image. + resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): + Resampling filter to use when resizing the image. + data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format of the image. If not provided, it will be the same as the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred. + """ + default_to_square = True + if "shortest_edge" in size: + size = size["shortest_edge"] + default_to_square = False + elif "height" in size and "width" in size: + size = (size["height"], size["width"]) + else: + raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.") + + output_size = get_resize_output_image_size( + image, + size=size, + default_to_square=default_to_square, + input_data_format=input_data_format, + ) + return resize( + image, + size=output_size, + resample=resample, + data_format=data_format, + input_data_format=input_data_format, + **kwargs, + ) + + def preprocess( + self, + images: ImageInput, + do_resize: bool = None, + size: Dict[str, int] = None, + resample: PILImageResampling = None, + do_center_crop: bool = None, + crop_size: int = None, + do_rescale: bool = None, + rescale_factor: float = None, + do_normalize: bool = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + do_convert_rgb: bool = None, + return_tensors: Optional[Union[str, TensorType]] = None, + data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> PIL.Image.Image: + """ + Preprocess an image or batch of images. + + Args: + images (`ImageInput`): + Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If + passing in images with pixel values between 0 and 1, set `do_rescale=False`. + do_resize (`bool`, *optional*, defaults to `self.do_resize`): + Whether to resize the image. + size (`Dict[str, int]`, *optional*, defaults to `self.size`): + Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with + the longest edge resized to keep the input aspect ratio. + resample (`int`, *optional*, defaults to `self.resample`): + Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only + has an effect if `do_resize` is set to `True`. + do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): + Whether to center crop the image. + crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): + Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. + do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): + Whether to rescale the image. + rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): + Rescale factor to rescale the image by if `do_rescale` is set to `True`. + do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): + Whether to normalize the image. + image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): + Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. + image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): + Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to + `True`. + do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): + Whether to convert the image to RGB. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. 
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. + data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: Use the channel dimension format of the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + """ + do_resize = do_resize if do_resize is not None else self.do_resize + size = size if size is not None else self.size + size = get_size_dict(size, param_name="size", default_to_square=False) + resample = resample if resample is not None else self.resample + do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop + crop_size = crop_size if crop_size is not None else self.crop_size + crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True) + do_rescale = do_rescale if do_rescale is not None else self.do_rescale + rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor + do_normalize = do_normalize if do_normalize is not None else self.do_normalize + image_mean = image_mean if image_mean is not None else self.image_mean + image_std = image_std if image_std is not None else self.image_std + do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb + + validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) + + images = make_list_of_images(images) + + if not valid_images(images): + raise ValueError( + "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " + "torch.Tensor, tf.Tensor or jax.ndarray." + ) + validate_preprocess_arguments( + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + do_center_crop=do_center_crop, + crop_size=crop_size, + do_resize=do_resize, + size=size, + resample=resample, + ) + + if do_convert_rgb: + images = [convert_to_rgb(image) for image in images] + + # All transformations expect numpy arrays. + images = [to_numpy_array(image) for image in images] + + if is_scaled_image(images[0]) and do_rescale: + logger.warning_once( + "It looks like you are trying to rescale already rescaled images. If the input" + " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." + ) + + if input_data_format is None: + # We assume that all images have the same channel dimension format. 
+ input_data_format = infer_channel_dimension_format(images[0]) + + if do_resize: + images = [ + self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) + for image in images + ] + + if do_center_crop: + images = [ + self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images + ] + + if do_rescale: + images = [ + self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) + for image in images + ] + + if do_normalize: + images = [ + self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) + for image in images + ] + + images = [ + to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images + ] + + data = {"pixel_values": images} + return BatchFeature(data=data, tensor_type=return_tensors) diff --git a/venv/lib/python3.10/site-packages/transformers/models/clip/modeling_clip.py b/venv/lib/python3.10/site-packages/transformers/models/clip/modeling_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..a4ce51625ebf761ef094845cfe7cf17f0a845517 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/clip/modeling_clip.py @@ -0,0 +1,1416 @@ +# coding=utf-8 +# Copyright 2021 The OpenAI Team Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
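As a quick usage sketch of the preprocessing pipeline implemented above (optional RGB conversion, resize to the shortest edge, center crop, rescale, normalize), assuming the vision extras (Pillow) are installed; the input here is random data and is used only to show the output shape.

```python
import numpy as np
from transformers import CLIPImageProcessor

# Defaults: shortest edge resized to 224, 224x224 center crop, rescale by 1/255,
# normalization with OPENAI_CLIP_MEAN / OPENAI_CLIP_STD.
processor = CLIPImageProcessor()

image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)  # HWC uint8 "image"

batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)
```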
+""" PyTorch CLIP model.""" + + +from dataclasses import dataclass +from typing import Any, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask +from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput +from ...modeling_utils import PreTrainedModel +from ...utils import ( + ModelOutput, + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_clip import CLIPConfig, CLIPTextConfig, CLIPVisionConfig + + +logger = logging.get_logger(__name__) + +# General docstring +_CONFIG_FOR_DOC = "CLIPConfig" +_CHECKPOINT_FOR_DOC = "openai/clip-vit-base-patch32" + +# Image classification docstring +_IMAGE_CLASS_CHECKPOINT = "openai/clip-vit-base-patch32" +_IMAGE_CLASS_EXPECTED_OUTPUT = "LABEL_0" + + +from ..deprecated._archive_maps import CLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +# contrastive loss function, adapted from +# https://sachinruk.github.io/blog/2021-03-07-clip.html +def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: + return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) + + +def clip_loss(similarity: torch.Tensor) -> torch.Tensor: + caption_loss = contrastive_loss(similarity) + image_loss = contrastive_loss(similarity.t()) + return (caption_loss + image_loss) / 2.0 + + +@dataclass +class CLIPVisionModelOutput(ModelOutput): + """ + Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states. + + Args: + image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): + The image embeddings obtained by applying the projection layer to the pooler_output. + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + image_embeds: Optional[torch.FloatTensor] = None + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class CLIPTextModelOutput(ModelOutput): + """ + Base class for text model's outputs that also contains a pooling of the last hidden states. 
+ + Args: + text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): + The text embeddings obtained by applying the projection layer to the pooler_output. + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + text_embeds: Optional[torch.FloatTensor] = None + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class CLIPOutput(ModelOutput): + """ + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): + Contrastive loss for image-text similarity. + logits_per_image:(`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): + The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text + similarity scores. + logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): + The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image + similarity scores. + text_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): + The text embeddings obtained by applying the projection layer to the pooled output of [`CLIPTextModel`]. + image_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): + The image embeddings obtained by applying the projection layer to the pooled output of [`CLIPVisionModel`]. + text_model_output(`BaseModelOutputWithPooling`): + The output of the [`CLIPTextModel`]. + vision_model_output(`BaseModelOutputWithPooling`): + The output of the [`CLIPVisionModel`]. 
+ """ + + loss: Optional[torch.FloatTensor] = None + logits_per_image: torch.FloatTensor = None + logits_per_text: torch.FloatTensor = None + text_embeds: torch.FloatTensor = None + image_embeds: torch.FloatTensor = None + text_model_output: BaseModelOutputWithPooling = None + vision_model_output: BaseModelOutputWithPooling = None + + def to_tuple(self) -> Tuple[Any]: + return tuple( + self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() + for k in self.keys() + ) + + +class CLIPVisionEmbeddings(nn.Module): + def __init__(self, config: CLIPVisionConfig): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.image_size = config.image_size + self.patch_size = config.patch_size + + self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) + + self.patch_embedding = nn.Conv2d( + in_channels=config.num_channels, + out_channels=self.embed_dim, + kernel_size=self.patch_size, + stride=self.patch_size, + bias=False, + ) + + self.num_patches = (self.image_size // self.patch_size) ** 2 + self.num_positions = self.num_patches + 1 + self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) + self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False) + + def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: + batch_size = pixel_values.shape[0] + target_dtype = self.patch_embedding.weight.dtype + patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] + patch_embeds = patch_embeds.flatten(2).transpose(1, 2) + + class_embeds = self.class_embedding.expand(batch_size, 1, -1) + embeddings = torch.cat([class_embeds, patch_embeds], dim=1) + embeddings = embeddings + self.position_embedding(self.position_ids) + return embeddings + + +class CLIPTextEmbeddings(nn.Module): + def __init__(self, config: CLIPTextConfig): + super().__init__() + embed_dim = config.hidden_size + + self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) + self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer( + "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False + ) + + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + ) -> torch.Tensor: + seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] + + if position_ids is None: + position_ids = self.position_ids[:, :seq_length] + + if inputs_embeds is None: + inputs_embeds = self.token_embedding(input_ids) + + position_embeddings = self.position_embedding(position_ids) + embeddings = inputs_embeds + position_embeddings + + return embeddings + + +class CLIPAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.embed_dim // self.num_heads + if self.head_dim * self.num_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" + f" {self.num_heads})." 
+ ) + self.scale = self.head_dim**-0.5 + self.dropout = config.attention_dropout + + self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) + self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) + self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) + self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + causal_attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + bsz, tgt_len, embed_dim = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scale + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_states = value_states.view(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + # apply the causal_attention_mask first + if causal_attention_mask is not None: + if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" + f" {causal_attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if output_attentions: + # this operation is a bit akward, but it's required to + # make sure that attn_weights keeps its gradient. 
+ # In order to do so, attn_weights have to reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped + + +class CLIPMLP(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.activation_fn = ACT2FN[config.hidden_act] + self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) + self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.fc1(hidden_states) + hidden_states = self.activation_fn(hidden_states) + hidden_states = self.fc2(hidden_states) + return hidden_states + + +class CLIPEncoderLayer(nn.Module): + def __init__(self, config: CLIPConfig): + super().__init__() + self.embed_dim = config.hidden_size + self.self_attn = CLIPAttention(config) + self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + self.mlp = CLIPMLP(config) + self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + causal_attention_mask: torch.Tensor, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + `(config.encoder_attention_heads,)`. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + + hidden_states = self.layer_norm1(hidden_states) + hidden_states, attn_weights = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + causal_attention_mask=causal_attention_mask, + output_attentions=output_attentions, + ) + hidden_states = residual + hidden_states + + residual = hidden_states + hidden_states = self.layer_norm2(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +class CLIPPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
+ """ + + config_class = CLIPConfig + base_model_prefix = "clip" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights""" + factor = self.config.initializer_factor + if isinstance(module, CLIPTextEmbeddings): + module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) + module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) + elif isinstance(module, CLIPVisionEmbeddings): + factor = self.config.initializer_factor + nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor) + nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) + nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) + elif isinstance(module, CLIPAttention): + factor = self.config.initializer_factor + in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor + out_proj_std = (module.embed_dim**-0.5) * factor + nn.init.normal_(module.q_proj.weight, std=in_proj_std) + nn.init.normal_(module.k_proj.weight, std=in_proj_std) + nn.init.normal_(module.v_proj.weight, std=in_proj_std) + nn.init.normal_(module.out_proj.weight, std=out_proj_std) + elif isinstance(module, CLIPMLP): + factor = self.config.initializer_factor + in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor + fc_std = (2 * module.config.hidden_size) ** -0.5 * factor + nn.init.normal_(module.fc1.weight, std=fc_std) + nn.init.normal_(module.fc2.weight, std=in_proj_std) + elif isinstance(module, CLIPModel): + nn.init.normal_( + module.text_projection.weight, + std=module.text_embed_dim**-0.5 * self.config.initializer_factor, + ) + nn.init.normal_( + module.visual_projection.weight, + std=module.vision_embed_dim**-0.5 * self.config.initializer_factor, + ) + elif isinstance(module, CLIPVisionModelWithProjection): + nn.init.normal_( + module.visual_projection.weight, + std=self.config.hidden_size**-0.5 * self.config.initializer_factor, + ) + elif isinstance(module, CLIPTextModelWithProjection): + nn.init.normal_( + module.text_projection.weight, + std=self.config.hidden_size**-0.5 * self.config.initializer_factor, + ) + + if isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + +CLIP_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`CLIPConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +CLIP_TEXT_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. 
+ + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +CLIP_VISION_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using + [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +CLIP_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using + [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details. + return_loss (`bool`, *optional*): + Whether or not to return the contrastive loss. 
+ output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +class CLIPEncoder(nn.Module): + """ + Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a + [`CLIPEncoderLayer`]. + + Args: + config: CLIPConfig + """ + + def __init__(self, config: CLIPConfig): + super().__init__() + self.config = config + self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + inputs_embeds, + attention_mask: Optional[torch.Tensor] = None, + causal_attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutput]: + r""" + Args: + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Causal mask for the text model. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+ """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + hidden_states = inputs_embeds + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + encoder_layer.__call__, + hidden_states, + attention_mask, + causal_attention_mask, + output_attentions, + ) + else: + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + causal_attention_mask, + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +class CLIPTextTransformer(nn.Module): + def __init__(self, config: CLIPTextConfig): + super().__init__() + self.config = config + embed_dim = config.hidden_size + self.embeddings = CLIPTextEmbeddings(config) + self.encoder = CLIPEncoder(config) + self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) + + # For `pooled_output` computation + self.eos_token_id = config.eos_token_id + + @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPTextConfig) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + r""" + Returns: + + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is None: + raise ValueError("You have to specify input_ids") + + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + + hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids) + + # CLIP's text model uses causal mask, prepare it here. 
+ # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 + causal_attention_mask = _create_4d_causal_attention_mask( + input_shape, hidden_states.dtype, device=hidden_states.device + ) + # expand attention_mask + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype) + + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + attention_mask=attention_mask, + causal_attention_mask=causal_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + last_hidden_state = encoder_outputs[0] + last_hidden_state = self.final_layer_norm(last_hidden_state) + + if self.eos_token_id == 2: + # The `eos_token_id` was incorrect before PR #24773: Let's keep what have been done here. + # A CLIP model with such `eos_token_id` in the config can't work correctly with extra new tokens added + # ------------------------------------------------------------ + # text_embeds.shape = [batch_size, sequence_length, transformer.width] + # take features from the eot embedding (eot_token is the highest number in each sequence) + # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14 + pooled_output = last_hidden_state[ + torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device), + input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1), + ] + else: + # The config gets updated `eos_token_id` from PR #24773 (so the use of exta new tokens is possible) + pooled_output = last_hidden_state[ + torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device), + # We need to get the first position of `eos_token_id` value (`pad_token_ids` might equal to `eos_token_id`) + (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id) + .int() + .argmax(dim=-1), + ] + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +@add_start_docstrings( + """The text model from CLIP without any head or projection on top.""", + CLIP_START_DOCSTRING, +) +class CLIPTextModel(CLIPPreTrainedModel): + config_class = CLIPTextConfig + + _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer"] + + def __init__(self, config: CLIPTextConfig): + super().__init__(config) + self.text_model = CLIPTextTransformer(config) + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> nn.Module: + return self.text_model.embeddings.token_embedding + + def set_input_embeddings(self, value): + self.text_model.embeddings.token_embedding = value + + @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPTextConfig) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + r""" + Returns: + + Examples: + + ```python + >>> from transformers import 
AutoTokenizer, CLIPTextModel + + >>> model = CLIPTextModel.from_pretrained("openai/clip-vit-base-patch32") + >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32") + + >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") + + >>> outputs = model(**inputs) + >>> last_hidden_state = outputs.last_hidden_state + >>> pooled_output = outputs.pooler_output # pooled (EOS token) states + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + return self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + +class CLIPVisionTransformer(nn.Module): + def __init__(self, config: CLIPVisionConfig): + super().__init__() + self.config = config + embed_dim = config.hidden_size + + self.embeddings = CLIPVisionEmbeddings(config) + self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) + self.encoder = CLIPEncoder(config) + self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) + + @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPVisionConfig) + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + r""" + Returns: + + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + hidden_states = self.embeddings(pixel_values) + hidden_states = self.pre_layrnorm(hidden_states) + + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + last_hidden_state = encoder_outputs[0] + pooled_output = last_hidden_state[:, 0, :] + pooled_output = self.post_layernorm(pooled_output) + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +@add_start_docstrings( + """The vision model from CLIP without any head or projection on top.""", + CLIP_START_DOCSTRING, +) +class CLIPVisionModel(CLIPPreTrainedModel): + config_class = CLIPVisionConfig + main_input_name = "pixel_values" + _no_split_modules = ["CLIPEncoderLayer"] + + def __init__(self, config: CLIPVisionConfig): + super().__init__(config) + self.vision_model = CLIPVisionTransformer(config) + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> nn.Module: + return self.vision_model.embeddings.patch_embedding + + @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPVisionConfig) + def forward( + self, + pixel_values: 
Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + r""" + Returns: + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, CLIPVisionModel + + >>> model = CLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32") + >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, return_tensors="pt") + + >>> outputs = model(**inputs) + >>> last_hidden_state = outputs.last_hidden_state + >>> pooled_output = outputs.pooler_output # pooled CLS states + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + return self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + +@add_start_docstrings(CLIP_START_DOCSTRING) +class CLIPModel(CLIPPreTrainedModel): + config_class = CLIPConfig + _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer"] + + def __init__(self, config: CLIPConfig): + super().__init__(config) + + if not isinstance(config.text_config, CLIPTextConfig): + raise ValueError( + "config.text_config is expected to be of type CLIPTextConfig but is of type" + f" {type(config.text_config)}." + ) + + if not isinstance(config.vision_config, CLIPVisionConfig): + raise ValueError( + "config.vision_config is expected to be of type CLIPVisionConfig but is of type" + f" {type(config.vision_config)}." + ) + + text_config = config.text_config + vision_config = config.vision_config + + self.projection_dim = config.projection_dim + self.text_embed_dim = text_config.hidden_size + self.vision_embed_dim = vision_config.hidden_size + + self.text_model = CLIPTextTransformer(text_config) + self.vision_model = CLIPVisionTransformer(vision_config) + + self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) + self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) + self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING) + def get_text_features( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> torch.FloatTensor: + r""" + Returns: + text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by + applying the projection layer to the pooled output of [`CLIPTextModel`]. 
+ + Examples: + + ```python + >>> from transformers import AutoTokenizer, CLIPModel + + >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") + >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32") + + >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") + >>> text_features = model.get_text_features(**inputs) + ```""" + # Use CLIP model's config for some fields (if specified) instead of those of vision & text components. + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + text_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = text_outputs[1] + text_features = self.text_projection(pooled_output) + + return text_features + + @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING) + def get_image_features( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> torch.FloatTensor: + r""" + Returns: + image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by + applying the projection layer to the pooled output of [`CLIPVisionModel`]. + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, CLIPModel + + >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") + >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, return_tensors="pt") + + >>> image_features = model.get_image_features(**inputs) + ```""" + # Use CLIP model's config for some fields (if specified) instead of those of vision & text components. 
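+ # Note: the image features returned here are the projection of the pooled (CLS) output after
+ # post_layernorm; they are not L2-normalized at this point. CLIPModel.forward normalizes both
+ # embeddings before computing the similarity logits.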
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = vision_outputs[1] # pooled_output + image_features = self.visual_projection(pooled_output) + + return image_features + + @add_start_docstrings_to_model_forward(CLIP_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CLIPOutput, config_class=CLIPConfig) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + pixel_values: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + return_loss: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CLIPOutput]: + r""" + Returns: + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, CLIPModel + + >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") + >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor( + ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True + ... ) + + >>> outputs = model(**inputs) + >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score + >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities + ```""" + # Use CLIP model's config for some fields (if specified) instead of those of vision & text components. 
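+ # Overall flow of forward: encode both modalities, project the pooled outputs into the shared
+ # embedding space, L2-normalize them, and scale the cosine similarities by logit_scale.exp().
+ # logits_per_text has shape (text_batch, image_batch); logits_per_image is its transpose.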
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + text_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + image_embeds = vision_outputs[1] + image_embeds = self.visual_projection(image_embeds) + + text_embeds = text_outputs[1] + text_embeds = self.text_projection(text_embeds) + + # normalized features + image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) + text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) + + # cosine similarity as logits + logit_scale = self.logit_scale.exp() + logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale + logits_per_image = logits_per_text.t() + + loss = None + if return_loss: + loss = clip_loss(logits_per_text) + + if not return_dict: + output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) + return ((loss,) + output) if loss is not None else output + + return CLIPOutput( + loss=loss, + logits_per_image=logits_per_image, + logits_per_text=logits_per_text, + text_embeds=text_embeds, + image_embeds=image_embeds, + text_model_output=text_outputs, + vision_model_output=vision_outputs, + ) + + +@add_start_docstrings( + """ + CLIP Text Model with a projection layer on top (a linear layer on top of the pooled output). 
+ """, + CLIP_START_DOCSTRING, +) +class CLIPTextModelWithProjection(CLIPPreTrainedModel): + config_class = CLIPTextConfig + + _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer"] + + def __init__(self, config: CLIPTextConfig): + super().__init__(config) + + self.text_model = CLIPTextTransformer(config) + + self.text_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> nn.Module: + return self.text_model.embeddings.token_embedding + + def set_input_embeddings(self, value): + self.text_model.embeddings.token_embedding = value + + @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CLIPTextModelOutput, config_class=CLIPTextConfig) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CLIPTextModelOutput]: + r""" + Returns: + + Examples: + + ```python + >>> from transformers import AutoTokenizer, CLIPTextModelWithProjection + + >>> model = CLIPTextModelWithProjection.from_pretrained("openai/clip-vit-base-patch32") + >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32") + + >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") + + >>> outputs = model(**inputs) + >>> text_embeds = outputs.text_embeds + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + text_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = text_outputs[1] + + text_embeds = self.text_projection(pooled_output) + + if not return_dict: + outputs = (text_embeds, text_outputs[0]) + text_outputs[2:] + return tuple(output for output in outputs if output is not None) + + return CLIPTextModelOutput( + text_embeds=text_embeds, + last_hidden_state=text_outputs.last_hidden_state, + hidden_states=text_outputs.hidden_states, + attentions=text_outputs.attentions, + ) + + +@add_start_docstrings( + """ + CLIP Vision Model with a projection layer on top (a linear layer on top of the pooled output). 
+ """, + CLIP_START_DOCSTRING, +) +class CLIPVisionModelWithProjection(CLIPPreTrainedModel): + config_class = CLIPVisionConfig + main_input_name = "pixel_values" + + def __init__(self, config: CLIPVisionConfig): + super().__init__(config) + + self.vision_model = CLIPVisionTransformer(config) + + self.visual_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> nn.Module: + return self.vision_model.embeddings.patch_embedding + + @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CLIPVisionModelOutput, config_class=CLIPVisionConfig) + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CLIPVisionModelOutput]: + r""" + Returns: + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, CLIPVisionModelWithProjection + + >>> model = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-base-patch32") + >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, return_tensors="pt") + + >>> outputs = model(**inputs) + >>> image_embeds = outputs.image_embeds + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = vision_outputs[1] # pooled_output + + image_embeds = self.visual_projection(pooled_output) + + if not return_dict: + outputs = (image_embeds, vision_outputs[0]) + vision_outputs[2:] + return tuple(output for output in outputs if output is not None) + + return CLIPVisionModelOutput( + image_embeds=image_embeds, + last_hidden_state=vision_outputs.last_hidden_state, + hidden_states=vision_outputs.hidden_states, + attentions=vision_outputs.attentions, + ) + + +@add_start_docstrings( + """ + CLIP vision encoder with an image classification head on top (a linear layer on top of the pooled final hidden states of + the patch tokens) e.g. for ImageNet. 
+ """, + CLIP_START_DOCSTRING, +) +class CLIPForImageClassification(CLIPPreTrainedModel): + main_input_name = "pixel_values" + + def __init__(self, config: CLIPConfig) -> None: + super().__init__(config) + + self.num_labels = config.num_labels + self.vision_model = CLIPVisionTransformer(config.vision_config) + + # Classifier head + self.classifier = ( + nn.Linear(config.vision_config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() + ) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(CLIP_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_IMAGE_CLASS_CHECKPOINT, + output_type=ImageClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, + ) + def forward( + self, + pixel_values: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[tuple, ImageClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the image classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.vision_model( + pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + # average pool the patch tokens + sequence_output = torch.mean(sequence_output[:, 1:, :], dim=1) + # apply classifier + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + # move labels to correct device to enable model parallelism + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return ImageClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/clip/modeling_flax_clip.py b/venv/lib/python3.10/site-packages/transformers/models/clip/modeling_flax_clip.py new file mode 100644 index 
0000000000000000000000000000000000000000..265e7005b74e0e18a05cfa95eb3aa3675cb45f00 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/clip/modeling_flax_clip.py @@ -0,0 +1,1295 @@ +# coding=utf-8 +# Copyright 2021 The OpenAI Team Authors, The Google Flax Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Optional, Tuple, Union + +import flax +import flax.linen as nn +import jax +import jax.numpy as jnp +from flax.core.frozen_dict import FrozenDict, freeze, unfreeze +from flax.linen import combine_masks, make_causal_mask +from flax.linen.attention import dot_product_attention_weights +from flax.traverse_util import flatten_dict, unflatten_dict +from jax import lax + +from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxBaseModelOutputWithPooling +from ...modeling_flax_utils import ( + ACT2FN, + FlaxPreTrainedModel, + append_replace_return_docstrings, + overwrite_call_docstring, +) +from ...utils import ModelOutput, add_start_docstrings, logging +from .configuration_clip import CLIPConfig, CLIPTextConfig, CLIPVisionConfig + + +logger = logging.get_logger(__name__) + +CLIP_START_DOCSTRING = r""" + + This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading, saving and converting weights from PyTorch models) + + This model is also a + [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as + a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and + behavior. + + Finally, this model supports inherent JAX features such as: + + - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) + - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) + - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) + - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) + + Parameters: + config ([`CLIPConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. + dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): + The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and + `jax.numpy.bfloat16` (on TPUs). + + This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If + specified all the computation will be performed with the given `dtype`. 
+ + **Note that this only specifies the dtype of the computation and does not influence the dtype of model + parameters.** + + If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and + [`~FlaxPreTrainedModel.to_bf16`]. +""" + +CLIP_TEXT_INPUTS_DOCSTRING = r""" + Args: + input_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +CLIP_VISION_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using + [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +CLIP_INPUTS_DOCSTRING = r""" + Args: + input_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. 
+ + [What are position IDs?](../glossary#position-ids) + pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using + [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@flax.struct.dataclass +class FlaxCLIPTextModelOutput(ModelOutput): + """ + Base class for text model's outputs that also contains a pooling of the last hidden states. + + Args: + text_embeds (`jnp.ndarray` of shape `(batch_size, output_dim`): + The text embeddings obtained by applying the projection layer to the pooled output of + [`FlaxCLIPTextModel`]. + last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + text_embeds: jnp.ndarray = None + last_hidden_state: jnp.ndarray = None + hidden_states: Optional[Tuple[jnp.ndarray, ...]] = None + attentions: Optional[Tuple[jnp.ndarray, ...]] = None + + +@flax.struct.dataclass +class FlaxCLIPOutput(ModelOutput): + """ + Args: + logits_per_image:(`jnp.ndarray` of shape `(image_batch_size, text_batch_size)`): + The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text + similarity scores. + logits_per_text:(`jnp.ndarray` of shape `(text_batch_size, image_batch_size)`): + The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image + similarity scores. + text_embeds(`jnp.ndarray` of shape `(batch_size, output_dim`): + The text embeddings obtained by applying the projection layer to the pooled output of + [`FlaxCLIPTextModel`]. + image_embeds(`jnp.ndarray` of shape `(batch_size, output_dim`): + The image embeddings obtained by applying the projection layer to the pooled output of + [`FlaxCLIPVisionModel`]. + text_model_output(`FlaxBaseModelOutputWithPooling`): + The output of the [`FlaxCLIPTextModel`]. + vision_model_output(`FlaxBaseModelOutputWithPooling`): + The output of the [`FlaxCLIPVisionModel`]. 
+ """ + + logits_per_image: jnp.ndarray = None + logits_per_text: jnp.ndarray = None + text_embeds: jnp.ndarray = None + image_embeds: jnp.ndarray = None + text_model_output: FlaxBaseModelOutputWithPooling = None + vision_model_output: FlaxBaseModelOutputWithPooling = None + + def to_tuple(self) -> Tuple[Any]: + return tuple( + self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() + for k in self.keys() + ) + + +class FlaxCLIPVisionEmbeddings(nn.Module): + config: CLIPVisionConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + embed_dim = self.config.hidden_size + image_size = self.config.image_size + patch_size = self.config.patch_size + + self.class_embedding = self.param("class_embedding", jax.nn.initializers.normal(stddev=0.02), (embed_dim,)) + + self.patch_embedding = nn.Conv( + embed_dim, + kernel_size=(patch_size, patch_size), + strides=(patch_size, patch_size), + padding="VALID", + use_bias=False, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(), + ) + + self.num_patches = (image_size // patch_size) ** 2 + num_positions = self.num_patches + 1 + self.position_embedding = nn.Embed(num_positions, embed_dim, embedding_init=jax.nn.initializers.normal()) + self.position_ids = jnp.expand_dims(jnp.arange(0, num_positions, dtype="i4"), axis=0) + + def __call__(self, pixel_values): + patch_embeds = self.patch_embedding(pixel_values) + batch_size, height, width, channels = patch_embeds.shape + patch_embeds = jnp.reshape(patch_embeds, (batch_size, height * width, channels)) + + class_embeds = jnp.expand_dims(self.class_embedding, axis=(0, 1)) + class_embeds = jnp.tile(class_embeds, (batch_size, 1, 1)) + embeddings = jnp.concatenate([class_embeds, patch_embeds], axis=1) + embeddings = embeddings + self.position_embedding(self.position_ids) + return embeddings + + +class FlaxCLIPTextEmbeddings(nn.Module): + config: CLIPTextConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + embed_dim = self.config.hidden_size + + self.token_embedding = nn.Embed(self.config.vocab_size, embed_dim, embedding_init=jax.nn.initializers.normal()) + self.position_embedding = nn.Embed( + self.config.max_position_embeddings, embed_dim, embedding_init=jax.nn.initializers.normal() + ) + self.position_ids = jnp.expand_dims( + jnp.arange(0, self.config.max_position_embeddings, dtype="i4"), axis=(0, 1) + ) + + def __call__(self, input_ids, position_ids): + input_embeds = self.token_embedding(input_ids.astype("i4")) + position_embeds = self.position_embedding(position_ids.astype("i4")) + + embeddings = input_embeds + position_embeds + return embeddings + + +class FlaxCLIPAttention(nn.Module): + config: Union[CLIPTextConfig, CLIPVisionConfig] + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.embed_dim = self.config.hidden_size + self.num_heads = self.config.num_attention_heads + self.head_dim = self.embed_dim // self.num_heads + if self.head_dim * self.num_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" + f" {self.num_heads})." 
+ ) + self.scale = self.head_dim**-0.5 + self.dropout = self.config.attention_dropout + + self.k_proj = nn.Dense(self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.01)) + self.v_proj = nn.Dense(self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.01)) + self.q_proj = nn.Dense(self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.01)) + self.out_proj = nn.Dense(self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.01)) + + self.causal = isinstance(self.config, CLIPTextConfig) + if self.causal: + self.causal_mask = make_causal_mask(jnp.ones((1, self.config.max_position_embeddings), dtype="i4")) + + def _split_heads(self, hidden_states): + return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) + + def _merge_heads(self, hidden_states): + return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) + + def __call__( + self, + hidden_states, + attention_mask=None, + deterministic: bool = True, + output_attentions: bool = False, + ): + query = self.q_proj(hidden_states) + key = self.k_proj(hidden_states) + value = self.v_proj(hidden_states) + + query = self._split_heads(query) + key = self._split_heads(key) + value = self._split_heads(value) + + causal_attention_mask = None + if self.causal: + query_length, key_length = query.shape[1], key.shape[1] + causal_attention_mask = self.causal_mask[:, :, key_length - query_length : key_length, :key_length] + + if attention_mask is not None and causal_attention_mask is not None: + attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) + attention_mask = combine_masks(attention_mask, causal_attention_mask, dtype="i4") + elif causal_attention_mask is not None: + attention_mask = causal_attention_mask + elif attention_mask is not None: + attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) + + if attention_mask is not None: + attention_bias = lax.select( + attention_mask > 0, + jnp.full(attention_mask.shape, 0.0).astype(self.dtype), + jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), + ) + else: + attention_bias = None + + dropout_rng = None + if not deterministic and self.dropout > 0.0: + dropout_rng = self.make_rng("dropout") + + attn_weights = dot_product_attention_weights( + query, + key, + bias=attention_bias, + dropout_rng=dropout_rng, + dropout_rate=self.dropout, + deterministic=deterministic, + dtype=self.dtype, + precision=None, + ) + + attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value) + attn_output = self._merge_heads(attn_output) + attn_output = self.out_proj(attn_output) + + outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) + return outputs + + +class FlaxCLIPMLP(nn.Module): + config: Union[CLIPTextConfig, CLIPVisionConfig] + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.activation_fn = ACT2FN[self.config.hidden_act] + self.fc1 = nn.Dense( + self.config.intermediate_size, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(0.01), + ) + self.fc2 = nn.Dense(self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.01)) + + def __call__(self, hidden_states): + hidden_states = self.fc1(hidden_states) + hidden_states = self.activation_fn(hidden_states) + hidden_states = self.fc2(hidden_states) + return hidden_states + + +class FlaxCLIPEncoderLayer(nn.Module): + config: Union[CLIPTextConfig, CLIPVisionConfig] + dtype: jnp.dtype = jnp.float32 + + def setup(self): + 
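+        # Descriptive note: this is a pre-LayerNorm Transformer block. In `__call__` below, the
+        # sub-layers set up here are applied as x = x + self_attn(layer_norm1(x)) followed by
+        # x = x + mlp(layer_norm2(x)).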
self.self_attn = FlaxCLIPAttention(self.config, dtype=self.dtype) + self.layer_norm1 = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) + self.mlp = FlaxCLIPMLP(self.config, dtype=self.dtype) + self.layer_norm2 = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) + + def __call__( + self, + hidden_states, + attention_mask, + deterministic: bool = True, + output_attentions: bool = False, + ): + residual = hidden_states + + hidden_states = self.layer_norm1(hidden_states) + attn_outputs = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + deterministic=deterministic, + output_attentions=output_attentions, + ) + hidden_states = attn_outputs[0] + hidden_states = residual + hidden_states + + residual = hidden_states + hidden_states = self.layer_norm2(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += attn_outputs[1:] + + return outputs + + +class FlaxCLIPLayerCollection(nn.Module): + config: Union[CLIPTextConfig, CLIPVisionConfig] + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.layers = [ + FlaxCLIPEncoderLayer(self.config, name=str(i), dtype=self.dtype) + for i in range(self.config.num_hidden_layers) + ] + + def __call__( + self, + hidden_states, + attention_mask=None, + deterministic: bool = True, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + all_attentions = () if output_attentions else None + all_hidden_states = () if output_hidden_states else None + + for layer in self.layers: + if output_hidden_states: + all_hidden_states += (hidden_states,) + + layer_outputs = layer( + hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions + ) + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions += (layer_outputs[1],) + + if output_hidden_states: + all_hidden_states += (hidden_states,) + + outputs = (hidden_states,) + + if not return_dict: + return tuple(v for v in outputs if v is not None) + + return FlaxBaseModelOutput( + last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions + ) + + +class FlaxCLIPEncoder(nn.Module): + config: Union[CLIPTextConfig, CLIPVisionConfig] + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.layers = FlaxCLIPLayerCollection(self.config, dtype=self.dtype) + + def __call__( + self, + inputs_embeds, + attention_mask=None, + deterministic: bool = True, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + return self.layers( + hidden_states=inputs_embeds, + attention_mask=attention_mask, + deterministic=deterministic, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + +class FlaxCLIPTextTransformer(nn.Module): + config: CLIPTextConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.embeddings = FlaxCLIPTextEmbeddings(self.config, dtype=self.dtype) + self.encoder = FlaxCLIPEncoder(self.config, dtype=self.dtype) + self.final_layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) + + # For `pooled_output` computation + self.eos_token_id = self.config.eos_token_id + + def __call__( + self, + input_ids, + attention_mask, + position_ids, + deterministic: bool = True, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, 
+ ): + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids) + + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + attention_mask=attention_mask, + deterministic=deterministic, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + last_hidden_state = encoder_outputs[0] + last_hidden_state = self.final_layer_norm(last_hidden_state) + + if self.eos_token_id == 2: + # The `eos_token_id` was incorrect before PR #24773: Let's keep what have been done here. + # A CLIP model with such `eos_token_id` in the config can't work correctly with extra new tokens added + # ------------------------------------------------------------ + # text_embeds.shape = [batch_size, sequence_length, transformer.width] + # take features from the EOS embedding (eos_token_id is the highest number in each sequence) + pooled_output = last_hidden_state[jnp.arange(last_hidden_state.shape[0]), input_ids.argmax(axis=-1)] + else: + # (no need to cast from bool to int after comparing to `eos_token_id`) + pooled_output = last_hidden_state[ + jnp.arange(last_hidden_state.shape[0]), (input_ids == self.eos_token_id).argmax(axis=-1) + ] + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return FlaxBaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +class FlaxCLIPVisionTransformer(nn.Module): + config: CLIPVisionConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.embeddings = FlaxCLIPVisionEmbeddings(self.config, dtype=self.dtype) + self.pre_layrnorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) + self.encoder = FlaxCLIPEncoder(self.config, dtype=self.dtype) + self.post_layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) + + def __call__( + self, + pixel_values=None, + deterministic: bool = True, + output_attentions=None, + output_hidden_states=None, + return_dict: bool = True, + ): + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + hidden_states = self.embeddings(pixel_values) + hidden_states = self.pre_layrnorm(hidden_states) + + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + deterministic=deterministic, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + last_hidden_state = encoder_outputs[0] + pooled_output = last_hidden_state[:, 0, :] + pooled_output = self.post_layernorm(pooled_output) + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return FlaxBaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +class 
FlaxCLIPTextPreTrainedModel(FlaxPreTrainedModel): + config_class = CLIPTextConfig + module_class: nn.Module = None + + def __init__( + self, + config: CLIPTextConfig, + input_shape=(1, 1), + seed: int = 0, + dtype: jnp.dtype = jnp.float32, + _do_init: bool = True, + **kwargs, + ): + module = self.module_class(config=config, dtype=dtype, **kwargs) + super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) + + def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: + # init input tensor + input_ids = jnp.zeros(input_shape, dtype="i4") + position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) + attention_mask = jnp.ones_like(input_ids) + + params_rng, dropout_rng = jax.random.split(rng) + rngs = {"params": params_rng, "dropout": dropout_rng} + + random_params = self.module.init(rngs, input_ids, attention_mask, position_ids)["params"] + + if params is not None: + random_params = flatten_dict(unfreeze(random_params)) + params = flatten_dict(unfreeze(params)) + for missing_key in self._missing_keys: + params[missing_key] = random_params[missing_key] + self._missing_keys = set() + return freeze(unflatten_dict(params)) + else: + return random_params + + def __call__( + self, + input_ids, + attention_mask=None, + position_ids=None, + params: dict = None, + dropout_rng: jax.random.PRNGKey = None, + train: bool = False, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.return_dict + + if position_ids is None: + position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) + + if attention_mask is None: + attention_mask = jnp.ones_like(input_ids) + + # Handle any PRNG if needed + rngs = {} + if dropout_rng is not None: + rngs["dropout"] = dropout_rng + + return self.module.apply( + {"params": params or self.params}, + jnp.array(input_ids, dtype="i4"), + jnp.array(attention_mask, dtype="i4"), + jnp.array(position_ids, dtype="i4"), + not train, + output_attentions, + output_hidden_states, + return_dict, + rngs=rngs, + ) + + +class FlaxCLIPVisionPreTrainedModel(FlaxPreTrainedModel): + config_class = CLIPVisionConfig + main_input_name = "pixel_values" + module_class: nn.Module = None + + def __init__( + self, + config: CLIPVisionConfig, + input_shape: Optional[Tuple] = None, + seed: int = 0, + dtype: jnp.dtype = jnp.float32, + _do_init: bool = True, + **kwargs, + ): + if input_shape is None: + input_shape = (1, config.image_size, config.image_size, 3) + module = self.module_class(config=config, dtype=dtype, **kwargs) + super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) + + def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: + # init input tensor + pixel_values = jax.random.normal(rng, input_shape) + + params_rng, dropout_rng = jax.random.split(rng) + rngs = {"params": params_rng, "dropout": dropout_rng} + + random_params = self.module.init(rngs, pixel_values)["params"] + + if params is not None: + random_params = 
flatten_dict(unfreeze(random_params)) + params = flatten_dict(unfreeze(params)) + for missing_key in self._missing_keys: + params[missing_key] = random_params[missing_key] + self._missing_keys = set() + return freeze(unflatten_dict(params)) + else: + return random_params + + def __call__( + self, + pixel_values, + params: dict = None, + dropout_rng: jax.random.PRNGKey = None, + train: bool = False, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.return_dict + + pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1)) + + # Handle any PRNG if needed + rngs = {} + if dropout_rng is not None: + rngs["dropout"] = dropout_rng + + return self.module.apply( + {"params": params or self.params}, + jnp.array(pixel_values, dtype=jnp.float32), + not train, + output_attentions, + output_hidden_states, + return_dict, + rngs=rngs, + ) + + +class FlaxCLIPPreTrainedModel(FlaxPreTrainedModel): + config_class = CLIPConfig + module_class: nn.Module = None + + def __init__( + self, + config: CLIPConfig, + input_shape: Optional[Tuple] = None, + seed: int = 0, + dtype: jnp.dtype = jnp.float32, + _do_init: bool = True, + **kwargs, + ): + if input_shape is None: + input_shape = ((1, 1), (1, config.vision_config.image_size, config.vision_config.image_size, 3)) + module = self.module_class(config=config, dtype=dtype, **kwargs) + super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) + + def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: + # init input tensor + input_ids = jnp.zeros(input_shape[0], dtype="i4") + position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape[0]) + attention_mask = jnp.ones_like(input_ids) + + pixel_values = jax.random.normal(rng, input_shape[1]) + + params_rng, dropout_rng = jax.random.split(rng) + rngs = {"params": params_rng, "dropout": dropout_rng} + + random_params = self.module.init(rngs, input_ids, pixel_values, attention_mask, position_ids)["params"] + + if params is not None: + random_params = flatten_dict(unfreeze(random_params)) + params = flatten_dict(unfreeze(params)) + for missing_key in self._missing_keys: + params[missing_key] = random_params[missing_key] + self._missing_keys = set() + return freeze(unflatten_dict(params)) + else: + return random_params + + def __call__( + self, + input_ids, + pixel_values, + attention_mask=None, + position_ids=None, + params: dict = None, + dropout_rng: jax.random.PRNGKey = None, + train: bool = False, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.return_dict + + if position_ids is None: + position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) + + if attention_mask is None: + 
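+            # No mask provided: default to attending over every token (all-ones mask shaped like input_ids).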
attention_mask = jnp.ones_like(input_ids) + + pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1)) + + # Handle any PRNG if needed + rngs = {} + if dropout_rng is not None: + rngs["dropout"] = dropout_rng + + return self.module.apply( + {"params": params or self.params}, + jnp.array(input_ids, dtype="i4"), + jnp.array(pixel_values, dtype=jnp.float32), + jnp.array(attention_mask, dtype="i4"), + jnp.array(position_ids, dtype="i4"), + not train, + output_attentions, + output_hidden_states, + return_dict, + rngs=rngs, + ) + + def get_text_features( + self, + input_ids, + attention_mask=None, + position_ids=None, + params: dict = None, + dropout_rng: jax.random.PRNGKey = None, + train=False, + ): + r""" + Args: + input_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + + Returns: + text_features (`jnp.ndarray` of shape `(batch_size, output_dim`): The text embeddings obtained by applying + the projection layer to the pooled output of [`FlaxCLIPTextModel`]. + + Examples: + + ```python + >>> from transformers import AutoTokenizer, FlaxCLIPModel + + >>> model = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32") + >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32") + + >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="np") + >>> text_features = model.get_text_features(**inputs) + ```""" + if position_ids is None: + position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) + + if attention_mask is None: + attention_mask = jnp.ones_like(input_ids) + + # Handle any PRNG if needed + rngs = {} + if dropout_rng is not None: + rngs["dropout"] = dropout_rng + + def _get_features(module, input_ids, attention_mask, position_ids, deterministic): + text_outputs = module.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + deterministic=deterministic, + ) + pooled_output = text_outputs[1] + text_features = module.text_projection(pooled_output) + return text_features + + return self.module.apply( + {"params": params or self.params}, + jnp.array(input_ids, dtype="i4"), + jnp.array(attention_mask, dtype="i4"), + jnp.array(position_ids, dtype="i4"), + not train, + method=_get_features, + rngs=rngs, + ) + + def get_image_features( + self, pixel_values, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train=False + ): + r""" + Args: + pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained + using [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details. 
+ + Returns: + image_features (`jnp.ndarray` of shape `(batch_size, output_dim`): The image embeddings obtained by + applying the projection layer to the pooled output of [`FlaxCLIPVisionModel`] + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, FlaxCLIPModel + + >>> model = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32") + >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, return_tensors="np") + + >>> image_features = model.get_image_features(**inputs) + ```""" + pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1)) + + # Handle any PRNG if needed + rngs = {} + if dropout_rng is not None: + rngs["dropout"] = dropout_rng + + def _get_features(module, pixel_values, deterministic): + vision_outputs = module.vision_model(pixel_values=pixel_values, deterministic=deterministic) + pooled_output = vision_outputs[1] # pooled_output + image_features = module.visual_projection(pooled_output) + return image_features + + return self.module.apply( + {"params": params or self.params}, + jnp.array(pixel_values, dtype=jnp.float32), + not train, + method=_get_features, + rngs=rngs, + ) + + +class FlaxCLIPTextModule(nn.Module): + config: CLIPTextConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.text_model = FlaxCLIPTextTransformer(self.config, dtype=self.dtype) + + def __call__( + self, + input_ids, + attention_mask, + position_ids, + deterministic: bool = True, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + return self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + deterministic=deterministic, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + +class FlaxCLIPTextModel(FlaxCLIPTextPreTrainedModel): + module_class = FlaxCLIPTextModule + + +FLAX_CLIP_TEXT_MODEL_DOCSTRING = """ + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, FlaxCLIPTextModel + + >>> model = FlaxCLIPTextModel.from_pretrained("openai/clip-vit-base-patch32") + >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32") + + >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="np") + + >>> outputs = model(**inputs) + >>> last_hidden_state = outputs.last_hidden_state + >>> pooler_output = outputs.pooler_output # pooled (EOS token) states + ``` +""" + +overwrite_call_docstring(FlaxCLIPTextModel, CLIP_TEXT_INPUTS_DOCSTRING + FLAX_CLIP_TEXT_MODEL_DOCSTRING) +append_replace_return_docstrings( + FlaxCLIPTextModel, output_type=FlaxBaseModelOutputWithPooling, config_class=CLIPTextConfig +) + + +class FlaxCLIPTextModelWithProjectionModule(nn.Module): + config: CLIPTextConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.text_model = FlaxCLIPTextTransformer(self.config, dtype=self.dtype) + self.text_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype) + + def __call__( + self, + input_ids, + attention_mask, + position_ids, + deterministic: bool = True, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + text_outputs = self.text_model( + input_ids=input_ids, + 
attention_mask=attention_mask, + position_ids=position_ids, + deterministic=deterministic, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = text_outputs[1] + text_embeds = self.text_projection(pooled_output) + + if not return_dict: + return (text_embeds, text_outputs[0]) + text_outputs[2:] + + return FlaxCLIPTextModelOutput( + text_embeds=text_embeds, + last_hidden_state=text_outputs.last_hidden_state, + hidden_states=text_outputs.hidden_states, + attentions=text_outputs.attentions, + ) + + +class FlaxCLIPTextModelWithProjection(FlaxCLIPTextPreTrainedModel): + module_class = FlaxCLIPTextModelWithProjectionModule + + +FLAX_CLIP_TEXT_MODEL_WITH_PROJECTION_DOCSTRING = """ + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, FlaxCLIPTextModelWithProjection + + >>> model = FlaxCLIPTextModelWithProjection.from_pretrained("openai/clip-vit-base-patch32") + >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32") + + >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="np") + + >>> outputs = model(**inputs) + >>> text_embeds = outputs.text_embeds + ``` +""" + +overwrite_call_docstring( + FlaxCLIPTextModelWithProjection, CLIP_TEXT_INPUTS_DOCSTRING + FLAX_CLIP_TEXT_MODEL_WITH_PROJECTION_DOCSTRING +) +append_replace_return_docstrings( + FlaxCLIPTextModelWithProjection, output_type=FlaxCLIPTextModelOutput, config_class=CLIPTextConfig +) + + +class FlaxCLIPVisionModule(nn.Module): + config: CLIPVisionConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.vision_model = FlaxCLIPVisionTransformer(self.config, dtype=self.dtype) + + def __call__( + self, + pixel_values, + deterministic: bool = True, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + return self.vision_model( + pixel_values=pixel_values, + deterministic=deterministic, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + +class FlaxCLIPVisionModel(FlaxCLIPVisionPreTrainedModel): + module_class = FlaxCLIPVisionModule + + +FLAX_CLIP_VISION_MODEL_DOCSTRING = """ + Returns: + + Example: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, FlaxCLIPVisionModel + + >>> model = FlaxCLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32") + >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, return_tensors="np") + + >>> outputs = model(**inputs) + >>> last_hidden_state = outputs.last_hidden_state + >>> pooler_output = outputs.pooler_output # pooled CLS states + ``` +""" + +overwrite_call_docstring(FlaxCLIPVisionModel, CLIP_VISION_INPUTS_DOCSTRING + FLAX_CLIP_VISION_MODEL_DOCSTRING) +append_replace_return_docstrings( + FlaxCLIPVisionModel, output_type=FlaxBaseModelOutputWithPooling, config_class=CLIPVisionConfig +) + + +class FlaxCLIPModule(nn.Module): + config: CLIPConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + text_config = self.config.text_config + vision_config = self.config.vision_config + + self.projection_dim = self.config.projection_dim + self.text_embed_dim = text_config.hidden_size + self.vision_embed_dim = vision_config.hidden_size + + self.text_model = 
FlaxCLIPTextTransformer(text_config, dtype=self.dtype) + self.vision_model = FlaxCLIPVisionTransformer(vision_config, dtype=self.dtype) + + self.visual_projection = nn.Dense( + self.projection_dim, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(0.02), + use_bias=False, + ) + self.text_projection = nn.Dense( + self.projection_dim, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(0.02), + use_bias=False, + ) + + self.logit_scale = self.param( + "logit_scale", lambda _, shape: jnp.ones(shape) * self.config.logit_scale_init_value, [] + ) + + def __call__( + self, + input_ids=None, + pixel_values=None, + attention_mask=None, + position_ids=None, + deterministic: bool = True, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + return_dict = return_dict if return_dict is not None else self.config.return_dict + + vision_outputs = self.vision_model( + pixel_values=pixel_values, + deterministic=deterministic, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + text_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + deterministic=deterministic, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + image_embeds = vision_outputs[1] + image_embeds = self.visual_projection(image_embeds) + + text_embeds = text_outputs[1] + text_embeds = self.text_projection(text_embeds) + + # normalized features + image_embeds = image_embeds / jnp.linalg.norm(image_embeds, axis=-1, keepdims=True) + text_embeds = text_embeds / jnp.linalg.norm(text_embeds, axis=-1, keepdims=True) + + # cosine similarity as logits + logit_scale = jnp.exp(self.logit_scale) + logits_per_text = jnp.matmul(text_embeds, image_embeds.T) * logit_scale + logits_per_image = logits_per_text.T + + if not return_dict: + return (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) + + return FlaxCLIPOutput( + logits_per_image=logits_per_image, + logits_per_text=logits_per_text, + text_embeds=text_embeds, + image_embeds=image_embeds, + text_model_output=text_outputs, + vision_model_output=vision_outputs, + ) + + +@add_start_docstrings(CLIP_START_DOCSTRING) +class FlaxCLIPModel(FlaxCLIPPreTrainedModel): + module_class = FlaxCLIPModule + + +FLAX_CLIP_MODEL_DOCSTRING = """ + Returns: + + Example: + + ```python + >>> import jax + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, FlaxCLIPModel + + >>> model = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32") + >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor( + ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="np", padding=True + ... 
) + + >>> outputs = model(**inputs) + >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score + >>> probs = jax.nn.softmax(logits_per_image, axis=1) # we can take the softmax to get the label probabilities + ``` +""" + +overwrite_call_docstring(FlaxCLIPModel, CLIP_INPUTS_DOCSTRING + FLAX_CLIP_MODEL_DOCSTRING) +append_replace_return_docstrings(FlaxCLIPModel, output_type=FlaxCLIPOutput, config_class=CLIPConfig) diff --git a/venv/lib/python3.10/site-packages/transformers/models/clip/modeling_tf_clip.py b/venv/lib/python3.10/site-packages/transformers/models/clip/modeling_tf_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..c7e8ba7f5c954e95de08d39966b7a3fd934f9f93 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/clip/modeling_tf_clip.py @@ -0,0 +1,1461 @@ +# coding=utf-8 +# Copyright 2021 The OpenAI Team Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" TF 2.0 CLIP model.""" + + +from __future__ import annotations + +import math +from dataclasses import dataclass +from typing import Any, Optional, Tuple, Union + +import numpy as np +import tensorflow as tf + +from ...activations_tf import get_tf_activation +from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling + +# Public API +from ...modeling_tf_utils import ( + TFModelInputType, + TFPreTrainedModel, + get_initializer, + keras, + keras_serializable, + unpack_inputs, +) +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax +from ...utils import ( + ModelOutput, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_clip import CLIPConfig, CLIPTextConfig, CLIPVisionConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "openai/clip-vit-base-patch32" + + +from ..deprecated._archive_maps import TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +LARGE_NEGATIVE = -1e8 + + +# Copied from transformers.models.bart.modeling_tf_bart._expand_mask +def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
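+
+    Unmasked positions (value 1) are mapped to an additive bias of 0 and masked positions (value 0) to
+    `LARGE_NEGATIVE`, so the result can be added directly to the raw attention scores.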
+ """ + src_len = shape_list(mask)[1] + tgt_len = tgt_len if tgt_len is not None else src_len + one_cst = tf.constant(1.0) + mask = tf.cast(mask, dtype=one_cst.dtype) + expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) + + return (one_cst - expanded_mask) * LARGE_NEGATIVE + + +# contrastive loss function, adapted from +# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html +def contrastive_loss(logits: tf.Tensor) -> tf.Tensor: + return tf.math.reduce_mean( + keras.metrics.sparse_categorical_crossentropy( + y_true=tf.range(shape_list(logits)[0]), y_pred=logits, from_logits=True + ) + ) + + +def clip_loss(similarity: tf.Tensor) -> tf.Tensor: + caption_loss = contrastive_loss(similarity) + image_loss = contrastive_loss(tf.transpose(similarity)) + return (caption_loss + image_loss) / 2.0 + + +@dataclass +class TFCLIPOutput(ModelOutput): + """ + Args: + loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): + Contrastive loss for image-text similarity. + logits_per_image:(`tf.Tensor` of shape `(image_batch_size, text_batch_size)`): + The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text + similarity scores. + logits_per_text:(`tf.Tensor` of shape `(text_batch_size, image_batch_size)`): + The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image + similarity scores. + text_embeds(`tf.Tensor` of shape `(batch_size, output_dim`): + The text embeddings obtained by applying the projection layer to the pooled output of [`TFCLIPTextModel`]. + image_embeds(`tf.Tensor` of shape `(batch_size, output_dim`): + The image embeddings obtained by applying the projection layer to the pooled output of + [`TFCLIPVisionModel`]. + text_model_output([`~modeling_tf_utils.TFBaseModelOutputWithPooling`]): + The output of the [`TFCLIPTextModel`]. + vision_model_output([`~modeling_tf_utils.TFBaseModelOutputWithPooling`]): + The output of the [`TFCLIPVisionModel`]. 
+ """ + + loss: tf.Tensor | None = None + logits_per_image: tf.Tensor = None + logits_per_text: tf.Tensor = None + text_embeds: tf.Tensor = None + image_embeds: tf.Tensor = None + text_model_output: TFBaseModelOutputWithPooling = None + vision_model_output: TFBaseModelOutputWithPooling = None + + def to_tuple(self) -> Tuple[Any]: + return tuple( + self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() + for k in self.keys() + ) + + +class TFCLIPVisionEmbeddings(keras.layers.Layer): + def __init__(self, config: CLIPVisionConfig, **kwargs): + super().__init__(**kwargs) + + self.embed_dim = config.hidden_size + self.image_size = config.image_size + self.patch_size = config.patch_size + + self.num_patches = (self.image_size // self.patch_size) ** 2 + self.num_positions = self.num_patches + 1 + + self.config = config + + self.patch_embedding = keras.layers.Conv2D( + filters=self.embed_dim, + kernel_size=self.patch_size, + strides=self.patch_size, + padding="valid", + data_format="channels_last", + use_bias=False, + kernel_initializer=get_initializer(self.config.initializer_range * self.config.initializer_factor), + name="patch_embedding", + ) + + def build(self, input_shape: tf.TensorShape = None): + factor = self.config.initializer_factor + + self.class_embedding = self.add_weight( + shape=(self.embed_dim,), + initializer=get_initializer(self.embed_dim**-0.5 * factor), + trainable=True, + name="class_embedding", + ) + + with tf.name_scope("position_embedding"): + self.position_embedding = self.add_weight( + shape=(self.num_positions, self.embed_dim), + initializer=get_initializer(self.config.initializer_range * factor), + trainable=True, + name="embeddings", + ) + + if self.built: + return + self.built = True + if getattr(self, "patch_embedding", None) is not None: + with tf.name_scope(self.patch_embedding.name): + self.patch_embedding.build([None, None, None, self.config.num_channels]) + + def call(self, pixel_values: tf.Tensor) -> tf.Tensor: + """`pixel_values` is expected to be of NCHW format.""" + + batch_size, num_channels, height, width = shape_list(pixel_values) + + # When running on CPU, `tf.nn.conv2d` doesn't support `NCHW` format. + # So change the input format from `NCHW` to `NHWC`. + # shape = (batch_size, in_height, in_width, in_channels=num_channels) + pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) + + patch_embeds = self.patch_embedding(pixel_values) + + # Change the 2D spatial dimensions to a single temporal dimension. 
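+        # (e.g. with the CLIP ViT-B/32 defaults, image_size=224 and patch_size=32 give 7 * 7 = 49 patch
+        # tokens, to which the [CLS] token is prepended below for 50 positions in total)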
+ # shape = (batch_size, num_patches, out_channels=embed_dim) + patch_embeds = tf.reshape(tensor=patch_embeds, shape=(batch_size, self.num_patches, -1)) + + # add the [CLS] token to the embedded patch tokens + class_embeds = tf.broadcast_to(self.class_embedding, shape=(batch_size, 1, self.embed_dim)) + embeddings = tf.concat((class_embeds, patch_embeds), axis=1) + + embeddings = embeddings + self.position_embedding + + return embeddings + + +class TFCLIPTextEmbeddings(keras.layers.Layer): + def __init__(self, config: CLIPTextConfig, **kwargs): + super().__init__(**kwargs) + + self.embed_dim = config.hidden_size + + self.config = config + + def build(self, input_shape: tf.TensorShape = None): + with tf.name_scope("token_embedding"): + self.weight = self.add_weight( + shape=(self.config.vocab_size, self.embed_dim), + initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range), + trainable=True, + name="weight", + ) + + with tf.name_scope("position_embedding"): + self.position_embedding = self.add_weight( + shape=(self.config.max_position_embeddings, self.embed_dim), + initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range), + trainable=True, + name="embeddings", + ) + + super().build(input_shape) + + def call( + self, + input_ids: tf.Tensor = None, + position_ids: tf.Tensor = None, + inputs_embeds: tf.Tensor = None, + ) -> tf.Tensor: + """ + Applies embedding based on inputs tensor. + + Returns: + final_embeddings (`tf.Tensor`): output embedding tensor. + """ + if input_ids is None and inputs_embeds is None: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if inputs_embeds is None: + check_embeddings_within_bounds(input_ids, self.config.vocab_size) + inputs_embeds = tf.gather(params=self.weight, indices=input_ids) + + input_shape = shape_list(inputs_embeds)[:-1] + + if position_ids is None: + position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) + + position_embeds = tf.gather(params=self.position_embedding, indices=position_ids) + position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1)) + final_embeddings = inputs_embeds + position_embeds + + return final_embeddings + + +class TFCLIPAttention(keras.layers.Layer): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config: CLIPConfig, **kwargs): + super().__init__(**kwargs) + + self.embed_dim = config.hidden_size + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = self.embed_dim // self.num_attention_heads + if self.attention_head_size * self.num_attention_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" + f" {self.num_attention_heads})." 
+ ) + + factor = config.initializer_factor + in_proj_std = (self.embed_dim**-0.5) * ((2 * config.num_hidden_layers) ** -0.5) * factor + out_proj_std = (self.embed_dim**-0.5) * factor + + self.sqrt_att_head_size = math.sqrt(self.attention_head_size) + + self.q_proj = keras.layers.Dense( + units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="q_proj" + ) + self.k_proj = keras.layers.Dense( + units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="k_proj" + ) + self.v_proj = keras.layers.Dense( + units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="v_proj" + ) + + self.dropout = keras.layers.Dropout(rate=config.attention_dropout) + + self.out_proj = keras.layers.Dense( + units=self.embed_dim, kernel_initializer=get_initializer(out_proj_std), name="out_proj" + ) + + # copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention.transpose_for_scores + def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: + # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] + tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) + + # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] + return tf.transpose(tensor, perm=[0, 2, 1, 3]) + + def call( + self, + hidden_states: tf.Tensor, + attention_mask: tf.Tensor, + causal_attention_mask: tf.Tensor, + output_attentions: bool, + training: bool = False, + ) -> Tuple[tf.Tensor]: + """Input shape: Batch x Time x Channel""" + + batch_size = shape_list(hidden_states)[0] + mixed_query_layer = self.q_proj(inputs=hidden_states) + mixed_key_layer = self.k_proj(inputs=hidden_states) + mixed_value_layer = self.v_proj(inputs=hidden_states) + query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) + key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) + value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) + + # Take the dot product between "query" and "key" to get the raw attention scores. + # (batch size, num_heads, seq_len_q, seq_len_k) + attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) + dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) + attention_scores = tf.divide(attention_scores, dk) + + # apply the causal_attention_mask first + if causal_attention_mask is not None: + # Apply the causal attention mask (precomputed for all layers in TFCLIPModel call() function) + attention_scores = tf.add(attention_scores, causal_attention_mask) + + if attention_mask is not None: + # Apply the attention mask (precomputed for all layers in TFCLIPModel call() function) + attention_scores = tf.add(attention_scores, attention_mask) + + # Normalize the attention scores to probabilities. + _attention_probs = stable_softmax(logits=attention_scores, axis=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
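+        # Dropout on the attention probabilities is only active when `training=True`; the pre-dropout
+        # `_attention_probs` are the weights returned when `output_attentions=True` below.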
+ attention_probs = self.dropout(inputs=_attention_probs, training=training) + + attention_output = tf.matmul(attention_probs, value_layer) + attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) + + # (batch_size, seq_len_q, embed_dim) + attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.embed_dim)) + + attention_output = self.out_proj(attention_output, training=training) + # In TFBert, attention weights are returned after dropout. + # However, in CLIP, they are returned before dropout. + outputs = (attention_output, _attention_probs) if output_attentions else (attention_output,) + + return outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "q_proj", None) is not None: + with tf.name_scope(self.q_proj.name): + self.q_proj.build([None, None, self.embed_dim]) + if getattr(self, "k_proj", None) is not None: + with tf.name_scope(self.k_proj.name): + self.k_proj.build([None, None, self.embed_dim]) + if getattr(self, "v_proj", None) is not None: + with tf.name_scope(self.v_proj.name): + self.v_proj.build([None, None, self.embed_dim]) + if getattr(self, "out_proj", None) is not None: + with tf.name_scope(self.out_proj.name): + self.out_proj.build([None, None, self.embed_dim]) + + +class TFCLIPMLP(keras.layers.Layer): + def __init__(self, config: CLIPConfig, **kwargs): + super().__init__(**kwargs) + + self.activation_fn = get_tf_activation(config.hidden_act) + + factor = config.initializer_factor + in_proj_std = (config.hidden_size**-0.5) * ((2 * config.num_hidden_layers) ** -0.5) * factor + fc_std = (2 * config.hidden_size) ** -0.5 * factor + + self.fc1 = keras.layers.Dense( + units=config.intermediate_size, kernel_initializer=get_initializer(fc_std), name="fc1" + ) + self.fc2 = keras.layers.Dense( + units=config.hidden_size, kernel_initializer=get_initializer(in_proj_std), name="fc2" + ) + self.config = config + + def call(self, hidden_states: tf.Tensor) -> tf.Tensor: + hidden_states = self.fc1(inputs=hidden_states) + hidden_states = self.activation_fn(hidden_states) + hidden_states = self.fc2(inputs=hidden_states) + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "fc1", None) is not None: + with tf.name_scope(self.fc1.name): + self.fc1.build([None, None, self.config.hidden_size]) + if getattr(self, "fc2", None) is not None: + with tf.name_scope(self.fc2.name): + self.fc2.build([None, None, self.config.intermediate_size]) + + +class TFCLIPEncoderLayer(keras.layers.Layer): + def __init__(self, config: CLIPConfig, **kwargs): + super().__init__(**kwargs) + + self.embed_dim = config.hidden_size + self.self_attn = TFCLIPAttention(config, name="self_attn") + self.layer_norm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1") + self.mlp = TFCLIPMLP(config, name="mlp") + self.layer_norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2") + + def call( + self, + hidden_states: tf.Tensor, + attention_mask: tf.Tensor, + causal_attention_mask: tf.Tensor, + output_attentions: bool, + training: bool = False, + ) -> Tuple[tf.Tensor]: + """ + Args: + hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`tf.Tensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. 
+ causal_attention_mask (`tf.Tensor`): causal attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + output_attentions (`bool`): + Whether or not to return the attentions tensors of all attention layers. See `outputs` under returned + tensors for more detail. + """ + residual = hidden_states + + hidden_states = self.layer_norm1(inputs=hidden_states) + attention_outputs = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + causal_attention_mask=causal_attention_mask, + output_attentions=output_attentions, + training=training, + ) + hidden_states = attention_outputs[0] + hidden_states = residual + hidden_states + + residual = hidden_states + hidden_states = self.layer_norm2(inputs=hidden_states) + hidden_states = self.mlp(hidden_states=hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + attention_outputs[1:] # add attentions if we output them + + return outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "self_attn", None) is not None: + with tf.name_scope(self.self_attn.name): + self.self_attn.build(None) + if getattr(self, "layer_norm1", None) is not None: + with tf.name_scope(self.layer_norm1.name): + self.layer_norm1.build([None, None, self.embed_dim]) + if getattr(self, "mlp", None) is not None: + with tf.name_scope(self.mlp.name): + self.mlp.build(None) + if getattr(self, "layer_norm2", None) is not None: + with tf.name_scope(self.layer_norm2.name): + self.layer_norm2.build([None, None, self.embed_dim]) + + +class TFCLIPEncoder(keras.layers.Layer): + """ + Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a + [`TFCLIPEncoderLayer`]. 
+ + Args: + config: CLIPConfig + """ + + def __init__(self, config: CLIPConfig, **kwargs): + super().__init__(**kwargs) + + self.layers = [TFCLIPEncoderLayer(config, name=f"layers_._{i}") for i in range(config.num_hidden_layers)] + + def call( + self, + hidden_states: tf.Tensor, + attention_mask: tf.Tensor, + causal_attention_mask: tf.Tensor, + output_attentions: bool, + output_hidden_states: bool, + return_dict: bool, + training: bool = False, + ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: + all_hidden_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + for i, layer_module in enumerate(self.layers): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_outputs = layer_module( + hidden_states=hidden_states, + attention_mask=attention_mask, + causal_attention_mask=causal_attention_mask, + output_attentions=output_attentions, + training=training, + ) + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + # Add last layer + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) + + return TFBaseModelOutput( + last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "layers", None) is not None: + for layer in self.layers: + with tf.name_scope(layer.name): + layer.build(None) + + +class TFCLIPTextTransformer(keras.layers.Layer): + def __init__(self, config: CLIPTextConfig, **kwargs): + super().__init__(**kwargs) + + self.embeddings = TFCLIPTextEmbeddings(config, name="embeddings") + self.encoder = TFCLIPEncoder(config, name="encoder") + self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="final_layer_norm") + + # For `pooled_output` computation + self.eos_token_id = config.eos_token_id + self.embed_dim = config.hidden_size + + def call( + self, + input_ids: TFModelInputType, + attention_mask: tf.Tensor, + position_ids: tf.Tensor, + output_attentions: bool, + output_hidden_states: bool, + return_dict: bool, + training: bool = False, + ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: + input_shape = shape_list(input_ids) + + embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids) + + batch_size, seq_length = input_shape + # CLIP's text model uses causal mask, prepare it here. + # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 + causal_attention_mask = self._build_causal_attention_mask(batch_size, seq_length, dtype=embedding_output.dtype) + + # check attention mask and invert + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _expand_mask(attention_mask) + + encoder_outputs = self.encoder( + hidden_states=embedding_output, + attention_mask=attention_mask, + causal_attention_mask=causal_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + sequence_output = encoder_outputs[0] + sequence_output = self.final_layer_norm(inputs=sequence_output) + + if self.eos_token_id == 2: + # The `eos_token_id` was incorrect before PR #24773: Let's keep what have been done here. 
+ # A CLIP model with such `eos_token_id` in the config can't work correctly with extra new tokens added + # ------------------------------------------------------------ + # text_embeds.shape = [batch_size, n_ctx, transformer.width] + # take features from the eot embedding (eot_token is the highest number in each sequence) + pooled_output = tf.gather_nd( + params=sequence_output, + indices=tf.stack( + values=(tf.range(input_shape[0], dtype=tf.int64), tf.math.argmax(input_ids, axis=-1)), axis=1 + ), + ) + else: + # The config gets updated `eos_token_id` from PR #24773 (so the use of exta new tokens is possible) + pooled_output = tf.gather_nd( + params=sequence_output, + indices=tf.stack( + values=( + tf.range(input_shape[0], dtype=tf.int64), + tf.math.argmax(tf.cast(input_ids == self.eos_token_id, dtype=tf.int8), axis=-1), + ), + axis=1, + ), + ) + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return TFBaseModelOutputWithPooling( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + def _build_causal_attention_mask(self, batch_size, seq_length, dtype=tf.float32): + # It is possible with an unspecified sequence length for seq_length to be + # a runtime value, which is unsupported by tf.constant. Per the TensorFlow + # docs, tf.fill can handle runtime dynamic shapes: + # https://www.tensorflow.org/api_docs/python/tf/fill + diag = tf.cast(tf.fill((seq_length,), 0.0), dtype) + + # set an additive 2D attention mask with all places being masked + to_mask = tf.cast(tf.fill((seq_length, seq_length), -10000.0), dtype) + + # set diagonal & lower triangular parts to 0 (i.e. the places not to be masked) + # TIP: think the 2D matrix as the space of (query_seq, key_seq) + to_mask = tf.linalg.band_part(to_mask, 0, -1) + # to_mask = tf.linalg.band_part(to_mask, -1, 0) + to_mask = tf.linalg.set_diag(to_mask, diagonal=diag) + + return tf.broadcast_to(input=to_mask, shape=(batch_size, 1, seq_length, seq_length)) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "embeddings", None) is not None: + with tf.name_scope(self.embeddings.name): + self.embeddings.build(None) + if getattr(self, "encoder", None) is not None: + with tf.name_scope(self.encoder.name): + self.encoder.build(None) + if getattr(self, "final_layer_norm", None) is not None: + with tf.name_scope(self.final_layer_norm.name): + self.final_layer_norm.build([None, None, self.embed_dim]) + + +@keras_serializable +class TFCLIPTextMainLayer(keras.layers.Layer): + config_class = CLIPTextConfig + + def __init__(self, config: CLIPTextConfig, **kwargs): + super().__init__(**kwargs) + self.config = config + self.text_model = TFCLIPTextTransformer(config, name="text_model") + + def get_input_embeddings(self) -> keras.layers.Layer: + return self.text_model.embeddings + + def set_input_embeddings(self, value: tf.Variable): + self.text_model.embeddings.weight = value + self.text_model.embeddings.vocab_size = shape_list(value)[0] + + @unpack_inputs + def call( + self, + input_ids: TFModelInputType | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: + if input_ids is None: + 
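+            # `input_ids` is required here: unlike `TFCLIPTextEmbeddings.call` above, the main layer
+            # exposes no `inputs_embeds` argument.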
raise ValueError("You have to specify input_ids") + + input_shape = shape_list(input_ids) + + if attention_mask is None: + attention_mask = tf.fill(dims=input_shape, value=1) + + text_model_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + return text_model_outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "text_model", None) is not None: + with tf.name_scope(self.text_model.name): + self.text_model.build(None) + + +class TFCLIPVisionTransformer(keras.layers.Layer): + def __init__(self, config: CLIPVisionConfig, **kwargs): + super().__init__(**kwargs) + + self.embeddings = TFCLIPVisionEmbeddings(config, name="embeddings") + self.pre_layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="pre_layrnorm") + self.encoder = TFCLIPEncoder(config, name="encoder") + self.post_layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="post_layernorm") + self.embed_dim = config.hidden_size + + def call( + self, + pixel_values: TFModelInputType, + output_attentions: bool, + output_hidden_states: bool, + return_dict: bool, + training: bool = False, + ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: + embedding_output = self.embeddings(pixel_values=pixel_values) + embedding_output = self.pre_layernorm(inputs=embedding_output) + + encoder_outputs = self.encoder( + hidden_states=embedding_output, + attention_mask=None, + causal_attention_mask=None, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + sequence_output = encoder_outputs[0] + pooled_output = sequence_output[:, 0, :] + pooled_output = self.post_layernorm(inputs=pooled_output) + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return TFBaseModelOutputWithPooling( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "embeddings", None) is not None: + with tf.name_scope(self.embeddings.name): + self.embeddings.build(None) + if getattr(self, "pre_layernorm", None) is not None: + with tf.name_scope(self.pre_layernorm.name): + self.pre_layernorm.build([None, None, self.embed_dim]) + if getattr(self, "encoder", None) is not None: + with tf.name_scope(self.encoder.name): + self.encoder.build(None) + if getattr(self, "post_layernorm", None) is not None: + with tf.name_scope(self.post_layernorm.name): + self.post_layernorm.build([None, self.embed_dim]) + + +@keras_serializable +class TFCLIPVisionMainLayer(keras.layers.Layer): + config_class = CLIPVisionConfig + + def __init__(self, config: CLIPVisionConfig, **kwargs): + super().__init__(**kwargs) + self.config = config + self.vision_model = TFCLIPVisionTransformer(config, name="vision_model") + + def get_input_embeddings(self) -> keras.layers.Layer: + return self.vision_model.embeddings + + @unpack_inputs + def call( + self, + pixel_values: TFModelInputType | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> 
Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + vision_model_outputs = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + return vision_model_outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "vision_model", None) is not None: + with tf.name_scope(self.vision_model.name): + self.vision_model.build(None) + + +@keras_serializable +class TFCLIPMainLayer(keras.layers.Layer): + config_class = CLIPConfig + + def __init__(self, config: CLIPConfig, **kwargs): + super().__init__(**kwargs) + + if not isinstance(config.text_config, CLIPTextConfig): + raise ValueError( + "config.text_config is expected to be of type CLIPTextConfig but is of type" + f" {type(config.text_config)}." + ) + + if not isinstance(config.vision_config, CLIPVisionConfig): + raise ValueError( + "config.vision_config is expected to be of type CLIPVisionConfig but is of type" + f" {type(config.vision_config)}." + ) + + self.config = config + + text_config = config.text_config + vision_config = config.vision_config + + self.projection_dim = config.projection_dim + + self.text_model = TFCLIPTextTransformer(text_config, name="text_model") + self.vision_model = TFCLIPVisionTransformer(vision_config, name="vision_model") + + self.visual_projection = keras.layers.Dense( + units=self.projection_dim, + kernel_initializer=get_initializer(vision_config.hidden_size**-0.5 * self.config.initializer_factor), + use_bias=False, + name="visual_projection", + ) + + self.text_projection = keras.layers.Dense( + units=self.projection_dim, + kernel_initializer=get_initializer(text_config.hidden_size**-0.5 * self.config.initializer_factor), + use_bias=False, + name="text_projection", + ) + self.text_embed_dim = text_config.hidden_size + self.vision_embed_dim = vision_config.hidden_size + + def build(self, input_shape: tf.TensorShape = None): + self.logit_scale = self.add_weight( + shape=(1,), + initializer=keras.initializers.Constant(self.config.logit_scale_init_value), + trainable=True, + name="logit_scale", + ) + + if self.built: + return + self.built = True + if getattr(self, "text_model", None) is not None: + with tf.name_scope(self.text_model.name): + self.text_model.build(None) + if getattr(self, "vision_model", None) is not None: + with tf.name_scope(self.vision_model.name): + self.vision_model.build(None) + if getattr(self, "visual_projection", None) is not None: + with tf.name_scope(self.visual_projection.name): + self.visual_projection.build([None, None, self.vision_embed_dim]) + if getattr(self, "text_projection", None) is not None: + with tf.name_scope(self.text_projection.name): + self.text_projection.build([None, None, self.text_embed_dim]) + + @unpack_inputs + def get_text_features( + self, + input_ids: TFModelInputType | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> tf.Tensor: + if input_ids is None: + raise ValueError("You have to specify either input_ids") + + input_shape = shape_list(input_ids) + + if attention_mask is None: + attention_mask = tf.fill(dims=input_shape, value=1) + + text_outputs = self.text_model( 
+ input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + pooled_output = text_outputs[1] + text_features = self.text_projection(inputs=pooled_output) + + return text_features + + @unpack_inputs + def get_image_features( + self, + pixel_values: TFModelInputType | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> tf.Tensor: + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + pooled_output = vision_outputs[1] # pooled_output + image_features = self.visual_projection(inputs=pooled_output) + + return image_features + + @unpack_inputs + def call( + self, + input_ids: TFModelInputType | None = None, + pixel_values: TFModelInputType | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + return_loss: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[TFCLIPOutput, Tuple[tf.Tensor]]: + if input_ids is None: + raise ValueError("You have to specify either input_ids") + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + input_shape = shape_list(input_ids) + + if attention_mask is None: + attention_mask = tf.fill(dims=input_shape, value=1) + + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + text_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + image_embeds = vision_outputs[1] + image_embeds = self.visual_projection(inputs=image_embeds) + + text_embeds = text_outputs[1] + text_embeds = self.text_projection(inputs=text_embeds) + + # normalized features + image_embeds = image_embeds / tf.norm(tensor=image_embeds, ord="euclidean", axis=-1, keepdims=True) + text_embeds = text_embeds / tf.norm(tensor=text_embeds, ord="euclidean", axis=-1, keepdims=True) + + # cosine similarity as logits + logit_scale = tf.math.exp(self.logit_scale) + logits_per_text = tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale + logits_per_image = tf.transpose(logits_per_text) + + loss = None + if return_loss: + loss = clip_loss(logits_per_text) + loss = tf.reshape(loss, (1,)) + + if not return_dict: + output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) + return (loss,) + output if loss is not None else output + + return TFCLIPOutput( + loss=loss, + logits_per_image=logits_per_image, + logits_per_text=logits_per_text, + text_embeds=text_embeds, + image_embeds=image_embeds, + text_model_output=text_outputs, + vision_model_output=vision_outputs, + ) + + +class TFCLIPPreTrainedModel(TFPreTrainedModel): + """ + An abstract class to handle weights 
initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = CLIPConfig + base_model_prefix = "clip" + _keys_to_ignore_on_load_missing = [r"position_ids"] + _keys_to_ignore_on_load_unexpected = [r"position_ids"] + + +CLIP_START_DOCSTRING = r""" + + This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it + as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and + behavior. + + + + TensorFlow models and layers in `transformers` accept two formats as input: + + - having all inputs as keyword arguments (like PyTorch models), or + - having all inputs as a list, tuple or dict in the first positional argument. + + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: + + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` + - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: + `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` + - a dictionary with one or several input Tensors associated to the input names given in the docstring: + `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + + + + Args: + config ([`CLIPConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. +""" + +CLIP_TEXT_INPUTS_DOCSTRING = r""" + Args: + input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and + [`PreTrainedTokenizer.encode`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. 
+ + [What are attention masks?](../glossary#attention-mask) + position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the + config will be used instead. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. This argument can be used only in eager mode, in graph mode the value in the config will be + used instead. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in + eager mode, in graph mode the value will always be set to True. + training (`bool`, *optional*, defaults to `False``): + Whether or not to use the model in training mode (some modules like dropout modules have different + behaviors between training and evaluation). +""" + +CLIP_VISION_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See + [`CLIPImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to + return the attentions tensors of all attention layers. See `attentions` under returned tensors for more + detail. This argument can be used only in eager mode, in graph mode the value in the config will be used + instead. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. This argument can be used only in eager mode, in graph mode the value in the config will be + used instead. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in + eager mode, in graph mode the value will always be set to True. + training (`bool`, *optional*, defaults to `False``): + Whether or not to use the model in training mode (some modules like dropout modules have different + behaviors between training and evaluation). +""" + +CLIP_INPUTS_DOCSTRING = r""" + Args: + input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and + [`PreTrainedTokenizer.encode`] for details. + + [What are input IDs?](../glossary#input-ids) + pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See + [`CLIPImageProcessor.__call__`] for details. + attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + return_loss (`bool`, *optional*): + Whether or not to return the contrastive loss. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the + config will be used instead. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. This argument can be used only in eager mode, in graph mode the value in the config will be + used instead. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in + eager mode, in graph mode the value will always be set to True. + training (`bool`, *optional*, defaults to `False``): + Whether or not to use the model in training mode (some modules like dropout modules have different + behaviors between training and evaluation). +""" + + +class TFCLIPTextModel(TFCLIPPreTrainedModel): + config_class = CLIPTextConfig + + def __init__(self, config: CLIPTextConfig, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + + self.clip = TFCLIPTextMainLayer(config, name="clip") + + @unpack_inputs + @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=CLIPTextConfig) + def call( + self, + input_ids: TFModelInputType | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: Optional[bool] = False, + ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: + r""" + Returns: + + Examples: + + ```python + >>> from transformers import AutoTokenizer, TFCLIPTextModel + + >>> model = TFCLIPTextModel.from_pretrained("openai/clip-vit-base-patch32") + >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32") + + >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf") + + >>> outputs = model(**inputs) + >>> last_hidden_state = outputs.last_hidden_state + >>> pooled_output = outputs.pooler_output # pooled (EOS token) states + ```""" + + outputs = self.clip( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + return outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "clip", None) is not None: + with tf.name_scope(self.clip.name): + self.clip.build(None) + + +class TFCLIPVisionModel(TFCLIPPreTrainedModel): + config_class = CLIPVisionConfig + main_input_name = "pixel_values" + + def __init__(self, config: 
CLIPVisionConfig, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + + self.clip = TFCLIPVisionMainLayer(config, name="clip") + + @unpack_inputs + @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=CLIPVisionConfig) + def call( + self, + pixel_values: TFModelInputType | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: Optional[bool] = False, + ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: + r""" + Returns: + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, TFCLIPVisionModel + + >>> model = TFCLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32") + >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, return_tensors="tf") + + >>> outputs = model(**inputs) + >>> last_hidden_state = outputs.last_hidden_state + >>> pooled_output = outputs.pooler_output # pooled CLS states + ```""" + + outputs = self.clip( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + return outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "clip", None) is not None: + with tf.name_scope(self.clip.name): + self.clip.build(None) + + +@add_start_docstrings(CLIP_START_DOCSTRING) +class TFCLIPModel(TFCLIPPreTrainedModel): + config_class = CLIPConfig + + def __init__(self, config: CLIPConfig, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + + self.clip = TFCLIPMainLayer(config, name="clip") + + @unpack_inputs + @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + def get_text_features( + self, + input_ids: TFModelInputType | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> tf.Tensor: + r""" + Returns: + text_features (`tf.Tensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying + the projection layer to the pooled output of [`TFCLIPTextModel`]. 
+ + Examples: + + ```python + >>> from transformers import AutoTokenizer, TFCLIPModel + + >>> model = TFCLIPModel.from_pretrained("openai/clip-vit-base-patch32") + >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32") + + >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf") + >>> text_features = model.get_text_features(**inputs) + ```""" + + text_features = self.clip.get_text_features( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + return text_features + + @unpack_inputs + @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING) + def get_image_features( + self, + pixel_values: TFModelInputType | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> tf.Tensor: + r""" + Returns: + image_features (`tf.Tensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying + the projection layer to the pooled output of [`TFCLIPVisionModel`]. + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, TFCLIPModel + + >>> model = TFCLIPModel.from_pretrained("openai/clip-vit-base-patch32") + >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, return_tensors="tf") + + >>> image_features = model.get_image_features(**inputs) + ```""" + + image_features = self.clip.get_image_features( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + return image_features + + @unpack_inputs + @add_start_docstrings_to_model_forward(CLIP_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=TFCLIPOutput, config_class=CLIPConfig) + def call( + self, + input_ids: TFModelInputType | None = None, + pixel_values: TFModelInputType | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + return_loss: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[TFCLIPOutput, Tuple[tf.Tensor]]: + r""" + Returns: + + Examples: + + ```python + >>> import tensorflow as tf + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, TFCLIPModel + + >>> model = TFCLIPModel.from_pretrained("openai/clip-vit-base-patch32") + >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor( + ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="tf", padding=True + ... 
) + + >>> outputs = model(**inputs) + >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score + >>> probs = tf.nn.softmax(logits_per_image, axis=1) # we can take the softmax to get the label probabilities + ```""" + + outputs = self.clip( + input_ids=input_ids, + pixel_values=pixel_values, + attention_mask=attention_mask, + position_ids=position_ids, + return_loss=return_loss, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + return outputs + + def serving_output(self, output: TFCLIPOutput) -> TFCLIPOutput: + # TODO: As is this currently fails with saved_model=True, because + # TensorFlow cannot trace through nested dataclasses. Reference: + # https://github.com/huggingface/transformers/pull/16886 + return output + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "clip", None) is not None: + with tf.name_scope(self.clip.name): + self.clip.build(None) diff --git a/venv/lib/python3.10/site-packages/transformers/models/clip/processing_clip.py b/venv/lib/python3.10/site-packages/transformers/models/clip/processing_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..60805402b4cea7d6da9f4852a48f960564a8f4ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/clip/processing_clip.py @@ -0,0 +1,153 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Image/Text processor class for CLIP +""" + +import warnings + +from ...processing_utils import ProcessorMixin +from ...tokenization_utils_base import BatchEncoding + + +class CLIPProcessor(ProcessorMixin): + r""" + Constructs a CLIP processor which wraps a CLIP image processor and a CLIP tokenizer into a single processor. + + [`CLIPProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`CLIPTokenizerFast`]. See the + [`~CLIPProcessor.__call__`] and [`~CLIPProcessor.decode`] for more information. + + Args: + image_processor ([`CLIPImageProcessor`], *optional*): + The image processor is a required input. + tokenizer ([`CLIPTokenizerFast`], *optional*): + The tokenizer is a required input. 
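+
+    Example (an illustrative sketch only, mirroring the `TFCLIPModel` docstring examples elsewhere in this diff and
+    assuming the public `openai/clip-vit-base-patch32` checkpoint):
+
+    ```python
+    >>> from PIL import Image
+    >>> import requests
+    >>> from transformers import CLIPProcessor
+
+    >>> processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
+
+    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+    >>> image = Image.open(requests.get(url, stream=True).raw)
+
+    >>> # a single call returns input_ids, attention_mask and pixel_values
+    >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="np", padding=True)
+    ```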
+ """ + + attributes = ["image_processor", "tokenizer"] + image_processor_class = "CLIPImageProcessor" + tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast") + + def __init__(self, image_processor=None, tokenizer=None, **kwargs): + feature_extractor = None + if "feature_extractor" in kwargs: + warnings.warn( + "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" + " instead.", + FutureWarning, + ) + feature_extractor = kwargs.pop("feature_extractor") + + image_processor = image_processor if image_processor is not None else feature_extractor + if image_processor is None: + raise ValueError("You need to specify an `image_processor`.") + if tokenizer is None: + raise ValueError("You need to specify a `tokenizer`.") + + super().__init__(image_processor, tokenizer) + + def __call__(self, text=None, images=None, return_tensors=None, **kwargs): + """ + Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` + and `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode + the text. To prepare the image(s), this method forwards the `images` and `kwrags` arguments to + CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the doctsring + of the above two methods for more information. + + Args: + text (`str`, `List[str]`, `List[List[str]]`): + The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings + (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set + `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). + images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): + The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch + tensor. Both channels-first and channels-last formats are supported. + + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors of a particular framework. Acceptable values are: + + - `'tf'`: Return TensorFlow `tf.constant` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return NumPy `np.ndarray` objects. + - `'jax'`: Return JAX `jnp.ndarray` objects. + + Returns: + [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: + + - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. + - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when + `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not + `None`). + - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. + """ + tokenizer_kwargs, image_processor_kwargs = {}, {} + if kwargs: + tokenizer_kwargs = {k: v for k, v in kwargs.items() if k not in self.image_processor._valid_processor_keys} + image_processor_kwargs = { + k: v for k, v in kwargs.items() if k in self.image_processor._valid_processor_keys + } + + if text is None and images is None: + raise ValueError("You have to specify either text or images. 
Both cannot be none.") + + if text is not None: + encoding = self.tokenizer(text, return_tensors=return_tensors, **tokenizer_kwargs) + + if images is not None: + image_features = self.image_processor(images, return_tensors=return_tensors, **image_processor_kwargs) + + if text is not None and images is not None: + encoding["pixel_values"] = image_features.pixel_values + return encoding + elif text is not None: + return encoding + else: + return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) + + def batch_decode(self, *args, **kwargs): + """ + This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please + refer to the docstring of this method for more information. + """ + return self.tokenizer.batch_decode(*args, **kwargs) + + def decode(self, *args, **kwargs): + """ + This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to + the docstring of this method for more information. + """ + return self.tokenizer.decode(*args, **kwargs) + + @property + def model_input_names(self): + tokenizer_input_names = self.tokenizer.model_input_names + image_processor_input_names = self.image_processor.model_input_names + return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) + + @property + def feature_extractor_class(self): + warnings.warn( + "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", + FutureWarning, + ) + return self.image_processor_class + + @property + def feature_extractor(self): + warnings.warn( + "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", + FutureWarning, + ) + return self.image_processor diff --git a/venv/lib/python3.10/site-packages/transformers/models/clip/tokenization_clip.py b/venv/lib/python3.10/site-packages/transformers/models/clip/tokenization_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..7b4ad88b80a9e096106f946445212c71adfd9cf9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/clip/tokenization_clip.py @@ -0,0 +1,516 @@ +# coding=utf-8 +# Copyright 2021 The Open AI Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tokenization classes for CLIP.""" + +import json +import os +import unicodedata +from functools import lru_cache +from typing import List, Optional, Tuple + +import regex as re + +from ...tokenization_utils import AddedToken, PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace +from ...utils import logging + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = { + "vocab_file": "vocab.json", + "merges_file": "merges.txt", +} + + +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control + characters the bpe code barfs on. + + The reversible bpe codes work on unicode strings. 
This means you need a large # of unicode characters in your vocab + if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for + decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup + tables between utf-8 bytes and unicode strings. + """ + bs = ( + list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) + ) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8 + n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """ + Return set of symbol pairs in a word. + + Word is represented as tuple of symbols (symbols being variable-length strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +def whitespace_clean(text): + text = re.sub(r"\s+", " ", text) + text = text.strip() + return text + + +# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + text = text.strip() + if not text: + return [] + tokens = text.split() + return tokens + + +# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer +class BasicTokenizer(object): + """ + Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). + + Args: + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + never_split (`Iterable`, *optional*): + Collection of tokens which will never be split during tokenization. Only has an effect when + `do_basic_tokenize=True` + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. + + This should likely be deactivated for Japanese (see this + [issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original BERT). + do_split_on_punc (`bool`, *optional*, defaults to `True`): + In some instances we want to skip the basic punctuation splitting so that later tokenization can capture + the full context of the words, such as contractions. + """ + + def __init__( + self, + do_lower_case=True, + never_split=None, + tokenize_chinese_chars=True, + strip_accents=None, + do_split_on_punc=True, + ): + if never_split is None: + never_split = [] + self.do_lower_case = do_lower_case + self.never_split = set(never_split) + self.tokenize_chinese_chars = tokenize_chinese_chars + self.strip_accents = strip_accents + self.do_split_on_punc = do_split_on_punc + + def tokenize(self, text, never_split=None): + """ + Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer. + + Args: + never_split (`List[str]`, *optional*) + Kept for backward compatibility purposes. Now implemented directly at the base class level (see + [`PreTrainedTokenizer.tokenize`]) List of token not to split. + """ + # union() returns a new set by concatenating the two sets. + never_split = self.never_split.union(set(never_split)) if never_split else self.never_split + text = self._clean_text(text) + + # This was added on November 1st, 2018 for the multilingual and Chinese + # models. 
This is also applied to the English models now, but it doesn't + # matter since the English models were not trained on any Chinese data + # and generally don't have any Chinese data in them (there are Chinese + # characters in the vocabulary because Wikipedia does have some Chinese + # words in the English Wikipedia.). + if self.tokenize_chinese_chars: + text = self._tokenize_chinese_chars(text) + # prevents treating the same character with different unicode codepoints as different characters + unicode_normalized_text = unicodedata.normalize("NFC", text) + orig_tokens = whitespace_tokenize(unicode_normalized_text) + split_tokens = [] + for token in orig_tokens: + if token not in never_split: + if self.do_lower_case: + token = token.lower() + if self.strip_accents is not False: + token = self._run_strip_accents(token) + elif self.strip_accents: + token = self._run_strip_accents(token) + split_tokens.extend(self._run_split_on_punc(token, never_split)) + + output_tokens = whitespace_tokenize(" ".join(split_tokens)) + return output_tokens + + def _run_strip_accents(self, text): + """Strips accents from a piece of text.""" + text = unicodedata.normalize("NFD", text) + output = [] + for char in text: + cat = unicodedata.category(char) + if cat == "Mn": + continue + output.append(char) + return "".join(output) + + def _run_split_on_punc(self, text, never_split=None): + """Splits punctuation on a piece of text.""" + if not self.do_split_on_punc or (never_split is not None and text in never_split): + return [text] + chars = list(text) + i = 0 + start_new_word = True + output = [] + while i < len(chars): + char = chars[i] + if _is_punctuation(char): + output.append([char]) + start_new_word = True + else: + if start_new_word: + output.append([]) + start_new_word = False + output[-1].append(char) + i += 1 + + return ["".join(x) for x in output] + + def _tokenize_chinese_chars(self, text): + """Adds whitespace around any CJK character.""" + output = [] + for char in text: + cp = ord(char) + if self._is_chinese_char(cp): + output.append(" ") + output.append(char) + output.append(" ") + else: + output.append(char) + return "".join(output) + + def _is_chinese_char(self, cp): + """Checks whether CP is the codepoint of a CJK character.""" + # This defines a "chinese character" as anything in the CJK Unicode block: + # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) + # + # Note that the CJK Unicode block is NOT all Japanese and Korean characters, + # despite its name. The modern Korean Hangul alphabet is a different block, + # as is Japanese Hiragana and Katakana. Those alphabets are used to write + # space-separated words, so they are not treated specially and handled + # like the all of the other languages. + if ( + (cp >= 0x4E00 and cp <= 0x9FFF) + or (cp >= 0x3400 and cp <= 0x4DBF) # + or (cp >= 0x20000 and cp <= 0x2A6DF) # + or (cp >= 0x2A700 and cp <= 0x2B73F) # + or (cp >= 0x2B740 and cp <= 0x2B81F) # + or (cp >= 0x2B820 and cp <= 0x2CEAF) # + or (cp >= 0xF900 and cp <= 0xFAFF) + or (cp >= 0x2F800 and cp <= 0x2FA1F) # + ): # + return True + + return False + + def _clean_text(self, text): + """Performs invalid character removal and whitespace cleanup on text.""" + output = [] + for char in text: + cp = ord(char) + if cp == 0 or cp == 0xFFFD or _is_control(char): + continue + if _is_whitespace(char): + output.append(" ") + else: + output.append(char) + return "".join(output) + + +class CLIPTokenizer(PreTrainedTokenizer): + """ + Construct a CLIP tokenizer. 
Based on byte-level Byte-Pair-Encoding. + + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to + this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + merges_file (`str`): + Path to the merges file. + errors (`str`, *optional*, defaults to `"replace"`): + Paradigm to follow when decoding bytes to UTF-8. See + [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. + unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + bos_token (`str`, *optional*, defaults to `"<|startoftext|>"`): + The beginning of sequence token. + eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The end of sequence token. + pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The token used for padding, for example when batching sequences of different lengths. + """ + + vocab_files_names = VOCAB_FILES_NAMES + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, + vocab_file, + merges_file, + errors="replace", + unk_token="<|endoftext|>", + bos_token="<|startoftext|>", + eos_token="<|endoftext|>", + pad_token="<|endoftext|>", # hack to enable padding + **kwargs, + ): + bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token + eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token + unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token + try: + import ftfy + + self.fix_text = ftfy.fix_text + except ImportError: + logger.info("ftfy or spacy is not installed using custom BasicTokenizer instead of ftfy.") + self.nlp = BasicTokenizer(strip_accents=False, do_split_on_punc=False) + self.fix_text = None + + with open(vocab_file, encoding="utf-8") as vocab_handle: + self.encoder = json.load(vocab_handle) + self.decoder = {v: k for k, v in self.encoder.items()} + self.errors = errors # how to handle errors in decoding + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + with open(merges_file, encoding="utf-8") as merges_handle: + bpe_merges = merges_handle.read().strip().split("\n")[1 : 49152 - 256 - 2 + 1] + bpe_merges = [tuple(merge.split()) for merge in bpe_merges] + self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) + self.cache = {"<|startoftext|>": "<|startoftext|>", "<|endoftext|>": "<|endoftext|>"} + + self.pat = re.compile( + r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", + re.IGNORECASE, + ) + + super().__init__( + errors=errors, + unk_token=unk_token, + bos_token=bos_token, + eos_token=eos_token, + pad_token=pad_token, + **kwargs, + ) + + @property + def vocab_size(self): + return len(self.encoder) + + def get_vocab(self): + return dict(self.encoder, **self.added_tokens_encoder) + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. 
A CLIP sequence has the following format: + + - single sequence: `<|startoftext|> X <|endoftext|>` + + Pairs of sequences are not the expected use case, but they will be handled without a separator. + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + bos_token = [self.bos_token_id] + eos_token = [self.eos_token_id] + + if token_ids_1 is None: + return bos_token + token_ids_0 + eos_token + return bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token + + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if token_ids_1 is None: + return [1] + ([0] * len(token_ids_0)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1] + [1] + ([0] * len(token_ids_1)) + [1] + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed. CLIP does not make use of token type ids, therefore a list of + zeros is returned. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of zeros. 
+ """ + bos_token = [self.bos_token_id] + eos_token = [self.eos_token_id] + + if token_ids_1 is None: + return len(bos_token + token_ids_0 + eos_token) * [0] + return len(bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token) * [0] + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token[:-1]) + (token[-1] + "",) + pairs = get_pairs(word) + + if not pairs: + return token + "" + + while True: + bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + except ValueError: + new_word.extend(word[i:]) + break + else: + new_word.extend(word[i:j]) + i = j + + if word[i] == first and i < len(word) - 1 and word[i + 1] == second: + new_word.append(first + second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = " ".join(word) + self.cache[token] = word + return word + + def _tokenize(self, text): + """Tokenize a string.""" + bpe_tokens = [] + if self.fix_text is None: + text = " ".join(self.nlp.tokenize(text)) + else: + text = whitespace_clean(self.fix_text(text)).lower() + + for token in re.findall(self.pat, text): + token = "".join( + self.byte_encoder[b] for b in token.encode("utf-8") + ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) + bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) + return bpe_tokens + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.encoder.get(token, self.encoder.get(self.unk_token)) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.decoder.get(index) + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + text = "".join(tokens) + byte_array = bytearray([self.byte_decoder[c] for c in text]) + text = byte_array.decode("utf-8", errors=self.errors).replace("", " ").strip() + return text + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not os.path.isdir(save_directory): + logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) + return + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + merge_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] + ) + + with open(vocab_file, "w", encoding="utf-8") as f: + f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") + + index = 0 + with open(merge_file, "w", encoding="utf-8") as writer: + writer.write("#version: 0.2\n") + for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + "Saving vocabulary to {}: BPE merge indices are not consecutive." 
+ " Please check that the tokenizer is not corrupted!".format(merge_file) + ) + index = token_index + writer.write(" ".join(bpe_tokens) + "\n") + index += 1 + + return vocab_file, merge_file diff --git a/venv/lib/python3.10/site-packages/transformers/models/clip/tokenization_clip_fast.py b/venv/lib/python3.10/site-packages/transformers/models/clip/tokenization_clip_fast.py new file mode 100644 index 0000000000000000000000000000000000000000..6198958a034f433be6ce9fe522c02947f2835d92 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/clip/tokenization_clip_fast.py @@ -0,0 +1,159 @@ +# coding=utf-8 +# Copyright 2021 The Open AI Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tokenization classes for OpenAI GPT.""" + + +from typing import List, Optional, Tuple + +from tokenizers import pre_tokenizers + +from ...tokenization_utils_fast import PreTrainedTokenizerFast +from ...utils import logging +from .tokenization_clip import CLIPTokenizer + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} + + +class CLIPTokenizerFast(PreTrainedTokenizerFast): + """ + Construct a "fast" CLIP tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level + Byte-Pair-Encoding. + + This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should + refer to this superclass for more information regarding those methods. + + Args: + vocab_file (`str`, *optional*): + Path to the vocabulary file. + merges_file (`str`, *optional*): + Path to the merges file. + tokenizer_file (`str`, *optional*): + The path to a tokenizer file to use instead of the vocab file. + unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + bos_token (`str`, *optional*, defaults to `"<|startoftext|>"`): + The beginning of sequence token. + eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The end of sequence token. + pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The token used for padding, for example when batching sequences of different lengths. 
+ """ + + vocab_files_names = VOCAB_FILES_NAMES + model_input_names = ["input_ids", "attention_mask"] + slow_tokenizer_class = CLIPTokenizer + + def __init__( + self, + vocab_file=None, + merges_file=None, + tokenizer_file=None, + unk_token="<|endoftext|>", + bos_token="<|startoftext|>", + eos_token="<|endoftext|>", + pad_token="<|endoftext|>", # hack to enable padding + **kwargs, + ): + super().__init__( + vocab_file, + merges_file, + tokenizer_file=tokenizer_file, + unk_token=unk_token, + bos_token=bos_token, + eos_token=eos_token, + pad_token=pad_token, + **kwargs, + ) + + if not isinstance(self.backend_tokenizer.pre_tokenizer, pre_tokenizers.Sequence): + raise ValueError( + "The `backend_tokenizer` provided does not match the expected format. The CLIP tokenizer has been" + " heavily modified from transformers version 4.17.0. You need to convert the tokenizer you are using" + " to be compatible with this version.The easiest way to do so is" + ' `CLIPTokenizerFast.from_pretrained("path_to_local_folder_or_hub_repo, from_slow=True)`. If you want' + " to use your existing tokenizer, you will have to revert to a version prior to 4.17.0 of" + " transformers." + ) + + self._wrap_decode_method_backend_tokenizer() + + # Very ugly hack to enable padding to have a correct decoding see https://github.com/huggingface/tokenizers/issues/872 + def _wrap_decode_method_backend_tokenizer(self): + orig_decode_method = self.backend_tokenizer.decode + + def new_decode_method(*args, **kwargs): + text = orig_decode_method(*args, **kwargs) + text = text.replace(self.backend_tokenizer.model.end_of_word_suffix, " ").strip() + return text + + self.backend_tokenizer.decode = new_decode_method + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A CLIP sequence has the following format: + + - single sequence: `<|startoftext|> X <|endoftext|>` + + Pairs of sequences are not the expected use case, but they will be handled without a separator. + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + bos_token = [self.bos_token_id] + eos_token = [self.eos_token_id] + + if token_ids_1 is None: + return bos_token + token_ids_0 + eos_token + return bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed. CLIP does not make use of token type ids, therefore a list of + zeros is returned. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of zeros. 
+ """ + bos_token = [self.bos_token_id] + eos_token = [self.eos_token_id] + + if token_ids_1 is None: + return len(bos_token + token_ids_0 + eos_token) * [0] + return len(bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token) * [0] + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + files = self._tokenizer.model.save(save_directory, name=filename_prefix) + return tuple(files) diff --git a/venv/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c366616bc172d409df74abd40e5844f21d66223d Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/image_processing_imagegpt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/image_processing_imagegpt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..823a1e582f141214286361f3e752f460128f7823 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/image_processing_imagegpt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/olmo/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/olmo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3cead944521b41c60f024b0b81481f90b7f09c4b --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/olmo/__init__.py @@ -0,0 +1,59 @@ +# Copyright 2024 EleutherAI and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_sentencepiece_available, + is_tokenizers_available, + is_torch_available, +) + + +_import_structure = { + "configuration_olmo": ["OLMO_PRETRAINED_CONFIG_ARCHIVE_MAP", "OlmoConfig"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_olmo"] = [ + "OlmoForCausalLM", + "OlmoModel", + "OlmoPreTrainedModel", + ] + +if TYPE_CHECKING: + from .configuration_olmo import OLMO_PRETRAINED_CONFIG_ARCHIVE_MAP, OlmoConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_olmo import ( + OlmoForCausalLM, + OlmoModel, + OlmoPreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f6fccc0590cf9dd871656c4b46f025c1c905d5a Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/configuration_olmo.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/configuration_olmo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51e766c2eecd1dca6b6803315370bd1764c83c3c Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/configuration_olmo.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/convert_olmo_weights_to_hf.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/convert_olmo_weights_to_hf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c35cbbaed62ff69c146eb47cad068262d0c7c3b5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/convert_olmo_weights_to_hf.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/modeling_olmo.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/modeling_olmo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4352c2c99d0374f06a3c1446c4623e948ac548ae Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/olmo/__pycache__/modeling_olmo.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/olmo/configuration_olmo.py b/venv/lib/python3.10/site-packages/transformers/models/olmo/configuration_olmo.py new file mode 100644 index 0000000000000000000000000000000000000000..17a790227683bfe50b7f0320a875d4871dcfc2ca --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/olmo/configuration_olmo.py @@ -0,0 +1,183 @@ +# coding=utf-8 +# Copyright 2024 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. 
It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" OLMo model configuration""" + +from ...configuration_utils import PretrainedConfig +from ...utils import logging +from ..deprecated._archive_maps import OLMO_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +logger = logging.get_logger(__name__) + + +class OlmoConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`OlmoModel`]. It is used to instantiate an OLMo + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the [allenai/OLMo-7B-hf](https://huggingface.co/allenai/OLMo-7B-hf). + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 50304): + Vocabulary size of the OLMo model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`OlmoModel`] + hidden_size (`int`, *optional*, defaults to 4096): + Dimension of the hidden representations. + intermediate_size (`int`, *optional*, defaults to 11008): + Dimension of the MLP representations. + num_hidden_layers (`int`, *optional*, defaults to 32): + Number of hidden layers in the Transformer decoder. + num_attention_heads (`int`, *optional*, defaults to 32): + Number of attention heads for each attention layer in the Transformer decoder. + num_key_value_heads (`int`, *optional*): + This is the number of key_value heads that should be used to implement Grouped Query Attention. If + `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if + `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When + converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed + by meanpooling all the original heads within that group. For more details checkout [this + paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to + `num_attention_heads`. + hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): + The non-linear activation function (function or string) in the decoder. + max_position_embeddings (`int`, *optional*, defaults to 2048): + The maximum sequence length that this model might ever be used with. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. 
+ pad_token_id (`int`, *optional*, defaults to 1): + Padding token id. + bos_token_id (`int`, *optional*): + Beginning of stream token id. + eos_token_id (`int`, *optional*, defaults to 50279): + End of stream token id. + tie_word_embeddings (`bool`, *optional*, defaults to `False`): + Whether to tie weight embeddings + rope_theta (`float`, *optional*, defaults to 10000.0): + The base period of the RoPE embeddings. + rope_scaling (`Dict`, *optional*): + Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling + strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is + `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update + `max_position_embeddings` to the expected new maximum. See the following thread for more information on how + these scaling strategies behave: + https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an + experimental feature, subject to breaking API changes in future versions. + attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`): + Whether to use a bias in the query, key, value and output projection layers during self-attention. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + clip_qkv (`float`, *optional*): + If not `None`, elements of query, key and value attention states are clipped so that their + absolute value does not exceed this value. + + ```python + >>> from transformers import OlmoModel, OlmoConfig + + >>> # Initializing a OLMo 7B style configuration + >>> configuration = OlmoConfig() + + >>> # Initializing a model from the OLMo 7B style configuration + >>> model = OlmoModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "olmo" + keys_to_ignore_at_inference = ["past_key_values"] + + def __init__( + self, + vocab_size=50304, + hidden_size=4096, + intermediate_size=11008, + num_hidden_layers=32, + num_attention_heads=32, + num_key_value_heads=None, + hidden_act="silu", + max_position_embeddings=2048, + initializer_range=0.02, + use_cache=True, + pad_token_id=1, + bos_token_id=None, + eos_token_id=50279, + tie_word_embeddings=False, + rope_theta=10000.0, + rope_scaling=None, + attention_bias=False, + attention_dropout=0.0, + clip_qkv=None, + **kwargs, + ): + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + + # for backward compatibility + if num_key_value_heads is None: + num_key_value_heads = num_attention_heads + + self.num_key_value_heads = num_key_value_heads + self.hidden_act = hidden_act + self.initializer_range = initializer_range + self.use_cache = use_cache + self.rope_theta = rope_theta + self.rope_scaling = rope_scaling + self._rope_scaling_validation() + self.attention_bias = attention_bias + self.attention_dropout = attention_dropout + self.clip_qkv = clip_qkv + + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + tie_word_embeddings=tie_word_embeddings, + **kwargs, + ) + + # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation + def _rope_scaling_validation(self): + """ + Validate the `rope_scaling` 
configuration. + """ + if self.rope_scaling is None: + return + + if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2: + raise ValueError( + "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}" + ) + rope_scaling_type = self.rope_scaling.get("type", None) + rope_scaling_factor = self.rope_scaling.get("factor", None) + if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: + raise ValueError( + f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" + ) + if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: + raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}") diff --git a/venv/lib/python3.10/site-packages/transformers/models/olmo/convert_olmo_weights_to_hf.py b/venv/lib/python3.10/site-packages/transformers/models/olmo/convert_olmo_weights_to_hf.py new file mode 100644 index 0000000000000000000000000000000000000000..0e77bdc69e7a0ca713a1696a486576dfd051f059 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/olmo/convert_olmo_weights_to_hf.py @@ -0,0 +1,248 @@ +# Copyright 2024 EleutherAI and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import gc +import json +import os +import shutil +from pathlib import Path + +import torch +import yaml +from tokenizers import Tokenizer + +from transformers import OlmoConfig, OlmoForCausalLM +from transformers.models.gpt_neox.tokenization_gpt_neox_fast import GPTNeoXTokenizerFast + + +""" +Sample usage: + +``` +python src/transformers/models/olmo/convert_olmo_weights_to_hf.py \ + --input_dir /path/to/downloaded/olmo/weights --model_size 7B --output_dir /output/path +``` + +Thereafter, models can be loaded via: + +```py +from transformers import OlmoForCausalLM, AutoTokenizer + +model = OlmoForCausalLM.from_pretrained("/output/path") +tokenizer = AutoTokenizer.from_pretrained("/output/path") +``` + +Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions +come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM). 
+""" + + +def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256): + return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of) + + +def read_json(path): + with open(path, "r") as f: + return json.load(f) + + +def write_json(text, path): + with open(path, "w") as f: + json.dump(text, f) + + +def write_model(model_path, input_base_path, tokenizer_path=None, safe_serialization=True, fix_eos_token_id=True): + os.makedirs(model_path, exist_ok=True) + tmp_model_path = os.path.join(model_path, "tmp") + os.makedirs(tmp_model_path, exist_ok=True) + + config_path = Path(input_base_path) / "config.yaml" + olmo_config = yaml.safe_load(config_path.read_text())["model"] + + n_layers = olmo_config["n_layers"] + n_heads = olmo_config["n_heads"] + dim = olmo_config["d_model"] + dims_per_head = dim // n_heads + base = 10000.0 + inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head)) + max_position_embeddings = olmo_config["max_sequence_length"] + + vocab_size = olmo_config.get("embedding_size", olmo_config["vocab_size"]) + + if olmo_config.get("n_kv_heads", None) is not None: + num_key_value_heads = olmo_config["n_kv_heads"] # for GQA / MQA + elif olmo_config["multi_query_attention"]: # compatibility with other checkpoints + num_key_value_heads = 1 + else: + num_key_value_heads = n_heads + + print(f"Fetching all parameters from the checkpoint at {input_base_path}.") + + # Not sharded + # (The sharded implementation would also work, but this is simpler.) + loaded = torch.load(os.path.join(input_base_path, "model.pt"), map_location="cpu") + + param_count = 0 + index_dict = {"weight_map": {}} + for layer_i in range(n_layers): + filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin" + # Unsharded + # TODO: Layernorm stuff + # TODO: multi query attention + fused_dims = [dim, dims_per_head * num_key_value_heads, dims_per_head * num_key_value_heads] + q_proj_weight, k_proj_weight, v_proj_weight = torch.split( + loaded[f"transformer.blocks.{layer_i}.att_proj.weight"], fused_dims, dim=0 + ) + up_proj_weight, gate_proj_weight = torch.chunk( + loaded[f"transformer.blocks.{layer_i}.ff_proj.weight"], 2, dim=0 + ) + state_dict = { + f"model.layers.{layer_i}.self_attn.q_proj.weight": q_proj_weight, + f"model.layers.{layer_i}.self_attn.k_proj.weight": k_proj_weight, + f"model.layers.{layer_i}.self_attn.v_proj.weight": v_proj_weight, + f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"transformer.blocks.{layer_i}.attn_out.weight"], + f"model.layers.{layer_i}.mlp.gate_proj.weight": gate_proj_weight, + f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"transformer.blocks.{layer_i}.ff_out.weight"], + f"model.layers.{layer_i}.mlp.up_proj.weight": up_proj_weight, + } + + state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq + + for k, v in state_dict.items(): + index_dict["weight_map"][k] = filename + param_count += v.numel() + torch.save(state_dict, os.path.join(tmp_model_path, filename)) + + filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin" + + # Unsharded + # TODO: Deal with weight-tying + state_dict = { + "model.embed_tokens.weight": loaded["transformer.wte.weight"], + "lm_head.weight": loaded["transformer.ff_out.weight"] + if "transformer.ff_out.weight" in loaded + else loaded["transformer.wte.weight"], + } + + for k, v in state_dict.items(): + index_dict["weight_map"][k] = filename + param_count += v.numel() + torch.save(state_dict, os.path.join(tmp_model_path, filename)) 
+ + # Write configs + index_dict["metadata"] = {"total_size": param_count * 2} + write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json")) + + if olmo_config.get("mlp_hidden_size", None) is not None: + intermediate_size = olmo_config["mlp_hidden_size"] // 2 + else: + intermediate_size = (dim * olmo_config["mlp_ratio"]) // 2 + + config = OlmoConfig( + vocab_size=vocab_size, + hidden_size=dim, + intermediate_size=intermediate_size, + num_hidden_layers=n_layers, + num_attention_heads=n_heads, + num_key_value_heads=num_key_value_heads, + max_position_embeddings=max_position_embeddings, + pad_token_id=olmo_config["pad_token_id"], + bos_token_id=None, + eos_token_id=olmo_config["eos_token_id"], + tie_word_embeddings=olmo_config["weight_tying"], + rope_theta=base, + clip_qkv=olmo_config.get("clip_qkv"), + ) + config.save_pretrained(tmp_model_path) + + # Make space so we can load the model properly now. + del state_dict + del loaded + gc.collect() + + if tokenizer_path is not None: + _write_tokenizer(model_path, config, tokenizer_path, fix_eos_token_id) + + print("Loading the checkpoint in a OLMo model.") + model = OlmoForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float32, low_cpu_mem_usage=True) + # Avoid saving this as part of the config. + del model.config._name_or_path + print("Saving in the Transformers format.") + model.save_pretrained(model_path, safe_serialization=safe_serialization) + shutil.rmtree(tmp_model_path) + + +def _write_tokenizer( + output_path: Path, config: OlmoConfig, input_tokenizer_path: Path, fix_eos_token_id: bool = True +) -> None: + print(f"Saving a {GPTNeoXTokenizerFast.__name__} to {output_path}.") + + base_tokenizer = Tokenizer.from_file(str(input_tokenizer_path)) + + eos_token_id = config.eos_token_id if config.eos_token_id is not None else base_tokenizer.get_vocab_size() - 1 + pad_token_id = config.pad_token_id if config.pad_token_id is not None else eos_token_id + + if fix_eos_token_id and eos_token_id == 0: + # Fixing a bug in OLMo where eos token id was incorrectly set + print("Changing eos_token_id from 0 to 50279.") + eos_token_id = 50279 + + tokenizer = GPTNeoXTokenizerFast( + tokenizer_object=base_tokenizer, + eos_token=base_tokenizer.decode([eos_token_id], skip_special_tokens=False), + pad_token=base_tokenizer.decode([pad_token_id], skip_special_tokens=False), + unk_token=None, + bos_token=None, + ) + + tokenizer.save_pretrained(output_path) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--input_dir", + required=True, + help="Location of OLMo weights, which contains config.yaml and model.pt.", + ) + parser.add_argument( + "--tokenizer_json_path", + default=None, + help="Location of OLMo tokenizer json file.", + ) + parser.add_argument( + "--output_dir", + required=True, + help="Location to write HF model and tokenizer", + ) + parser.add_argument( + "--no_fix_eos_token_id", + action="store_false", + dest="fix_eos_token_id", + help="If set, does not change eos token id from 0 to 50279 if it is 0. Changing 0 to 50279 is a bug fix, so use this option with care.", + ) + parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.") + # Different OLMo versions used different default values for max_position_embeddings, hence the need to be able to specify which version is being used. 
+ args = parser.parse_args() + write_model( + model_path=args.output_dir, + input_base_path=args.input_dir, + safe_serialization=args.safe_serialization, + tokenizer_path=args.tokenizer_json_path, + fix_eos_token_id=args.fix_eos_token_id, + ) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/transformers/models/olmo/modeling_olmo.py b/venv/lib/python3.10/site-packages/transformers/models/olmo/modeling_olmo.py new file mode 100644 index 0000000000000000000000000000000000000000..83637536a12531a42e4dae6a26b37028da116a0e --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/olmo/modeling_olmo.py @@ -0,0 +1,1325 @@ +# coding=utf-8 +# Copyright 2024 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch OLMo model.""" + +import math +import warnings +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn +from torch.nn import CrossEntropyLoss + +from ...activations import ACT2FN +from ...cache_utils import Cache, DynamicCache, StaticCache +from ...modeling_attn_mask_utils import AttentionMaskConverter +from ...modeling_outputs import ( + BaseModelOutputWithPast, + CausalLMOutputWithPast, +) +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import ALL_LAYERNORM_LAYERS +from ...utils import ( + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_flash_attn_2_available, + is_flash_attn_greater_or_equal_2_10, + logging, + replace_return_docstrings, +) +from .configuration_olmo import OlmoConfig + + +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "OlmoConfig" + + +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + +class OlmoLayerNorm(nn.Module): + """LayerNorm but with no learnable weight or bias.""" + + def __init__(self, hidden_size: int) -> None: + super().__init__() + self.normalized_shape = (hidden_size,) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + orig_dtype = hidden_states.dtype + return 
F.layer_norm(hidden_states.to(dtype=torch.float32), self.normalized_shape, None, None, eps=1e-5).to( + orig_dtype + ) + + +ALL_LAYERNORM_LAYERS.append(OlmoLayerNorm) + + +# Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Olmo +class OlmoRotaryEmbedding(nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + super().__init__() + self.scaling_factor = scaling_factor + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + # For BC we register cos and sin cached + self.max_seq_len_cached = max_position_embeddings + t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq) + t = t / self.scaling_factor + freqs = torch.outer(t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("_cos_cached", emb.cos().to(torch.get_default_dtype()), persistent=False) + self.register_buffer("_sin_cached", emb.sin().to(torch.get_default_dtype()), persistent=False) + + @property + def sin_cached(self): + logger.warning_once( + "The sin_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use " + "the forward method of RoPE from now on instead. It is not used in the `OlmoAttention` class" + ) + return self._sin_cached + + @property + def cos_cached(self): + logger.warning_once( + "The cos_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use " + "the forward method of RoPE from now on instead. It is not used in the `OlmoAttention` class" + ) + return self._cos_cached + + @torch.no_grad() + def forward(self, x, position_ids): + # x: [bs, num_attention_heads, seq_len, head_size] + inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) + position_ids_expanded = position_ids[:, None, :].float() + # Force float32 since bfloat16 loses precision on long contexts + # See https://github.com/huggingface/transformers/pull/29285 + device_type = x.device.type + device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu" + with torch.autocast(device_type=device_type, enabled=False): + freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) + emb = torch.cat((freqs, freqs), dim=-1) + cos = emb.cos() + sin = emb.sin() + return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) + + +# Copied from transformers.models.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->Olmo +class OlmoLinearScalingRotaryEmbedding(OlmoRotaryEmbedding): + """OlmoRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev""" + + def forward(self, x, position_ids): + # difference to the original RoPE: a scaling factor is aplied to the position ids + position_ids = position_ids.float() / self.scaling_factor + cos, sin = super().forward(x, position_ids) + return cos, sin + + +# Copied from transformers.models.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->Olmo +class OlmoDynamicNTKScalingRotaryEmbedding(OlmoRotaryEmbedding): + """OlmoRotaryEmbedding extended with Dynamic NTK scaling. 
Credits to the Reddit users /u/bloc97 and /u/emozilla""" + + def forward(self, x, position_ids): + # difference to the original RoPE: inv_freq is recomputed when the sequence length > original length + seq_len = torch.max(position_ids) + 1 + if seq_len > self.max_position_embeddings: + base = self.base * ( + (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1) + ) ** (self.dim / (self.dim - 2)) + inv_freq = 1.0 / ( + base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(x.device) / self.dim) + ) + self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: this may break with compilation + + cos, sin = super().forward(x, position_ids) + return cos, sin + + +# Copied from transformers.models.llama.modeling_llama.rotate_half +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb +def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`, *optional*): + Deprecated and unused. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. + """ + cos = cos.unsqueeze(unsqueeze_dim) + sin = sin.unsqueeze(unsqueeze_dim) + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +class OlmoMLP(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.intermediate_size = config.intermediate_size + self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) + self.act_fn = ACT2FN[config.hidden_act] + + def forward(self, x): + return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) + + +# Copied from transformers.models.llama.modeling_llama.repeat_kv +def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + """ + This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, + num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) + """ + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep == 1: + return hidden_states + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + +class OlmoAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + # Copied from transformers.models.llama.modeling_llama.LlamaAttention.__init__ with Llama->Olmo + def __init__(self, config: OlmoConfig, layer_idx: Optional[int] = None): + super().__init__() + self.config = config + self.layer_idx = layer_idx + if layer_idx is None: + logger.warning_once( + f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " + "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " + "when creating this class." + ) + + self.attention_dropout = config.attention_dropout + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.num_key_value_heads = config.num_key_value_heads + self.num_key_value_groups = self.num_heads // self.num_key_value_heads + self.max_position_embeddings = config.max_position_embeddings + self.rope_theta = config.rope_theta + self.is_causal = True + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" + f" and `num_heads`: {self.num_heads})." + ) + + self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias) + self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) + self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) + self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias) + self._init_rope() + + # Copied from transformers.models.llama.modeling_llama.LlamaAttention._init_rope with Llama->Olmo + def _init_rope(self): + if self.config.rope_scaling is None: + self.rotary_emb = OlmoRotaryEmbedding( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + base=self.rope_theta, + ) + else: + scaling_type = self.config.rope_scaling["type"] + scaling_factor = self.config.rope_scaling["factor"] + if scaling_type == "linear": + self.rotary_emb = OlmoLinearScalingRotaryEmbedding( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + scaling_factor=scaling_factor, + base=self.rope_theta, + ) + elif scaling_type == "dynamic": + self.rotary_emb = OlmoDynamicNTKScalingRotaryEmbedding( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + scaling_factor=scaling_factor, + base=self.rope_theta, + ) + else: + raise ValueError(f"Unknown RoPE scaling type {scaling_type}") + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = 
hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + if self.config.clip_qkv is not None: + query_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv) + key_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv) + value_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + past_key_value = getattr(self, "past_key_value", past_key_value) + cos, sin = self.rotary_emb(value_states, position_ids) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + if past_key_value is not None: + # sin and cos are specific to RoPE models; cache_position needed for the static cache + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attention_mask is not None: # no matter the length, we just slice it + causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] + attn_weights = attn_weights + causal_mask + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class OlmoFlashAttention2(OlmoAttention): + """ + OLMo flash attention module. This module inherits from `OlmoAttention` as the weights of the module stays + untouched. The only required change would be on the forward pass where it needs to correctly call the public API of + flash attention and deal with padding tokens in case the input contains any of them. + """ + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. + # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. + # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). 
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + output_attentions = False + + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + if self.config.clip_qkv is not None: + query_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv) + key_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv) + value_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv) + + # Flash attention requires the input to have the shape + # batch_size x seq_length x head_dim x hidden_dim + # therefore we just need to keep the original shape + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + cos, sin = self.rotary_emb(value_states, position_ids) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + past_key_value = getattr(self, "past_key_value", past_key_value) + + if past_key_value is not None: + # sin and cos are specific to RoPE models; cache_position needed for the static cache + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache + # to be able to avoid many of these transpose/reshape/view. + query_states = query_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + value_states = value_states.transpose(1, 2) + + dropout_rate = self.attention_dropout if self.training else 0.0 + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in the correct dtype just to be sure everything works as expected. + # This might slowdown training & inference so it is recommended to not cast the LayerNorms + # in fp32. (OlmoRMSNorm handles it correctly) + + input_dtype = query_states.dtype + if input_dtype == torch.float32: + if torch.is_autocast_enabled(): + target_dtype = torch.get_autocast_gpu_dtype() + # Handle the case where the model is quantized + elif hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.q_proj.weight.dtype + + logger.warning_once( + f"The input hidden states seems to be silently casted in float32, this might be related to" + f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + f" {target_dtype}." 
+ ) + + query_states = query_states.to(target_dtype) + key_states = key_states.to(target_dtype) + value_states = value_states.to(target_dtype) + + attn_output = self._flash_attention_forward( + query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate + ) + + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous() + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward with Llama->Olmo + def _flash_attention_forward( + self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None + ): + """ + Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token + first unpad the input, then computes the attention scores and pad the final attention scores. + + Args: + query_states (`torch.Tensor`): + Input query states to be passed to Flash Attention API + key_states (`torch.Tensor`): + Input key states to be passed to Flash Attention API + value_states (`torch.Tensor`): + Input value states to be passed to Flash Attention API + attention_mask (`torch.Tensor`): + The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the + position of padding tokens and 1 for the position of non-padding tokens. + dropout (`float`): + Attention dropout + softmax_scale (`float`, *optional*): + The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) + """ + if not self._flash_attn_uses_top_left_mask: + causal = self.is_causal + else: + # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in OlmoFlashAttention2 __init__. 
+ causal = self.is_causal and query_length != 1 + + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=causal, + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + attn_output = flash_attn_func( + query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal + ) + + return attn_output + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape + + key_layer = index_first_axis( + key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + value_layer = index_first_axis( + value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. + attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + +class OlmoSdpaAttention(OlmoAttention): + """ + OLMo attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from + `OlmoAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to + SDPA API. + """ + + # Adapted from OlmoAttention.forward + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if output_attentions: + # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. + logger.warning_once( + "OlmoModel is using OlmoSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. 
Falling back to the manual attention implementation, " + 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' + ) + return super().forward( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + ) + + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + if self.config.clip_qkv is not None: + query_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv) + key_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv) + value_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + cos, sin = self.rotary_emb(value_states, position_ids) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + # In case static cache is used, it is an instance attribute. + past_key_value = getattr(self, "past_key_value", past_key_value) + + if past_key_value is not None: + # sin and cos are specific to RoPE models; cache_position needed for the static cache + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + causal_mask = attention_mask + # if attention_mask is not None and cache_position is not None: + if attention_mask is not None: + causal_mask = causal_mask[:, :, :, : key_states.shape[-2]] + + # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, + # Reference: https://github.com/pytorch/pytorch/issues/112577. 
+ if query_states.device.type == "cuda" and causal_mask is not None: + query_states = query_states.contiguous() + key_states = key_states.contiguous() + value_states = value_states.contiguous() + + attn_output = torch.nn.functional.scaled_dot_product_attention( + query_states, + key_states, + value_states, + attn_mask=causal_mask, + dropout_p=self.attention_dropout if self.training else 0.0, + is_causal=causal_mask is None and q_len > 1, + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.view(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + return attn_output, None, past_key_value + + +OLMO_ATTENTION_CLASSES = { + "eager": OlmoAttention, + "flash_attention_2": OlmoFlashAttention2, + "sdpa": OlmoSdpaAttention, +} + + +class OlmoDecoderLayer(nn.Module): + def __init__(self, config: OlmoConfig, layer_idx: int): + super().__init__() + self.hidden_size = config.hidden_size + + self.self_attn = OLMO_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx) + + self.mlp = OlmoMLP(config) + self.input_layernorm = OlmoLayerNorm(config.hidden_size) + self.post_attention_layernorm = OlmoLayerNorm(config.hidden_size) + + # Copied from transformers.models.llama.modeling_llama.LlamaDecoderLayer.forward + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): + attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, + query_sequence_length, key_sequence_length)` if default attention is used. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + """ + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. 
Please make sure use `attention_mask` instead.`" + ) + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + **kwargs, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +OLMO_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`OlmoConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +@add_start_docstrings( + "The bare Olmo Model outputting raw hidden-states without any specific head on top.", + OLMO_START_DOCSTRING, +) +# Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel with Llama->Olmo +class OlmoPreTrainedModel(PreTrainedModel): + config_class = OlmoConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _no_split_modules = ["OlmoDecoderLayer"] + _skip_keys_device_placement = ["past_key_values"] + _supports_flash_attn_2 = True + _supports_sdpa = True + _supports_cache_class = True + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + def _setup_cache(self, cache_cls, max_batch_size, max_cache_len: Optional[int] = None): + if self.config._attn_implementation == "flash_attention_2" and cache_cls == StaticCache: + raise ValueError( + "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` " + "make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers" + ) + + for layer in self.model.layers: + device = layer.input_layernorm.weight.device + if hasattr(self.config, "_pre_quantization_dtype"): + dtype = self.config._pre_quantization_dtype + else: + dtype = layer.self_attn.o_proj.weight.dtype + layer.self_attn.past_key_value = cache_cls( + self.config, max_batch_size, max_cache_len, device=device, dtype=dtype + ) + + def _reset_cache(self): + for layer in self.model.layers: + layer.self_attn.past_key_value = None + + 
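# --- Editorial sketch (illustrative only, not part of modeling_olmo.py) ---
# repeat_kv() above expands key/value states from (batch, num_key_value_heads, seq, head_dim) to
# (batch, num_attention_heads, seq, head_dim) so that each group of query heads shares one
# key/value head (GQA). Its docstring notes the equivalence to torch.repeat_interleave along the
# head dimension; the toy sizes below are hypothetical.
import torch

batch, num_kv_heads, n_rep, seq, head_dim = 2, 2, 4, 5, 8  # 8 query heads sharing 2 kv heads
kv = torch.randn(batch, num_kv_heads, seq, head_dim)

expanded = kv[:, :, None, :, :].expand(batch, num_kv_heads, n_rep, seq, head_dim)
expanded = expanded.reshape(batch, num_kv_heads * n_rep, seq, head_dim)

assert torch.equal(expanded, torch.repeat_interleave(kv, repeats=n_rep, dim=1))
assert expanded.shape == (batch, num_kv_heads * n_rep, seq, head_dim)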
+OLMO_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + If `past_key_values` is used, optionally only the last `input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*): + Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values` + returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. + + Two formats are allowed: + - a [`~cache_utils.Cache`] instance; + - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy + cache format. + + The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the + legacy cache format will be returned. + + If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't + have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` + of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): + Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`, + this tensor is not affected by padding. It is used to update the cache in the correct position and to infer + the complete sequence length. +""" + + +@add_start_docstrings( + "The bare Olmo Model outputting raw hidden-states without any specific head on top.", + OLMO_START_DOCSTRING, +) +class OlmoModel(OlmoPreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`OlmoDecoderLayer`] + + Args: + config: OlmoConfig + """ + + def __init__(self, config: OlmoConfig): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + self.layers = nn.ModuleList( + [OlmoDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] + ) + self.norm = OlmoLayerNorm(config.hidden_size) + self.gradient_checkpointing = False + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + @add_start_docstrings_to_model_forward(OLMO_INPUTS_DOCSTRING) + # Copied from transformers.models.llama.modeling_llama.LlamaModel.forward + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if (input_ids is None) ^ (inputs_embeds is not None): + raise ValueError( + "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one" + ) + + if self.gradient_checkpointing and self.training and use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." 
+ ) + use_cache = False + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + past_seen_tokens = 0 + if use_cache: # kept for BC (cache positions) + if not isinstance(past_key_values, StaticCache): + past_key_values = DynamicCache.from_legacy_cache(past_key_values) + past_seen_tokens = past_key_values.get_seq_length() + + if cache_position is None: + if isinstance(past_key_values, StaticCache): + raise ValueError("cache_position is a required argument when using StaticCache.") + cache_position = torch.arange( + past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device + ) + + if position_ids is None: + position_ids = cache_position.unsqueeze(0) + + causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_seen_tokens) + + # embed positions + hidden_states = inputs_embeds + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = None + + for decoder_layer in self.layers: + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + causal_mask, + position_ids, + past_key_values, + output_attentions, + use_cache, + cache_position, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=causal_mask, + position_ids=position_ids, + past_key_value=past_key_values, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache = layer_outputs[2 if output_attentions else 1] + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = None + if use_cache: + next_cache = ( + next_decoder_cache.to_legacy_cache() if isinstance(next_decoder_cache, Cache) else next_decoder_cache + ) + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask + def _update_causal_mask( + self, + attention_mask: torch.Tensor, + input_tensor: torch.Tensor, + cache_position: torch.Tensor, + past_seen_tokens: int, + ): + # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static + # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes. + # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using + # `fullgraph=True`. 
See more context in https://github.com/huggingface/transformers/pull/29114 + + if self.config._attn_implementation == "flash_attention_2": + if attention_mask is not None and 0.0 in attention_mask: + return attention_mask + return None + + if self.config._attn_implementation == "sdpa": + # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, + # in order to dispatch on Flash Attention 2. + if AttentionMaskConverter._ignore_causal_mask_sdpa( + attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens + ): + return None + + dtype, device = input_tensor.dtype, input_tensor.device + min_dtype = torch.finfo(dtype).min + sequence_length = input_tensor.shape[1] + if hasattr(getattr(self.layers[0], "self_attn", {}), "past_key_value"): # static cache + target_length = self.config.max_position_embeddings + else: # dynamic cache + target_length = ( + attention_mask.shape[-1] + if isinstance(attention_mask, torch.Tensor) + else past_seen_tokens + sequence_length + 1 + ) + + causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device) + if sequence_length != 1: + causal_mask = torch.triu(causal_mask, diagonal=1) + causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) + causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1) + if attention_mask is not None: + causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit + if attention_mask.dim() == 2: + mask_length = attention_mask.shape[-1] + padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0) + causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype) + elif attention_mask.dim() == 4: + # backwards compatibility: we allow passing a 4D attention mask shorter than the input length with + # cache. In that case, the 4D attention mask attends to the newest tokens only. + if attention_mask.shape[-2] < cache_position[0] + sequence_length: + offset = cache_position[0] + else: + offset = 0 + mask_shape = attention_mask.shape + mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype + causal_mask[ + : mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3] + ] = mask_slice + + if ( + self.config._attn_implementation == "sdpa" + and attention_mask is not None + and attention_mask.device.type == "cuda" + ): + # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when + # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. 
+ # Details: https://github.com/pytorch/pytorch/issues/110213 + causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) + + return causal_mask + + +# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM with LLAMA->OLMO,Llama->Olmo +class OlmoForCausalLM(OlmoPreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.model = OlmoModel(config) + self.vocab_size = config.vocab_size + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model = decoder + + def get_decoder(self): + return self.model + + @add_start_docstrings_to_model_forward(OLMO_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + # Ignore copy + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, OlmoForCausalLM + + >>> model = OlmoForCausalLM.from_pretrained("allenai/OLMo-1B-hf") + >>> tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-1B-hf") + + >>> prompt = "Hey, are you conscious? Can you talk to me?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + 'Hey, are you conscious? 
Can you talk to me?\nI’m not sure if you’re conscious of this, but I’m' + ``` + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + cache_position=cache_position, + ) + + hidden_states = outputs[0] + logits = self.lm_head(hidden_states) + logits = logits.float() + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, **kwargs + ): + # With static cache, the `past_key_values` is None + # TODO joao: standardize interface for the different Cache classes and remove of this if + has_static_cache = False + if past_key_values is None: + past_key_values = getattr(getattr(self.model.layers[0], "self_attn", {}), "past_key_value", None) + has_static_cache = past_key_values is not None + + past_length = 0 + if past_key_values is not None: + if isinstance(past_key_values, Cache): + past_length = cache_position[0] if cache_position is not None else past_key_values.get_seq_length() + max_cache_length = ( + torch.tensor(past_key_values.get_max_length(), device=input_ids.device) + if past_key_values.get_max_length() is not None + else None + ) + cache_length = past_length if max_cache_length is None else torch.min(max_cache_length, past_length) + # TODO joao: remove this `else` after `generate` prioritizes `Cache` objects + else: + cache_length = past_length = past_key_values[0][0].shape[2] + max_cache_length = None + + # Keep only the unprocessed tokens: + # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where + # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as + # input) + if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]: + input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :] + # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard + # input_ids based on the past_length. 
+ elif past_length < input_ids.shape[1]: + input_ids = input_ids[:, past_length:] + # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens. + + # If we are about to go beyond the maximum cache length, we need to crop the input attention mask. + if ( + max_cache_length is not None + and attention_mask is not None + and cache_length + input_ids.shape[1] > max_cache_length + ): + attention_mask = attention_mask[:, -max_cache_length:] + + position_ids = kwargs.get("position_ids", None) + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1] :] + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise + # recompiles graphs as the stride of the inputs is a guard. Ref: https://github.com/huggingface/transformers/pull/29114 + # TODO: use `next_tokens` directly instead. + model_inputs = {"input_ids": input_ids.contiguous()} + + input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1] + if cache_position is None: + cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device) + else: + cache_position = cache_position[-input_length:] + + if has_static_cache: + past_key_values = None + + model_inputs.update( + { + "position_ids": position_ids, + "cache_position": cache_position, + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "attention_mask": attention_mask, + } + ) + return model_inputs + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past diff --git a/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ebc4caef2da10a13c3b135463d51e115b542ace6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__init__.py @@ -0,0 +1,65 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
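Before the SeamlessM4Tv2 files that follow, a short aside on the additive causal mask assembled in `_update_causal_mask` above: the mask starts fully blocked at `min_dtype`, the lower triangle is cleared with `torch.triu`, and every key position not ahead of a query's `cache_position` is re-opened, so each query attends only to cached and current tokens. A simplified, self-contained reproduction of that arithmetic (float32 only, no padding merge, illustrative sizes):

import torch

dtype = torch.float32
min_dtype = torch.finfo(dtype).min

sequence_length = 3                    # new tokens in this forward pass
past_seen_tokens = 2                   # tokens already held in the KV cache
target_length = past_seen_tokens + sequence_length
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + sequence_length)

# Start fully masked, clear the lower triangle, then re-open every key
# position that is not ahead of the query's absolute position.
causal_mask = torch.full((sequence_length, target_length), min_dtype, dtype=dtype)
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)

print(causal_mask)   # 0.0 = attend, min_dtype = blocked; row i sees columns 0..past_seen_tokens+i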
+from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, +) + + +_import_structure = { + "configuration_seamless_m4t_v2": ["SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "SeamlessM4Tv2Config"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_seamless_m4t_v2"] = [ + "SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST", + "SeamlessM4Tv2ForTextToSpeech", + "SeamlessM4Tv2ForSpeechToSpeech", + "SeamlessM4Tv2ForTextToText", + "SeamlessM4Tv2ForSpeechToText", + "SeamlessM4Tv2Model", + "SeamlessM4Tv2PreTrainedModel", + ] + +if TYPE_CHECKING: + from .configuration_seamless_m4t_v2 import SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, SeamlessM4Tv2Config + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_seamless_m4t_v2 import ( + SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST, + SeamlessM4Tv2ForSpeechToSpeech, + SeamlessM4Tv2ForSpeechToText, + SeamlessM4Tv2ForTextToSpeech, + SeamlessM4Tv2ForTextToText, + SeamlessM4Tv2Model, + SeamlessM4Tv2PreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..914dab5e202cc2a96ce585be8df59dbd6c4abe30 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/configuration_seamless_m4t_v2.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/configuration_seamless_m4t_v2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..306a08a0983aae54b2f54991196943bb6c635d26 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/configuration_seamless_m4t_v2.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/convert_fairseq2_to_hf.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/convert_fairseq2_to_hf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5cbd6fb59547e21feae4316ac44a7856fb03fd8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/convert_fairseq2_to_hf.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/modeling_seamless_m4t_v2.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/modeling_seamless_m4t_v2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c327c1dec8d73b07c1007957e3ead015cf198bcf Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/modeling_seamless_m4t_v2.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py b/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..e03523d3e0d8b4310f20e4ce7c85c34a19a44531 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py @@ -0,0 +1,425 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" SeamlessM4Tv2 model configuration""" + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class SeamlessM4Tv2Config(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`~SeamlessM4Tv2Model`]. It is used to instantiate + an SeamlessM4Tv2 model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the SeamlessM4Tv2 + [""](https://huggingface.co/"") architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 256102): + Vocabulary size of the text modality of the SeamlessM4Tv2 model. Defines the number of different tokens + that can be represented by the `inputs_ids` passed when calling [`~SeamlessM4Tv2Model`], + [`~SeamlessM4Tv2ForTextToSpeech`] or [`~SeamlessM4Tv2ForTextToText`]. + t2u_vocab_size (`int`, *optional*, defaults to 10082): + Unit vocabulary size of the SeamlessM4Tv2 model. Defines the number of different "unit tokens" that can be + represented by the `inputs_ids` passed when calling the Text-To-Units sub-model of [`~SeamlessM4Tv2Model`], + [`~SeamlessM4Tv2ForSpeechToSpeech`] or [`~SeamlessM4Tv2ForTextToSpeech`]. + char_vocab_size (`int`, *optional*, defaults to 10943): + Character vocabulary size of the SeamlessM4Tv2 model. Defines the number of different character tokens that + can be represented by the `char_inputs_ids` passed when calling the Text-To-Units sub-model of + [`~SeamlessM4Tv2Model`], [`~SeamlessM4Tv2ForSpeechToSpeech`] or [`~SeamlessM4Tv2ForTextToSpeech`]. + + > Parameters shared across sub-models + + hidden_size (`int`, *optional*, defaults to 1024): + Dimensionality of the "intermediate" layers in the architecture. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-05): + The epsilon used by the layer normalization layers. 
+ use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). + max_position_embeddings (`int`, *optional*, defaults to 4096): + The maximum sequence length that this model text encoder and decoder might ever be used with. Typically set + this to something large just in case (e.g., 512 or 1024 or 2048). + is_encoder_decoder (`bool`, *optional*, defaults to `True`): + Whether the model is used as an encoder/decoder or not. + encoder_layerdrop (`float`, *optional*, defaults to 0.05): + The LayerDrop probability for the encoders. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) + for more details. + decoder_layerdrop (`float`, *optional*, defaults to 0.05): + The LayerDrop probability for the decoders. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) + for more details. + activation_function (`str` or `function`, *optional*, defaults to `"relu"`): + The non-linear activation function (function or string) in the decoder and feed-forward layers. If string, + `"gelu"`, `"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported. + dropout (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, decoder, and pooler. + attention_dropout (`float`, *optional*, defaults to 0.1): + The dropout probability for all attention layers. + activation_dropout (`float`, *optional*, defaults to 0.0): + The dropout probability for all activation layers in the model. + scale_embedding (`bool`, *optional*, defaults to `True`): + Scale embeddings by diving by sqrt(d_model). + + > Text encoder and text decoder specific parameters + + encoder_layers (`int`, *optional*, defaults to 24): + Number of hidden layers in the Transformer text encoder. + encoder_ffn_dim (`int`, *optional*, defaults to 8192): + Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text encoder. + encoder_attention_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer text encoder. + decoder_layers (`int`, *optional*, defaults to 24): + Number of hidden layers in the Transformer text decoder. + decoder_ffn_dim (`int`, *optional*, defaults to 8192): + Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text decoder. + decoder_attention_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer text decoder. + decoder_start_token_id (`int`, *optional*, defaults to 3): + If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token. Only + applied in the text decoder. + max_new_tokens (`int`, *optional*, defaults to 256): + The maximum numbers of text tokens to generate, ignoring the number of tokens in the prompt. + pad_token_id (`int`, *optional*, defaults to 0): + The id of the _padding_ text token. Only applied to the text-decoder model. + bos_token_id (`int`, *optional*, defaults to 2): + The id of the _beginning-of-stream_ text token. Only applied to the text-decoder model. + eos_token_id (`int`, *optional*, defaults to 3): + The id of the _end-of-stream_ text token. Only applied to the text-decoder model. + + > Speech encoder specific parameters + + speech_encoder_layers (`int`, *optional*, defaults to 24): + Number of hidden layers in the Transformer speech encoder. 
+ speech_encoder_attention_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer speech encoder. + speech_encoder_intermediate_size (`int`, *optional*, defaults to 4096): + Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer speech encoder. + speech_encoder_hidden_act (`str` or `function`, *optional*, defaults to `"swish"`): + The non-linear activation function (function or string) in the speech encoder. If string, `"gelu"`, + `"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported. + speech_encoder_dropout (`float`, *optional*, defaults to 0.0): + The dropout probability for all layers in the speech encoder. + add_adapter (`bool`, *optional*, defaults to `True`): + Add an adapter layer on top of the speech encoder. + speech_encoder_layerdrop (`float`, *optional*, defaults to 0.1): + The LayerDrop probability for the speech encoder. See the [LayerDrop paper](see + https://arxiv.org/abs/1909.11556) for more details. + feature_projection_input_dim (`int`, *optional*, defaults to 160): + Input dimension of the input feature projection of the speech encoder, i.e the dimension after processing + input audios with [`SeamlessM4TFeatureExtractor`]. + adaptor_kernel_size (`int`, *optional*, defaults to 8): + Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`. + adaptor_stride (`int`, *optional*, defaults to 8): + Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`. + adaptor_dropout (`float`, *optional*, defaults to 0.1): + The dropout probability for all layers in the speech adapter. + num_adapter_layers (`int`, *optional*, defaults to 1): + Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is + True`. + position_embeddings_type (`str`, *optional*, defaults to `"relative_key"`): + Can be specified to `relative_key`. If left to `None`, no relative position embedding is applied. Only + applied to the speech encoder. For more information on `"relative_key"`, please refer to [Self-Attention + with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). + conv_depthwise_kernel_size (`int`, *optional*, defaults to 31): + Kernel size of convolutional depthwise 1D layer in Conformer blocks. Only applied to the speech encoder. + left_max_position_embeddings (`int`, *optional*, defaults to 64): + The left clipping value for relative positions. + right_max_position_embeddings (`int`, *optional*, defaults to 8): + The right clipping value for relative positions. + speech_encoder_chunk_size (`int`, *optional*, defaults to 20000): The size of each attention chunk. + speech_encoder_left_chunk_num (`int`, *optional*, defaults to 128): + Number of chunks on the left up to which lookahead is allowed. + + > Text-To-Unit (t2u) model specific parameters + + t2u_bos_token_id (`int`, *optional*, defaults to 0): + The id of the _beginning-of-stream_ unit token. Only applied to the text-to-unit seq2seq model. + t2u_pad_token_id (`int`, *optional*, defaults to 1): + The id of the _padding_ unit token. Only applied to the text-to-unit seq2seq model. + t2u_eos_token_id (`int`, *optional*, defaults to 2): + The id of the _end-of-stream_ unit token. Only applied to the text-to-unit seq2seq model. + t2u_encoder_layers (`int`, *optional*, defaults to 6): + Number of hidden layers in the Transformer text-to-unit encoder. 
+ t2u_encoder_ffn_dim (`int`, *optional*, defaults to 8192): + Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text-to-unit encoder. + t2u_encoder_attention_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer text-to-unit encoder. + t2u_decoder_layers (`int`, *optional*, defaults to 6): + Number of hidden layers in the Transformer text-to-unit decoder. + t2u_decoder_ffn_dim (`int`, *optional*, defaults to 8192): + Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer text-to-unit decoder. + t2u_decoder_attention_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer text-to-unit decoder. + t2u_max_position_embeddings (`int`, *optional*, defaults to 4096): + The maximum sequence length that this model text-to-unit component might ever be used with. Typically set + this to something large just in case (e.g., 512 or 1024 or 2048). + t2u_variance_predictor_embed_dim (`int`, *optional*, defaults to 1024): + The projection dimension of the text-to-unit's duration predictor. + t2u_variance_predictor_hidden_dim (`int`, *optional*, defaults to 256): + Internal dimension of the text-to-unit's duration predictor. + t2u_variance_predictor_kernel_size (`int`, *optional*, defaults to 3): + Kernel size of the convolutional layers of the text-to-unit's duration predictor. + t2u_variance_pred_dropout (`float`, *optional*, defaults to 0.5): + The dropout probability of the text-to-unit's duration predictor. + + > Hifi-Gan Vocoder specific parameters + + sampling_rate (`int`, *optional*, defaults to 16000): + The sampling rate at which the output audio will be generated, expressed in hertz (Hz). + upsample_initial_channel (`int`, *optional*, defaults to 512): + The number of input channels into the hifi-gan upsampling network. Applies to the vocoder only. + upsample_rates (`Tuple[int]` or `List[int]`, *optional*, defaults to `[5, 4, 4, 2, 2]`): + A tuple of integers defining the stride of each 1D convolutional layer in the vocoder upsampling network. + The length of *upsample_rates* defines the number of convolutional layers and has to match the length of + *upsample_kernel_sizes*. Applies to the vocoder only. + upsample_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[11, 8, 8, 4, 4]`): + A tuple of integers defining the kernel size of each 1D convolutional layer in the vocoder upsampling + network. The length of *upsample_kernel_sizes* defines the number of convolutional layers and has to match + the length of *upsample_rates*. Applies to the vocoder only. + resblock_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[3, 7, 11]`): + A tuple of integers defining the kernel sizes of the vocoder 1D convolutional layers in the multi-receptive + field fusion (MRF) module. Applies to the vocoder only. + resblock_dilation_sizes (`Tuple[Tuple[int]]` or `List[List[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`): + A nested tuple of integers defining the dilation rates of the vocoder dilated 1D convolutional layers in + the multi-receptive field fusion (MRF) module. Applies to the vocoder only. + leaky_relu_slope (`float`, *optional*, defaults to 0.1): + The angle of the negative slope used by the leaky ReLU activation in the vocoder. Applies to the vocoder + only. + unit_hifi_gan_vocab_size (`int`, *optional*, defaults to 10000): + Vocabulary size of the SeamlessM4Tv2 vocoder. 
Defines the number of different unit tokens that can be + represented by the `inputs_ids` passed when calling the vocoder of [`~SeamlessM4Tv2Model`], + [`~SeamlessM4Tv2ForSpeechToSpeech`] or [`~SeamlessM4Tv2ForTextToSpeech`]. + unit_embed_dim (`int`, *optional*, defaults to 1280): + The projection dimension of the input ids given to the hifi-gan vocoder. Applies to the vocoder only. + lang_embed_dim (`int`, *optional*, defaults to 256): + The projection dimension of the target language given to the hifi-gan vocoder. Applies to the vocoder only. + spkr_embed_dim (`int`, *optional*, defaults to 256): + The projection dimension of the speaker id given to the hifi-gan vocoder. Applies to the vocoder only. + vocoder_num_langs (`int`, *optional*, defaults to 36): + Number of langs supported by the vocoder. Might be different from `t2u_num_langs`. + vocoder_num_spkrs (`int`, *optional*, defaults to 200): + Number of speakers supported by the vocoder. + variance_predictor_kernel_size (`int`, *optional*, defaults to 3): + Kernel size of the duration predictor. Applies to the vocoder only. + var_pred_dropout (`float`, *optional*, defaults to 0.5): + The dropout probability of the duration predictor. Applies to the vocoder only. + vocoder_offset (`int`, *optional*, defaults to 4): + Offset the unit token ids by this number to account for symbol tokens. Applies to the vocoder only. + + ```python + >>> from transformers import SeamlessM4Tv2Model, SeamlessM4Tv2Config + + >>> # Initializing a SeamlessM4Tv2 "" style configuration + >>> configuration = SeamlessM4Tv2Config() + + >>> # Initializing a model from the "" style configuration + >>> model = SeamlessM4Tv2Model(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "seamless_m4t_v2" + + def __init__( + self, + vocab_size=256102, + t2u_vocab_size=10082, + char_vocab_size=10943, + # shared config + hidden_size=1024, + initializer_range=0.02, + layer_norm_eps=1e-5, + use_cache=True, + max_position_embeddings=4096, + is_encoder_decoder=True, + encoder_layerdrop=0.05, + decoder_layerdrop=0.05, + activation_function="relu", + dropout=0.1, + attention_dropout=0.1, + activation_dropout=0.0, + scale_embedding=True, + # text encoder|decoder + encoder_layers=24, + encoder_ffn_dim=8192, + encoder_attention_heads=16, + decoder_layers=24, + decoder_ffn_dim=8192, + decoder_attention_heads=16, + decoder_start_token_id=3, + max_new_tokens=256, + pad_token_id=0, + bos_token_id=2, + eos_token_id=3, + # speech_encoder + speech_encoder_layers=24, + speech_encoder_attention_heads=16, + speech_encoder_intermediate_size=4096, + speech_encoder_hidden_act="swish", + speech_encoder_dropout=0.0, + add_adapter=True, + speech_encoder_layerdrop=0.1, + feature_projection_input_dim=160, + adaptor_kernel_size=8, + adaptor_stride=8, + adaptor_dropout=0.1, + num_adapter_layers=1, + position_embeddings_type="relative_key", + conv_depthwise_kernel_size=31, + left_max_position_embeddings=64, + right_max_position_embeddings=8, + speech_encoder_chunk_size=20000, + speech_encoder_left_chunk_num=128, + # t2u config + t2u_bos_token_id=0, + t2u_pad_token_id=1, + t2u_eos_token_id=2, + t2u_encoder_layers=6, + t2u_encoder_ffn_dim=8192, + t2u_encoder_attention_heads=16, + t2u_decoder_layers=6, + t2u_decoder_ffn_dim=8192, + t2u_decoder_attention_heads=16, + t2u_max_position_embeddings=4096, + t2u_variance_predictor_embed_dim=1024, + t2u_variance_predictor_hidden_dim=256, + t2u_variance_predictor_kernel_size=3, + 
t2u_variance_pred_dropout=0.5, + # hifi-gan vocoder config + sampling_rate=16000, + upsample_initial_channel=512, + upsample_rates=[5, 4, 4, 2, 2], + upsample_kernel_sizes=[11, 8, 8, 4, 4], + resblock_kernel_sizes=[3, 7, 11], + resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]], + leaky_relu_slope=0.1, + # specific to Code Hifi-Gan + unit_hifi_gan_vocab_size=10000, + unit_embed_dim=1280, + lang_embed_dim=256, + spkr_embed_dim=256, + vocoder_num_langs=36, + vocoder_num_spkrs=200, + variance_predictor_kernel_size=3, + var_pred_dropout=0.5, + vocoder_offset=4, + **kwargs, + ): + # overall_config + self.vocab_size = vocab_size + self.t2u_vocab_size = t2u_vocab_size + self.char_vocab_size = char_vocab_size + self.hidden_size = hidden_size + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.max_position_embeddings = max_position_embeddings + self.use_cache = use_cache + self.max_new_tokens = max_new_tokens + self.encoder_layerdrop = encoder_layerdrop + self.decoder_layerdrop = decoder_layerdrop + self.activation_function = activation_function + self.dropout = dropout + self.attention_dropout = attention_dropout + self.activation_dropout = activation_dropout + self.scale_embedding = scale_embedding + # for proper config init + self.num_attention_heads = decoder_attention_heads + self.num_hidden_layers = decoder_layers + + # text|unit encoder|decoder + self.encoder_layers = encoder_layers + self.encoder_ffn_dim = encoder_ffn_dim + self.encoder_attention_heads = encoder_attention_heads + self.decoder_layers = decoder_layers + self.decoder_ffn_dim = decoder_ffn_dim + self.decoder_attention_heads = decoder_attention_heads + + # speech_encoder + self.speech_encoder_layers = speech_encoder_layers + self.speech_encoder_hidden_act = speech_encoder_hidden_act + self.speech_encoder_dropout = speech_encoder_dropout + self.speech_encoder_attention_heads = speech_encoder_attention_heads + self.speech_encoder_layerdrop = speech_encoder_layerdrop + self.speech_encoder_intermediate_size = speech_encoder_intermediate_size + self.feature_projection_input_dim = feature_projection_input_dim + self.adaptor_kernel_size = adaptor_kernel_size + self.adaptor_stride = adaptor_stride + self.adaptor_dropout = adaptor_dropout + self.num_adapter_layers = num_adapter_layers + self.position_embeddings_type = position_embeddings_type + self.conv_depthwise_kernel_size = conv_depthwise_kernel_size + self.add_adapter = add_adapter + self.left_max_position_embeddings = left_max_position_embeddings + self.right_max_position_embeddings = right_max_position_embeddings + self.speech_encoder_chunk_size = speech_encoder_chunk_size + self.speech_encoder_left_chunk_num = speech_encoder_left_chunk_num + + # t2u config + self.t2u_bos_token_id = t2u_bos_token_id + self.t2u_pad_token_id = t2u_pad_token_id + self.t2u_eos_token_id = t2u_eos_token_id + self.t2u_encoder_layers = t2u_encoder_layers + self.t2u_encoder_ffn_dim = t2u_encoder_ffn_dim + self.t2u_encoder_attention_heads = t2u_encoder_attention_heads + self.t2u_decoder_layers = t2u_decoder_layers + self.t2u_decoder_ffn_dim = t2u_decoder_ffn_dim + self.t2u_decoder_attention_heads = t2u_decoder_attention_heads + self.t2u_max_position_embeddings = t2u_max_position_embeddings + self.t2u_variance_predictor_embed_dim = t2u_variance_predictor_embed_dim # TODO: add to docstrings + self.t2u_variance_predictor_hidden_dim = t2u_variance_predictor_hidden_dim # TODO: add to docstrings + self.t2u_variance_predictor_kernel_size = 
t2u_variance_predictor_kernel_size # TODO: add to docstrings + self.t2u_variance_pred_dropout = t2u_variance_pred_dropout # TODO: add to docstrings + + # hifi-gan vocoder config + # original parameters specific to Hifi-Gan + self.sampling_rate = sampling_rate + self.upsample_initial_channel = upsample_initial_channel + self.upsample_rates = upsample_rates + self.upsample_kernel_sizes = upsample_kernel_sizes + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.leaky_relu_slope = leaky_relu_slope + + # specific to Code Hifi-Gan + self.unit_hifi_gan_vocab_size = unit_hifi_gan_vocab_size + self.unit_embed_dim = unit_embed_dim + self.lang_embed_dim = lang_embed_dim + self.spkr_embed_dim = spkr_embed_dim + self.vocoder_num_langs = vocoder_num_langs + self.vocoder_num_spkrs = vocoder_num_spkrs + self.variance_predictor_kernel_size = variance_predictor_kernel_size + self.var_pred_dropout = var_pred_dropout + self.vocoder_offset = vocoder_offset + + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + decoder_start_token_id=decoder_start_token_id, + is_encoder_decoder=is_encoder_decoder, + max_position_embeddings=max_position_embeddings, + **kwargs, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/convert_fairseq2_to_hf.py b/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/convert_fairseq2_to_hf.py new file mode 100644 index 0000000000000000000000000000000000000000..8d4320cff5bd9bb80670b0ee7db752fdf6f38e60 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/convert_fairseq2_to_hf.py @@ -0,0 +1,405 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
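For reference on the `SeamlessM4Tv2Config` class defined just above (before the conversion script that follows): the constructor stores the per-sub-model hyper-parameters as plain attributes and forwards the special-token ids and encoder/decoder flags to `PretrainedConfig.__init__`. A quick sanity check of that flow; the commented values are simply the defaults from the signature, and the override pattern is only an illustration:

from transformers import SeamlessM4Tv2Config

config = SeamlessM4Tv2Config()                                        # all defaults
print(config.model_type)                                              # seamless_m4t_v2
print(config.pad_token_id, config.bos_token_id, config.eos_token_id)  # 0 2 3
print(config.decoder_start_token_id)                                  # 3 (handled by PretrainedConfig)
print(config.speech_encoder_layers, config.t2u_decoder_layers)        # 24 6

# Shrinking a few fields, e.g. to build a tiny model for tests:
tiny = SeamlessM4Tv2Config(
    hidden_size=64,
    encoder_layers=2,
    decoder_layers=2,
    speech_encoder_layers=2,
    t2u_encoder_layers=2,
    t2u_decoder_layers=2,
)
print(tiny.hidden_size, tiny.encoder_ffn_dim)                         # 64 8192 (ffn dim keeps its default)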
+""" Converting Meta SeamlessM4Tv2 checkpoints from seamless_communication to HF.""" + + +import argparse +import os +from pathlib import Path + +import torch +from accelerate.utils.modeling import find_tied_parameters +from seamless_communication.inference import Translator + +from transformers import ( + SeamlessM4TFeatureExtractor, + SeamlessM4TProcessor, + SeamlessM4TTokenizer, + SeamlessM4Tv2Config, + SeamlessM4Tv2Model, +) +from transformers.utils import logging + + +# fmt: off +UNIT_SUPPORTED_LANGUAGES = ["__arb__", "__ben__", "__cat__", "__ces__", "__cmn__", "__cym__", "__dan__", "__deu__", "__eng__", "__est__", "__fin__", "__fra__", "__hin__", "__ind__", "__ita__", "__jpn__", "__kan__", "__kor__", "__mlt__", "__nld__", "__pes__", "__pol__", "__por__", "__ron__", "__rus__", "__slk__", "__spa__", "__swe__", "__swh__", "__tam__", "__tel__", "__tgl__", "__tha__", "__tur__", "__ukr__", "__urd__", "__uzn__", "__vie__", ] +# fmt: on + +# fmt: off +VOCODER_SUPPORTED_LANGUAGES = ["__arb__", "__ben__", "__cat__", "__ces__", "__cmn__", "__cym__", "__dan__", "__deu__", "__eng__", "__est__", "__fin__", "__fra__", "__hin__", "__ind__", "__ita__", "__jpn__", "__kor__", "__mlt__", "__nld__", "__pes__", "__pol__", "__por__", "__ron__", "__rus__", "__slk__", "__spa__", "__swe__", "__swh__", "__tel__", "__tgl__", "__tha__", "__tur__", "__ukr__", "__urd__", "__uzn__", "__vie__",] +# fmt: on + +# fmt: off +LARGE_SUPPORTED_LANGUAGES = ["afr","amh","arb","ary","arz","asm","azj","bel","ben","bos","bul","cat","ceb","ces","ckb","cmn","cmn_Hant","cym","dan","deu","ell","eng","est","eus","fin","fra","fuv","gaz","gle","glg","guj","heb","hin","hrv","hun","hye","ibo","ind","isl","ita","jav","jpn","kan","kat","kaz","khk","khm","kir","kor","lao","lit","lug","luo","lvs","mai","mal","mar","mkd","mlt","mni","mya","nld","nno","nob","npi","nya","ory","pan","pbt","pes","pol","por","ron","rus","sat","slk","slv","sna","snd","som","spa","srp","swe","swh","tam","tel","tgk","tgl","tha","tur","ukr","urd","uzn","vie","yor","yue","zlm","zul",] +# fmt: on + + +def assert_param_count(model_1, model_2): + count_1 = sum(p[1].numel() for p in model_1.named_parameters() if "final_proj" not in p[0]) + count_2 = sum(p[1].numel() for p in model_2.named_parameters() if "final_proj" not in p[0]) + assert count_1 == count_2, f"{model_1.__class__}: {count_1} != {model_2.__class__}: {count_2}" + + +def param_count(model): + return sum(p[1].numel() for p in model.named_parameters() if "final_proj" not in p[0]) + + +def _grab_best_device(use_gpu=True): + if torch.cuda.device_count() > 0 and use_gpu: + device = "cuda" + else: + device = "cpu" + return torch.device(device) + + +logging.set_verbosity_info() +logger = logging.get_logger(__name__) + +vocoder_convert_list = [ + ("ups", "hifi_gan.upsampler"), + ("conv_pre", "hifi_gan.conv_pre"), + ("resblocks", "hifi_gan.resblocks"), + ("conv_post", "hifi_gan.conv_post"), + ("lang", "language_embedding"), + ("spkr", "speaker_embedding"), + ("dict.", "unit_embedding."), + ("dur_predictor.conv1.0", "dur_predictor.conv1"), + ("dur_predictor.conv2.0", "dur_predictor.conv2"), +] + +# order is important +wav2vec_convert_list = [ + ("speech_encoder_frontend.model_dim_proj", "feature_projection.projection"), + ("speech_encoder_frontend.post_extract_layer_norm", "feature_projection.layer_norm"), + ("speech_encoder_frontend.pos_encoder.conv", "encoder.pos_conv_embed.conv"), + ("speech_encoder.inner.layers", "encoder.layers"), + ("speech_encoder.inner_layer_norm", "encoder.layer_norm"), + 
("speech_encoder.adaptor_layers", "adapter.layers"), + ("inner_proj", "intermediate_dense"), + ("self_attn.output_proj", "self_attn.linear_out"), + ("output_proj", "output_dense"), + ("self_attn.k_proj", "self_attn.linear_k"), + ("self_attn.v_proj", "self_attn.linear_v"), + ("self_attn.q_proj", "self_attn.linear_q"), + ("self_attn.sdpa.u_bias", "self_attn.pos_bias_u"), + ("self_attn.sdpa.v_bias", "self_attn.pos_bias_v"), + ("self_attn.sdpa.rel_k_embed", "self_attn.distance_embedding"), + ("self_attn.sdpa.r_proj", "self_attn.linear_pos"), + ("conv.pointwise_conv1", "conv_module.pointwise_conv1"), + ("conv.pointwise_conv2", "conv_module.pointwise_conv2"), + ("conv.depthwise_conv", "conv_module.depthwise_conv"), + ("conv.batch_norm", "conv_module.batch_norm"), + ("conv.layer_norm", "conv_module.depthwise_layer_norm"), + ("conv_layer_norm", "conv_module.layer_norm"), + ("speech_encoder.proj1", "intermediate_ffn.intermediate_dense"), + ("speech_encoder.proj2", "intermediate_ffn.output_dense"), + ("speech_encoder.layer_norm", "inner_layer_norm"), +] + +t2u_convert_list = [ + ("t2u_model.final_proj", "lm_head"), + ("t2u_model.", "model."), + ("encoder_decoder_attn_layer_norm", "cross_attention_layer_norm"), + ("encoder_decoder_attn", "cross_attention"), + ("linear_k", "k_proj"), + ("linear_v", "v_proj"), + ("linear_q", "q_proj"), + ("ffn.inner_proj", "ffn.fc1"), + ("ffn.output_proj", "ffn.fc2"), + ("output_proj", "out_proj"), + ("decoder_frontend.embed_char", "decoder.embed_char"), + ("decoder_frontend.pos_emb_alpha_char", "decoder.pos_emb_alpha_char"), + ("decoder_frontend.embed", "decoder.embed_tokens"), + ("decoder_frontend.pos_emb_alpha", "decoder.pos_emb_alpha"), + ("conv1d.conv", "conv"), + ("conv1d_layer_norm", "conv_layer_norm"), + ("decoder_frontend.variance_adaptor", "decoder"), + ("duration_predictor.conv1.0", "duration_predictor.conv1"), + ("duration_predictor.conv2.0", "duration_predictor.conv2"), +] + +text_convert_list = [ + ("text_encoder.", ""), + ("text_decoder.", ""), + ("text_encoder_frontend.embed", "embed_tokens"), + ("text_decoder_frontend.embed", "embed_tokens"), + ("encoder_decoder_attn_layer_norm", "cross_attention_layer_norm"), + ("encoder_decoder_attn", "cross_attention"), + ("linear_k", "k_proj"), + ("linear_v", "v_proj"), + ("linear_q", "q_proj"), + ("ffn.inner_proj", "ffn.fc1"), + ("ffn.output_proj", "ffn.fc2"), + ("output_proj", "out_proj"), + ("final_proj", "lm_head"), +] + +CUR_PATH = os.path.dirname(os.path.abspath(__file__)) +default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache") +CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "huggingface", "hub") + + +def _load_hf_config(): + return SeamlessM4Tv2Config() + + +def _convert_model( + original_model, + hf_model, + convert_list, + device, + unwanted_prefix="model.", + filter_state_dict="speech", + exclude_state_dict=None, +): + state_dict = original_model.state_dict() + + # filter func + if isinstance(filter_state_dict, str): + + def filter_func(x): + return filter_state_dict in x[0] + + else: + + def filter_func(item): + if exclude_state_dict is not None and exclude_state_dict in item[0]: + return False + for filter_el in filter_state_dict: + if filter_el in item[0]: + return True + + return False + + state_dict = dict(filter(filter_func, state_dict.items())) + + for k, v in list(state_dict.items()): + new_k = k[len(unwanted_prefix) :] + for old_layer_name, new_layer_name in convert_list: + if old_layer_name in new_k: + new_k = new_k.replace(old_layer_name, new_layer_name) + 
+ # must do it by hand + if ".layer_norm" in new_k and new_k.split(".layer_norm")[0][-1].isnumeric(): + new_k = new_k.replace("layer_norm", "final_layer_norm") + + state_dict[new_k] = state_dict.pop(k) + + extra_keys = set(state_dict.keys()) - set(hf_model.state_dict().keys()) + extra_keys = set(extra_keys) + missing_keys = set(hf_model.state_dict().keys()) - set(state_dict.keys()) + missing_keys = set({k for k in missing_keys if "final_logits_bias" not in k}) + if len(extra_keys) != 0: + raise ValueError(f"extra keys found: {extra_keys}") + if len(missing_keys) != 0: + raise ValueError(f"missing keys: {missing_keys}") + hf_model.load_state_dict(state_dict, strict=False) + n_params = param_count(hf_model) + + logger.info(f"model loaded: {round(n_params/1e6,1)}M params") + + hf_model.eval() + hf_model.to(device) + del state_dict + + return hf_model + + +def load_model(save_dir, model_type, repo_id): + """ + Meta SeamlessM4Tv2 is made of 8 main components: + - speech_encoder (#1) and speech_encoder_frontend (#2) + - t2u_model (#3) + - text_encoder (#4) and text_encoder_frontend (#5) + - text_decoder (#6) [and text_decoder_frontend (#5) = equals to text_encoder_frontend] + - final_proj (#7) + - vocoder (#8) + """ + device = _grab_best_device() + name = "seamlessM4T_v2_large" + + original_model = Translator(name, "vocoder_v2", device, dtype=torch.float32) + + ######### TOKENIZER + + langs = LARGE_SUPPORTED_LANGUAGES + langs = [f"__{lang}__" for lang in langs] + vocab_file = os.path.join(os.path.expanduser("~"), "tokenizer", model_type, "tokenizer.model") + + save_dir = os.path.join(save_dir, name) + Path(save_dir).mkdir(exist_ok=True) + + tokenizer = SeamlessM4TTokenizer(vocab_file, additional_special_tokens=langs) + + sanity_check_lang_id = tokenizer.convert_tokens_to_ids("__fra__") + + tokenizer.save_pretrained(save_dir) + tokenizer = SeamlessM4TTokenizer.from_pretrained(save_dir) + + if sanity_check_lang_id != tokenizer.convert_tokens_to_ids("__fra__"): + raise ValueError( + f"Error in tokenizer saving/loading - __fra__ lang id is not coherent: {sanity_check_lang_id} vs {tokenizer.convert_tokens_to_ids('__fra__')}" + ) + + ####### get language to ids dict + text_decoder_lang_code_to_id = {lang.replace("__", ""): tokenizer.convert_tokens_to_ids(lang) for lang in langs} + # offset: vocoder unit vocab size + 5 (for EOS/PAD/BOS/UNK/MSK) + len(supported_languages) + t2u_lang_code_to_id = { + code.replace("__", ""): i + 10005 + len(UNIT_SUPPORTED_LANGUAGES) + for i, code in enumerate(UNIT_SUPPORTED_LANGUAGES) + } + vocoder_lang_code_to_id = {code.replace("__", ""): i for i, code in enumerate(VOCODER_SUPPORTED_LANGUAGES)} + + ######### FE + + fe = SeamlessM4TFeatureExtractor(language_code=langs) + + fe.save_pretrained(save_dir) + fe = SeamlessM4TFeatureExtractor.from_pretrained(save_dir) + + processor = SeamlessM4TProcessor(feature_extractor=fe, tokenizer=tokenizer) + processor.save_pretrained(save_dir) + processor.push_to_hub(repo_id=repo_id, create_pr=True) + + processor = SeamlessM4TProcessor.from_pretrained(save_dir) + + ######## Model + + # init config + hf_config = _load_hf_config() + + ######## get id_to_text and char_to_id from original model tokenizers + id_to_text = {i: original_model.text_tokenizer.model.index_to_token(i) for i in range(hf_config.vocab_size)} + char_to_id = { + original_model.model.t2u_model.decoder_frontend.char_tokenizer.model.index_to_token(i): i for i in range(10904) + } + + # init model + hf_model = SeamlessM4Tv2Model(hf_config) + + 
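    # The language/character lookup tables built above are attached to the model's
    # generation config below, so that generation-time code can map a target language
    # code such as "fra" to the corresponding text-decoder, t2u and vocoder token ids.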
hf_model.generation_config.__setattr__("text_decoder_lang_to_code_id", text_decoder_lang_code_to_id) + hf_model.generation_config.__setattr__("t2u_lang_code_to_id", t2u_lang_code_to_id) + hf_model.generation_config.__setattr__("vocoder_lang_code_to_id", vocoder_lang_code_to_id) + hf_model.generation_config.__setattr__("id_to_text", id_to_text) + hf_model.generation_config.__setattr__("char_to_id", char_to_id) + + # -1. take care of vocoder + # similarly to speech T5 must apply and remove weight norm + hf_model.vocoder.apply_weight_norm() + hf_model.vocoder = _convert_model( + original_model, + hf_model.vocoder, + vocoder_convert_list, + device, + unwanted_prefix="vocoder.code_generator.", + filter_state_dict="vocoder", + ) + hf_model.vocoder.remove_weight_norm() + + # 1. take care of speech encoder + wav2vec = hf_model.speech_encoder + hf_model.speech_encoder = _convert_model( + original_model, wav2vec, wav2vec_convert_list, device, unwanted_prefix="model.", filter_state_dict="speech" + ) + + # 2. take care of t2u + + hf_model.t2u_model = _convert_model( + original_model, + hf_model.t2u_model, + t2u_convert_list, + device, + unwanted_prefix="model.", + filter_state_dict="t2u_model", + ) + + # 3. take care of text encoder + hf_model.text_encoder = _convert_model( + original_model, + hf_model.text_encoder, + text_convert_list, + device, + unwanted_prefix="model.", + filter_state_dict=["model.text_encoder"], + exclude_state_dict="t2u_model", + ) + + # 4. take care of text decoder + hf_model.text_decoder = _convert_model( + original_model, + hf_model.text_decoder, + text_convert_list, + device, + unwanted_prefix="model.", + filter_state_dict=["model.text_decoder"], + exclude_state_dict="t2u_model", + ) + + # 5. take care of final proj + hf_model.lm_head = _convert_model( + original_model, + hf_model.lm_head, + [("final_proj.", "")], + device, + unwanted_prefix="model.", + filter_state_dict=["model.final_proj"], + exclude_state_dict="t2u_model", + ) + + # sanity check + print(find_tied_parameters(hf_model)) + + count_1 = param_count(hf_model) + count_2 = param_count(original_model) + + print(f"HF MODEL:{count_1}, ORIGINAL_MODEL: {count_2}, diff:{count_1 - count_2}") + print(f"HF MODEL excluding embeddings:{hf_model.num_parameters(exclude_embeddings=True)}") + + del original_model + + hf_model.generation_config._from_model_config = False + hf_model.save_pretrained(save_dir) + hf_model.push_to_hub(repo_id=repo_id, create_pr=True) + hf_model = SeamlessM4Tv2Model.from_pretrained(save_dir) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + + parser.add_argument( + "--model_type", + default="large", + type=str, + help="Model type.", + ) + + parser.add_argument( + "--save_dir", + default="/home/ubuntu/weights_v2", + type=str, + help="Path to the output PyTorch model.", + ) + + parser.add_argument( + "--repo_id", + default="facebook/seamless-m4t-v2-large", + type=str, + help="Repo ID.", + ) + + args = parser.parse_args() + + load_model(args.save_dir, args.model_type, args.repo_id) diff --git a/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py b/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..c7f90f6c0a23f2d90806fd0582ebbdb3c70f7e71 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py @@ -0,0 +1,4795 @@ +# coding=utf-8 +# Copyright 
2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch SeamlessM4Tv2 model.""" + + +import copy +import math +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import Tensor, nn +from torch.nn import CrossEntropyLoss + +from ...activations import ACT2FN +from ...deepspeed import is_deepspeed_zero3_enabled +from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask +from ...modeling_outputs import ( + BaseModelOutput, + BaseModelOutputWithPastAndCrossAttentions, + Seq2SeqLMOutput, + Seq2SeqModelOutput, + Wav2Vec2BaseModelOutput, +) +from ...modeling_utils import PreTrainedModel +from ...utils import ( + ModelOutput, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, +) +from .configuration_seamless_m4t_v2 import SeamlessM4Tv2Config + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "" +_CONFIG_FOR_DOC = "SeamlessM4Tv2Config" + + +from ..deprecated._archive_maps import SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP = { + "microsoft/speecht5_hifigan": "https://huggingface.co/microsoft/speecht5_hifigan/resolve/main/config.json", +} + + +@dataclass +# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TGenerationOutput with SeamlessM4T->SeamlessM4Tv2 +class SeamlessM4Tv2GenerationOutput(ModelOutput): + """ + Class defining the generated outputs from [`SeamlessM4Tv2Model`], [`SeamlessM4Tv2ForTextToText`], + [`SeamlessM4Tv2ForTextToSpeech`], [`SeamlessM4Tv2ForSpeechToSpeech`] and [`SeamlessM4Tv2ForTextToSpeech`]. + + Args: + waveform (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): + The final audio waveform predicted by the model. + waveform_lengths (`torch.IntTensor` of shape `(batch_size,)`, *optional*): + The length in samples of each element in the `waveform` batch. + sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + The generated translated sequences. This is the output of the text-to-text or the speech-to-text models. + The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished + early due to the `eos_token_id`. + unit_sequences (`torch.LongTensor` of shape `(batch_size, unit_sequence_length)`, *optional*): + The generated translated unit sequences. This is the output of the text-to-units model. The second + dimension (unit_sequence_length) is either equal to `t2u_max_length` or shorter if all batches finished + early due to the `t2u_eos_token_id`. 
+ """ + + waveform: Optional[torch.FloatTensor] = None + waveform_lengths: Optional[torch.IntTensor] = None + sequences: Optional[Tuple[torch.FloatTensor]] = None + unit_sequences: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class SeamlessM4Tv2TextToUnitDecoderOutput(ModelOutput): + """ + Class defining the outputs from [`SeamlessM4Tv2TextToUnitDecoder`]. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + padding_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* or 0 + for *masked* + """ + + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + padding_mask: Optional[torch.Tensor] = None + + +@dataclass +class SeamlessM4Tv2TextToUnitOutput(ModelOutput): + """ + Class defining the outputs from [`SeamlessM4Tv2TextToUnitForConditionalGeneration`] and + [`SeamlessM4Tv2TextToUnitModel`]. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the decoder of the model. + + If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, + hidden_size)` is output. + padding_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* or 0 + for *masked* + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. 
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss. + """ + + last_hidden_state: torch.FloatTensor = None + padding_mask: Optional[torch.Tensor] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + loss: Optional[torch.FloatTensor] = None + + +SEAMLESS_M4T_V2_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use + it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`~SeamlessM4Tv2Config`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +SEAMLESS_M4T_V2_MULTIMODAL_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See + [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`): + Input audio features. This should be returnes by the [`SeamlessM4TFeatureExtractor`] class or the + [`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details. + """ + +M4T_TEXT_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See + [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
+ + [What are input IDs?](../glossary#input-ids) + """ + +M4T_SPEECH_INPUTS_DOCSTRING = r""" + Args: + input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`): + Input audio features. This should be returnes by the [`SeamlessM4TFeatureExtractor`] class or the + [`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details. + """ + +SEAMLESS_M4T_V2_END_INPUTS_DOCSTRING = r""" + attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Indices of decoder input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are decoder input IDs?](../glossary#decoder-input-ids) + + Bart uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` + is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). + + For translation and summarization training, `decoder_input_ids` should be provided. If no + `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right + for denoising pre-training following the paper. + decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also + be used by default. + + If you want to change padding behavior, you should read [`modeling_bart._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): + Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) + `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of + hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
+ inputs_embeds (`torch.FloatTensor` of shape`(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded + representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be + input (see `past_key_values`). This is useful if you want more control over how to convert + `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. + + If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value + of `inputs_embeds`. + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the + loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +M4T_MODEL_INPUTS_DOCSTRING = SEAMLESS_M4T_V2_MULTIMODAL_INPUTS_DOCSTRING + SEAMLESS_M4T_V2_END_INPUTS_DOCSTRING + +M4T_TEXT_INPUTS_DOCSTRING = M4T_TEXT_INPUTS_DOCSTRING + SEAMLESS_M4T_V2_END_INPUTS_DOCSTRING + +M4T_SPEECH_INPUTS_DOCSTRING = M4T_SPEECH_INPUTS_DOCSTRING + SEAMLESS_M4T_V2_END_INPUTS_DOCSTRING + +M4T_TEXT_TO_UNITS_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See + [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + char_input_ids (`torch.LongTensor` of shape `(batch_size, char_sequence_length)`): + Character indices. The correspondence between characters and indices can be found in `char_to_id`, a + dictionary in the generation configuration. + char_count_per_id (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Number of characters per input id. + attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. 
+ + [What are attention masks?](../glossary#attention-mask) + encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): + Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) + `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of + hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. + inputs_embeds (`torch.FloatTensor` of shape`(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the + loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +############ UTILS ################ + + +# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids +def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): + """ + Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols + are ignored. This is modified from fairseq's `utils.make_positions`. + + Args: + x: torch.Tensor x: + + Returns: torch.Tensor + """ + # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. + mask = input_ids.ne(padding_idx).int() + incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask + return incremental_indices.long() + padding_idx + + +# Copied from transformers.models.bart.modeling_bart.shift_tokens_right +def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): + """ + Shift input ids one token to the right. + """ + shifted_input_ids = input_ids.new_zeros(input_ids.shape) + shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() + shifted_input_ids[:, 0] = decoder_start_token_id + + if pad_token_id is None: + raise ValueError("self.model.config.pad_token_id has to be defined.") + # replace possible -100 values in labels by `pad_token_id` + shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) + + return shifted_input_ids + + +def _compute_new_attention_mask(hidden_states: torch.Tensor, seq_lens: torch.Tensor): + """ + Computes an attention mask of the form `(batch, seq_len)` with an attention for each element in the batch that + stops at the corresponding element in `seq_lens`. + + Args: + hidden_states (`torch.FloatTensor` of shape `(batch, seq_len, *)`): + The sequences to mask, where `*` is any number of sequence-specific dimensions including none. 
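To make the label shifting above concrete, here is a small sketch of what `shift_tokens_right` produces for a toy label tensor; the token ids (pad=0, decoder start=2) are made-up values for the demo.

import torch

# Toy labels: two real tokens followed by ignored (-100) positions.
labels = torch.tensor([[5, 6, -100, -100]])
pad_token_id, decoder_start_token_id = 0, 2

shifted = labels.new_zeros(labels.shape)
shifted[:, 1:] = labels[:, :-1].clone()
shifted[:, 0] = decoder_start_token_id
shifted.masked_fill_(shifted == -100, pad_token_id)

# shifted == tensor([[2, 5, 6, 0]]): the sequence now starts with the decoder
# start token and any remaining -100 has been replaced by the pad token.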
+ seq_lens (`torch.Tensor` of shape `(batch)`: + Each element represents the length of the sequence at the same index in `hidden_states` + + Returns: + `torch.FloatTensor`: The float attention mask of shape `(batch, seq_len)` + """ + batch_size, mask_seq_len = hidden_states.shape[:2] + + indices = torch.arange(mask_seq_len, device=seq_lens.device).expand(batch_size, -1) + + bool_mask = indices >= seq_lens.unsqueeze(1).expand(-1, mask_seq_len) + + mask = hidden_states.new_ones((batch_size, mask_seq_len)) + + mask = mask.masked_fill(bool_mask, 0) + + return mask + + +# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.format_speech_generation_kwargs with SeamlessM4T->SeamlessM4Tv2 +def format_speech_generation_kwargs(kwargs): + """ + Format kwargs for SeamlessM4Tv2 models that generate speech, attribute kwargs to either the text generation or the + speech generation models. + + Args: + kwargs (`dict`)`: + Keyword arguments are of two types: + + - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model, + except for `decoder_input_ids` which will only be passed through the text components. + - With a *text_* or *speech_* prefix, they will be input for the `generate` method of the + text model and speech model respectively. It has the priority over the keywords without a prefix. + + This means you can, for example, specify a generation strategy for one generation but not for the + other. + """ + # attribute kwargs to models + kwargs_text = {} + kwargs_speech = {} + for key, value in kwargs.items(): + if key.startswith("text_"): + key = key[len("text_") :] + kwargs_text[key] = value + elif key.startswith("speech_"): + key = key[len("speech_") :] + kwargs_speech[key] = value + else: + # If the key is already in a specific config, then it's been set with a + # submodules specific value and we don't override + if key not in kwargs_text: + kwargs_text[key] = value + if key not in kwargs_speech: + kwargs_speech[key] = value + return kwargs_text, kwargs_speech + + +############ SPEECH ENCODER related code ################ + + +class SeamlessM4Tv2ConformerFeatureProjection(nn.Module): + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerFeatureProjection.__init__ + def __init__(self, config): + super().__init__() + self.layer_norm = nn.LayerNorm(config.feature_projection_input_dim, eps=config.layer_norm_eps) + self.projection = nn.Linear(config.feature_projection_input_dim, config.hidden_size) + self.dropout = nn.Dropout(config.speech_encoder_dropout) + + def forward(self, hidden_states): + # non-projected hidden states are needed for quantization + norm_hidden_states = self.layer_norm(hidden_states.to(self.layer_norm.weight.dtype)) + hidden_states = self.projection(norm_hidden_states) + hidden_states = self.dropout(hidden_states) + return hidden_states + + +# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerFeedForward with SeamlessM4T->SeamlessM4Tv2 +class SeamlessM4Tv2ConformerFeedForward(nn.Module): + def __init__(self, config, act_fn=None, dropout=None): + super().__init__() + dropout = dropout if dropout is not None else config.speech_encoder_dropout + act_fn = act_fn if act_fn is not None else config.speech_encoder_hidden_act + + self.intermediate_dropout = nn.Dropout(dropout) + self.intermediate_dense = nn.Linear(config.hidden_size, config.speech_encoder_intermediate_size) + self.intermediate_act_fn = ACT2FN[act_fn] if isinstance(act_fn, str) else act_fn + + 
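The length-to-mask conversion performed by `_compute_new_attention_mask` above can be checked with a tiny sketch (shapes are made up):

import torch

hidden_states = torch.zeros(2, 5, 8)      # (batch, seq_len, dim)
seq_lens = torch.tensor([3, 5])           # true length of each sequence

batch_size, mask_seq_len = hidden_states.shape[:2]
indices = torch.arange(mask_seq_len).expand(batch_size, -1)
bool_mask = indices >= seq_lens.unsqueeze(1)
mask = hidden_states.new_ones((batch_size, mask_seq_len)).masked_fill(bool_mask, 0)

# mask == tensor([[1., 1., 1., 0., 0.],
#                 [1., 1., 1., 1., 1.]])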
self.output_dense = nn.Linear(config.speech_encoder_intermediate_size, config.hidden_size) + self.output_dropout = nn.Dropout(dropout) + + def forward(self, hidden_states): + hidden_states = self.intermediate_dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + hidden_states = self.intermediate_dropout(hidden_states) + + hidden_states = self.output_dense(hidden_states) + hidden_states = self.output_dropout(hidden_states) + return hidden_states + + +class SeamlessM4Tv2ConformerConvolutionModule(nn.Module): + """Convolution block used in the conformer block. Uses a causal depthwise convolution similar to that + described in Section 2.1 of `https://doi.org/10.48550/arxiv.1609.03499""" + + def __init__(self, config): + super().__init__() + if (config.conv_depthwise_kernel_size - 1) % 2 == 1: + raise ValueError("`config.conv_depthwise_kernel_size` should be a odd number for 'SAME' padding") + self.layer_norm = nn.LayerNorm(config.hidden_size) + self.pointwise_conv1 = nn.Conv1d( + config.hidden_size, + 2 * config.hidden_size, + kernel_size=1, + stride=1, + padding=0, + bias=False, + ) + self.glu = nn.GLU(dim=1) + self.depthwise_conv = nn.Conv1d( + config.hidden_size, + config.hidden_size, + config.conv_depthwise_kernel_size, + stride=1, + padding=0, + groups=config.hidden_size, + bias=False, + ) + self.depthwise_layer_norm = nn.LayerNorm(config.hidden_size) + self.activation = ACT2FN[config.speech_encoder_hidden_act] + self.pointwise_conv2 = nn.Conv1d( + config.hidden_size, + config.hidden_size, + kernel_size=1, + stride=1, + padding=0, + bias=False, + ) + self.dropout = nn.Dropout(config.speech_encoder_dropout) + + def forward(self, hidden_states, attention_mask=None): + hidden_states = self.layer_norm(hidden_states) + + # Ensure that we do not leak padded positions in depthwise convolution. + # Put 0 where necessary + if attention_mask is not None: + hidden_states = hidden_states.masked_fill(~attention_mask.bool().unsqueeze(-1), 0.0) + + # exchange the temporal dimension and the feature dimension + hidden_states = hidden_states.transpose(1, 2) + + # GLU mechanism + # => (batch, 2*channel, dim) + hidden_states = self.pointwise_conv1(hidden_states) + # => (batch, channel, dim) + hidden_states = self.glu(hidden_states) + + # Pad the sequence entirely on the left because of causal convolution. + hidden_states = torch.nn.functional.pad(hidden_states, (self.depthwise_conv.kernel_size[0] - 1, 0)) + + # 1D Depthwise Conv + hidden_states = self.depthwise_conv(hidden_states) + hidden_states = self.depthwise_layer_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + hidden_states = self.activation(hidden_states) + + hidden_states = self.pointwise_conv2(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = hidden_states.transpose(1, 2) + return hidden_states + + +class SeamlessM4Tv2ConformerSelfAttention(nn.Module): + """Construct a SeamlessM4Tv2ConformerSelfAttention object. + Can be enhanced with relative position embeddings. 
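A short sketch of the causal-padding trick used by the convolution module above: prepending kernel_size - 1 zeros and using padding=0 keeps the output length equal to the input length while the depthwise convolution never sees future frames (sizes below are made up).

import torch
import torch.nn as nn
import torch.nn.functional as F

kernel_size, channels, seq_len = 5, 4, 10
depthwise = nn.Conv1d(channels, channels, kernel_size, padding=0, groups=channels, bias=False)

x = torch.randn(1, channels, seq_len)
y = depthwise(F.pad(x, (kernel_size - 1, 0)))   # pad only the past (left) side
assert y.shape == (1, channels, seq_len)        # same length, causal receptive field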
+ """ + + def __init__(self, config, use_position_embeddings=True): + super().__init__() + + self.head_size = config.hidden_size // config.speech_encoder_attention_heads + self.num_heads = config.speech_encoder_attention_heads + self.position_embeddings_type = config.position_embeddings_type if use_position_embeddings else None + + self.linear_q = nn.Linear(config.hidden_size, config.hidden_size) + self.linear_k = nn.Linear(config.hidden_size, config.hidden_size) + self.linear_v = nn.Linear(config.hidden_size, config.hidden_size) + self.linear_out = nn.Linear(config.hidden_size, config.hidden_size) + + self.dropout = nn.Dropout(p=config.speech_encoder_dropout) + + if self.position_embeddings_type == "relative_key": + self.left_max_position_embeddings = config.left_max_position_embeddings + self.right_max_position_embeddings = config.right_max_position_embeddings + num_positions = self.left_max_position_embeddings + self.right_max_position_embeddings + 1 + self.distance_embedding = nn.Embedding(num_positions, self.head_size) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + # self-attention mechanism + batch_size, sequence_length, hidden_size = hidden_states.size() + + # make sure query/key states can be != value states + query_key_states = hidden_states + value_states = hidden_states + + # project query_key_states and value_states + query = self.linear_q(query_key_states).view(batch_size, -1, self.num_heads, self.head_size) + key = self.linear_k(query_key_states).view(batch_size, -1, self.num_heads, self.head_size) + value = self.linear_v(value_states).view(batch_size, -1, self.num_heads, self.head_size) + + # => (batch, head, time1, d_k) + query = query.transpose(1, 2) + key = key.transpose(1, 2) + value = value.transpose(1, 2) + + attn_weights = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.head_size) + + if self.position_embeddings_type == "relative_key": + query_length, key_length = query.shape[2], key.shape[2] + + position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_r - position_ids_l + distance = torch.clamp(distance, -self.left_max_position_embeddings, self.right_max_position_embeddings) + + positional_embedding = self.distance_embedding(distance + self.left_max_position_embeddings) + positional_embedding = positional_embedding.to(dtype=query.dtype) # fp16 compatibility + + relative_position_attn_weights = torch.einsum("bhld,lrd->bhlr", query, positional_embedding) + attn_weights = attn_weights + (relative_position_attn_weights / math.sqrt(self.head_size)) + + # apply attention_mask if necessary + if attention_mask is not None: + attn_weights = attn_weights + attention_mask + + # => (batch, head, time1, time2) + attn_weights = torch.softmax(attn_weights, dim=-1) + attn_weights = self.dropout(attn_weights) + + # => (batch, head, time1, d_k) + attn_output = torch.matmul(attn_weights, value) + + # => (batch, time1, hidden_size) + attn_output = attn_output.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.head_size) + attn_output = self.linear_out(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights + + +class SeamlessM4Tv2ConformerEncoderLayer(nn.Module): + 
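For the "relative_key" position embeddings used by the conformer self-attention above, the sketch below shows how pairwise distances are clamped and shifted into indices for the distance embedding table; the window sizes are made up.

import torch

query_length = key_length = 5
left_max, right_max = 2, 2                      # made-up window sizes

pos_l = torch.arange(query_length).view(-1, 1)
pos_r = torch.arange(key_length).view(1, -1)
distance = torch.clamp(pos_r - pos_l, -left_max, right_max)
embedding_index = distance + left_max           # rows of a (left_max + right_max + 1)-entry table

# embedding_index[0] == tensor([2, 3, 4, 4, 4]): keys further than right_max to
# the right of the query all share the last embedding row.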
"""Conformer block based on https://arxiv.org/abs/2005.08100.""" + + # Copied from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer.Wav2Vec2ConformerEncoderLayer.__init__ with Wav2Vec2->SeamlessM4Tv2, attention_dropout->speech_encoder_dropout, torch.nn->nn + def __init__(self, config): + super().__init__() + embed_dim = config.hidden_size + dropout = config.speech_encoder_dropout + + # Feed-forward 1 + self.ffn1_layer_norm = nn.LayerNorm(embed_dim) + self.ffn1 = SeamlessM4Tv2ConformerFeedForward(config) + + # Self-Attention + self.self_attn_layer_norm = nn.LayerNorm(embed_dim) + self.self_attn_dropout = nn.Dropout(dropout) + self.self_attn = SeamlessM4Tv2ConformerSelfAttention(config) + + # Conformer Convolution + self.conv_module = SeamlessM4Tv2ConformerConvolutionModule(config) + + # Feed-forward 2 + self.ffn2_layer_norm = nn.LayerNorm(embed_dim) + self.ffn2 = SeamlessM4Tv2ConformerFeedForward(config) + self.final_layer_norm = nn.LayerNorm(embed_dim) + + def forward( + self, + hidden_states, + attention_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + conv_attention_mask: Optional[torch.Tensor] = None, + ): + hidden_states = hidden_states + + # 1. Feed-Forward 1 layer + residual = hidden_states + hidden_states = self.ffn1_layer_norm(hidden_states) + hidden_states = self.ffn1(hidden_states) + hidden_states = hidden_states * 0.5 + residual + residual = hidden_states + + # 2. Self-Attention layer + hidden_states = self.self_attn_layer_norm(hidden_states) + hidden_states, attn_weights = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + output_attentions=output_attentions, + ) + hidden_states = self.self_attn_dropout(hidden_states) + hidden_states = hidden_states + residual + + # 3. Convolutional Layer + residual = hidden_states + hidden_states = self.conv_module(hidden_states, attention_mask=conv_attention_mask) + hidden_states = residual + hidden_states + + # 4. Feed-Forward 2 Layer + residual = hidden_states + hidden_states = self.ffn2_layer_norm(hidden_states) + hidden_states = self.ffn2(hidden_states) + hidden_states = hidden_states * 0.5 + residual + hidden_states = self.final_layer_norm(hidden_states) + + return hidden_states, attn_weights + + +class SeamlessM4Tv2ConformerEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + + self.dropout = nn.Dropout(config.speech_encoder_dropout) + self.layers = nn.ModuleList( + [SeamlessM4Tv2ConformerEncoderLayer(config) for _ in range(config.speech_encoder_layers)] + ) + + self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + self.gradient_checkpointing = False + + def _apply_chunk_attention(self, attention_mask, hidden_states): + """ + Creates a chunk attention mask. It creates a mask to prevent attention across chunks, ensuring that each + position attends only to positions within its own chunk. If a left chunk overlap is specified + (`speech_encoder_chunk_size` in the configuration), the attention mask is adjusted accordingly to allow each + position to also attends the `speech_encoder_chunk_size - 1` previous chunks. 
+ """ + sequence_len = hidden_states.shape[1] + + chunk_indices = torch.arange(sequence_len, device=hidden_states.device) + chunk_indices = torch.div(chunk_indices, self.config.speech_encoder_chunk_size).long() + + start_indices = torch.full_like(chunk_indices, 0) + if self.config.speech_encoder_left_chunk_num >= 0: + start_indices = (chunk_indices - self.config.speech_encoder_left_chunk_num).clamp_(min=0) + start_indices = start_indices * self.config.speech_encoder_chunk_size + start_indices = start_indices + start_indices = start_indices.unsqueeze(1).expand(-1, sequence_len) + + end_indices = ((chunk_indices + 1) * self.config.speech_encoder_chunk_size).clamp_(max=sequence_len) + + end_indices = end_indices.unsqueeze(1).expand(-1, sequence_len) + + indices = torch.arange(sequence_len, device=hidden_states.device).unsqueeze(0).expand(sequence_len, -1) + + chunk_mask = (indices < start_indices) | (indices >= end_indices) + chunk_mask = chunk_mask.unsqueeze(0).unsqueeze(0) + + attention_mask = chunk_mask if attention_mask is None else (attention_mask.bool() | chunk_mask) + attention_mask = attention_mask.to(dtype=hidden_states.dtype) + return attention_mask + + def forward( + self, + hidden_states, + attention_mask=None, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + ): + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + + conv_attention_mask = attention_mask + if attention_mask is not None: + # make sure padded tokens output 0 + hidden_states = hidden_states.masked_fill(~attention_mask.bool().unsqueeze(-1), 0.0) + # extend attention_mask + attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype) + attention_mask = attention_mask.expand( + attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1] + ) + + if self.config.speech_encoder_chunk_size is not None: + attention_mask = self._apply_chunk_attention(attention_mask, hidden_states) + + if attention_mask is not None: + attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min + + hidden_states = self.dropout(hidden_states) + + deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() + + for i, layer in enumerate(self.layers): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + dropout_probability = torch.rand([]) + + skip_the_layer = ( + True if self.training and (dropout_probability < self.config.speech_encoder_layerdrop) else False + ) + if not skip_the_layer or deepspeed_zero3_is_enabled: + # under deepspeed zero3 all gpus must run in sync + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + layer.__call__, + hidden_states, + attention_mask, + ) + else: + layer_outputs = layer( + hidden_states, + attention_mask=attention_mask, + output_attentions=output_attentions, + conv_attention_mask=conv_attention_mask, + ) + hidden_states = layer_outputs[0] + + if skip_the_layer: + layer_outputs = (None, None) + + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + + hidden_states = self.layer_norm(hidden_states) + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, + 
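The chunked attention pattern built by `_apply_chunk_attention` above can be visualised with made-up sizes (chunk_size=2, one left chunk):

import torch

seq_len, chunk_size, left_chunk_num = 6, 2, 1

chunk_indices = torch.arange(seq_len) // chunk_size
start = ((chunk_indices - left_chunk_num).clamp(min=0) * chunk_size).unsqueeze(1)
end = ((chunk_indices + 1) * chunk_size).clamp(max=seq_len).unsqueeze(1)
indices = torch.arange(seq_len).unsqueeze(0).expand(seq_len, -1)
blocked = (indices < start) | (indices >= end)  # True where attention is NOT allowed

# Position 4 (third chunk) may attend to indices 2..5 but not to 0..1:
# blocked[4] == tensor([True, True, False, False, False, False])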
hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) + + +# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerAdapterLayer with SeamlessM4T->SeamlessM4Tv2 +class SeamlessM4Tv2ConformerAdapterLayer(nn.Module): + def __init__(self, config): + super().__init__() + embed_dim = config.hidden_size + dropout = config.adaptor_dropout + + self.kernel_size = config.adaptor_kernel_size + self.stride = config.adaptor_stride + + # 1. residual convolution + self.residual_layer_norm = nn.LayerNorm(embed_dim) + self.residual_conv = nn.Conv1d( + embed_dim, + 2 * embed_dim, + self.kernel_size, + stride=self.stride, + padding=self.stride // 2, + ) + self.activation = nn.GLU(dim=1) + + # Self-Attention + self.self_attn_layer_norm = nn.LayerNorm(embed_dim) + self.self_attn_conv = nn.Conv1d( + embed_dim, + 2 * embed_dim, + self.kernel_size, + stride=self.stride, + padding=self.stride // 2, + ) + self.self_attn = SeamlessM4Tv2ConformerSelfAttention(config, use_position_embeddings=False) + self.self_attn_dropout = nn.Dropout(dropout) + + # Feed-forward + self.ffn_layer_norm = nn.LayerNorm(embed_dim) + self.ffn = SeamlessM4Tv2ConformerFeedForward(config, act_fn="relu", dropout=dropout) + + def _compute_sub_sample_lengths_from_attention_mask(self, attention_mask): + pad = self.kernel_size // 2 + seq_lens = attention_mask.size(1) - (1 - attention_mask.int()).sum(1) + + seq_lens = ((seq_lens + 2 * pad - self.kernel_size) / self.stride) + 1 + + return seq_lens.floor() + + def forward( + self, + hidden_states, + attention_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ): + residual = self.residual_layer_norm(hidden_states) + + # Apply pooling to the residual to match the sequence length of the + # multi-head attention output. + # (batch, seq_len, feature_dim) -> (batch, feature_dim, seq_len) + residual = residual.transpose(1, 2) + residual = self.residual_conv(residual) + residual = self.activation(residual) + # (batch, feature_dim, seq_len) -> (batch, seq_len, feature_dim) + residual = residual.transpose(1, 2) + + hidden_states = self.self_attn_layer_norm(hidden_states) + # Apply pooling before feeding to the multihead-attention layer. + # (batch, seq_len, feature_dim) -> (batch, feature_dim, seq_len) + hidden_states = hidden_states.transpose(1, 2) + hidden_states = self.self_attn_conv(hidden_states) + hidden_states = self.activation(hidden_states) + # (batch, feature_dim, seq_len) -> (batch, seq_len, feature_dim) + hidden_states = hidden_states.transpose(1, 2) + + if attention_mask is not None: + sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to( + hidden_states.device + ) + attention_mask = _compute_new_attention_mask(hidden_states=hidden_states, seq_lens=sub_sampled_lengths) + attention_mask = _prepare_4d_attention_mask( + attention_mask, + hidden_states.dtype, + ) + + # The rest of the computation is identical to a vanilla Transformer + # encoder layer. 
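The strided convolutions in the adapter layer shrink the sequence, and `_compute_sub_sample_lengths_from_attention_mask` above tracks the new lengths with the usual Conv1d length formula; a numeric sketch with made-up sizes:

import math

kernel_size, stride = 8, 8             # made-up adaptor kernel / stride for the demo
pad = kernel_size // 2
seq_len = 137                          # unpadded length of one speech sequence

sub_sampled = math.floor((seq_len + 2 * pad - kernel_size) / stride + 1)
# sub_sampled == 18: a Conv1d(kernel_size=8, stride=8, padding=4) maps 137
# frames to 18 adapter positions.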
+ hidden_states, attn_weigths = self.self_attn( + hidden_states, + attention_mask=attention_mask, + output_attentions=output_attentions, + ) + hidden_states = self.self_attn_dropout(hidden_states) + hidden_states = hidden_states + residual + + residual = hidden_states + + hidden_states = self.ffn_layer_norm(hidden_states) + hidden_states = self.ffn(hidden_states) + residual + + return hidden_states + + +# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerAdapter with SeamlessM4T->SeamlessM4Tv2 +class SeamlessM4Tv2ConformerAdapter(nn.Module): + def __init__(self, config): + super().__init__() + + self.layers = nn.ModuleList( + SeamlessM4Tv2ConformerAdapterLayer(config) for _ in range(config.num_adapter_layers) + ) + + def forward(self, hidden_states, attention_mask): + # down project hidden_states if necessary + + for layer in self.layers: + hidden_states = layer(hidden_states, attention_mask) + + return hidden_states + + +############ TEXT / UNITS related code ################ + + +# Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding +class SeamlessM4Tv2SinusoidalPositionalEmbedding(nn.Module): + """This module produces sinusoidal positional embeddings of any length.""" + + def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None): + super().__init__() + self.offset = 2 + self.embedding_dim = embedding_dim + self.padding_idx = padding_idx + self.make_weights(num_positions + self.offset, embedding_dim, padding_idx) + + def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None): + emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx) + if hasattr(self, "weights"): + # in forward put the weights on the correct dtype and device of the param + emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device) + + self.register_buffer("weights", emb_weights, persistent=False) + + @staticmethod + def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None): + """ + Build sinusoidal embeddings. + + This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of + "Attention Is All You Need". + """ + half_dim = embedding_dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) + emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0) + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) + if embedding_dim % 2 == 1: + # zero pad + emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) + if padding_idx is not None: + emb[padding_idx, :] = 0 + + return emb.to(torch.get_default_dtype()) + + @torch.no_grad() + def forward( + self, input_ids: torch.Tensor = None, inputs_embeds: torch.Tensor = None, past_key_values_length: int = 0 + ): + if input_ids is not None: + bsz, seq_len = input_ids.size() + # Create the position ids from the input token ids. Any padded tokens remain padded. 
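A small sketch of the sinusoidal table built by `get_embedding` above, with tiny made-up sizes; half the dimensions hold sines, half cosines, and the padding row is zeroed.

import math
import torch

num_embeddings, embedding_dim, padding_idx = 10, 6, 1

half_dim = embedding_dim // 2
freq = torch.exp(torch.arange(half_dim).float() * -(math.log(10000) / (half_dim - 1)))
positions = torch.arange(num_embeddings).float().unsqueeze(1) * freq.unsqueeze(0)
table = torch.cat([torch.sin(positions), torch.cos(positions)], dim=1)
table[padding_idx, :] = 0              # the padding position gets an all-zero vector

assert table.shape == (num_embeddings, embedding_dim)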
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to( + input_ids.device + ) + else: + bsz, seq_len = inputs_embeds.size()[:-1] + position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length) + + # expand embeddings if needed + max_pos = self.padding_idx + 1 + seq_len + past_key_values_length + if max_pos > self.weights.size(0): + self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx) + + return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach() + + def create_position_ids_from_inputs_embeds(self, inputs_embeds, past_key_values_length): + """ + We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. + + Args: + inputs_embeds: torch.Tensor + + Returns: torch.Tensor + """ + input_shape = inputs_embeds.size()[:-1] + sequence_length = input_shape[1] + + position_ids = torch.arange( + self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device + ) + return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length + + +class SeamlessM4Tv2Attention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + # Copied from transformers.models.bart.modeling_bart.BartAttention.__init__ with Bart->SeamlessM4Tv2 + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + is_decoder: bool = False, + bias: bool = True, + is_causal: bool = False, + config: Optional[SeamlessM4Tv2Config] = None, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + self.config = config + + if (self.head_dim * num_heads) != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" + f" and `num_heads`: {num_heads})." 
+ ) + self.scaling = self.head_dim**-0.5 + self.is_decoder = is_decoder + self.is_causal = is_causal + + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + def _shape(self, projection: torch.Tensor) -> torch.Tensor: + new_projection_shape = projection.size()[:-1] + (self.num_heads, self.head_dim) + # move heads to 2nd position (B, T, H * D) -> (B, T, H, D) -> (B, H, T, D) + new_projection = projection.view(new_projection_shape).permute(0, 2, 1, 3) + return new_projection + + def forward( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + is_cross_attention = encoder_hidden_states is not None + batch_size, seq_length = hidden_states.shape[:2] + + # use encoder_hidden_states if cross attention + current_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states + # checking that the `sequence_length` of the `past_key_value` is the same as the he provided + # `encoder_hidden_states` to support prefix tuning + if is_cross_attention and past_key_value and past_key_value[0].shape[2] == current_states.shape[1]: + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + else: + key_states = self._shape(self.k_proj(current_states)) + value_states = self._shape(self.v_proj(current_states)) + if past_key_value is not None and not is_cross_attention: + # reuse k, v, self_attention + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + + query_states = self._shape(self.q_proj(hidden_states) * self.scaling) + attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2)) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + if attention_mask is not None: + attention_scores = attention_scores + attention_mask + + # (batch_size, n_heads, seq_length, key_length) + attn_weights = nn.functional.softmax(attention_scores.float(), dim=-1).type_as(attention_scores) + attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + # attn_output = torch.bmm(attn_probs, value_states) ? + context_states = torch.matmul(attn_weights, value_states) + # attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) ? 
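The `torch.cat(..., dim=2)` above is what lets the decoder reuse previously computed keys and values; a toy sketch of how the cache grows along the time axis (shapes made up):

import torch

batch, heads, head_dim = 1, 2, 4

past_key = torch.randn(batch, heads, 3, head_dim)   # 3 tokens already decoded
new_key = torch.randn(batch, heads, 1, head_dim)    # key for the current token

key_states = torch.cat([past_key, new_key], dim=2)
assert key_states.shape == (1, 2, 4, 4)             # the cache now covers 4 tokens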
+ context_states = context_states.permute(0, 2, 1, 3).contiguous().view(batch_size, seq_length, -1) + attn_output = self.out_proj(context_states) + + if output_attentions: + return attn_output, attn_weights, past_key_value + else: + return attn_output, None, past_key_value + + +# Copied from transformers.models.nllb_moe.modeling_nllb_moe.NllbMoeDenseActDense with NllbMoe->SeamlessM4Tv2,DenseActDense->FeedForwardNetwork, d_model->hidden_size +class SeamlessM4Tv2FeedForwardNetwork(nn.Module): + def __init__(self, config: SeamlessM4Tv2Config, ffn_dim: int): + super().__init__() + self.fc1 = nn.Linear(config.hidden_size, ffn_dim) + self.fc2 = nn.Linear(ffn_dim, config.hidden_size) + self.dropout = nn.Dropout(config.activation_dropout) + self.act = ACT2FN[config.activation_function] + + def forward(self, hidden_states): + hidden_states = self.fc1(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.dropout(hidden_states) + if ( + isinstance(self.fc2.weight, torch.Tensor) + and hidden_states.dtype != self.fc2.weight.dtype + and (self.fc2.weight.dtype != torch.int8 and self.fc2.weight.dtype != torch.uint8) + ): + hidden_states = hidden_states.to(self.fc2.weight.dtype) + hidden_states = self.fc2(hidden_states) + return hidden_states + + +# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TEncoderLayer with SeamlessM4T->SeamlessM4Tv2 +class SeamlessM4Tv2EncoderLayer(nn.Module): + def __init__(self, config: SeamlessM4Tv2Config, encoder_ffn_dim=None, encoder_attention_heads=None): + super().__init__() + encoder_ffn_dim = config.encoder_ffn_dim if encoder_ffn_dim is None else encoder_ffn_dim + encoder_attention_heads = ( + config.encoder_attention_heads if encoder_attention_heads is None else encoder_attention_heads + ) + + self.embed_dim = config.hidden_size + self.self_attn = SeamlessM4Tv2Attention( + embed_dim=self.embed_dim, + num_heads=encoder_attention_heads, + dropout=config.attention_dropout, + ) + self.attn_dropout = nn.Dropout(config.dropout) + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + + self.ffn = SeamlessM4Tv2FeedForwardNetwork(config, ffn_dim=encoder_ffn_dim) + + self.ffn_layer_norm = nn.LayerNorm(config.hidden_size) + self.ffn_dropout = nn.Dropout(config.activation_dropout) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + output_attentions: bool = False, + ) -> torch.Tensor: + """ + Args: + hidden_states (`torch.FloatTensor`): + input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`): + attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very + large negative values. 
+ """ + residual = hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + hidden_states, attn_weights, _ = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + output_attentions=output_attentions, + ) + hidden_states = self.attn_dropout(hidden_states) + hidden_states = residual + hidden_states + + residual = hidden_states + + hidden_states = self.ffn_layer_norm(hidden_states) + + hidden_states = self.ffn(hidden_states) + hidden_states = self.ffn_dropout(hidden_states) + + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TDecoderLayer with SeamlessM4T->SeamlessM4Tv2 +class SeamlessM4Tv2DecoderLayer(nn.Module): + def __init__(self, config: SeamlessM4Tv2Config, decoder_ffn_dim=None, decoder_attention_heads=None): + super().__init__() + decoder_ffn_dim = config.decoder_ffn_dim if decoder_ffn_dim is None else decoder_ffn_dim + decoder_attention_heads = ( + config.decoder_attention_heads if decoder_attention_heads is None else decoder_attention_heads + ) + + self.embed_dim = config.hidden_size + self.self_attn = SeamlessM4Tv2Attention( + embed_dim=self.embed_dim, + num_heads=decoder_attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + ) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.attn_dropout = nn.Dropout(config.dropout) + + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.cross_attention = SeamlessM4Tv2Attention( + self.embed_dim, decoder_attention_heads, config.attention_dropout, is_decoder=True + ) + self.cross_attention_layer_norm = nn.LayerNorm(self.embed_dim) + + self.ffn = SeamlessM4Tv2FeedForwardNetwork(config, ffn_dim=decoder_ffn_dim) + + self.ffn_layer_norm = nn.LayerNorm(config.hidden_size) + self.ffn_dropout = nn.Dropout(config.activation_dropout) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = True, + ) -> torch.Tensor: + """ + Args: + hidden_states (`torch.FloatTensor`): + input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`): + attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very + large negative values. + encoder_hidden_states (`torch.FloatTensor`): + cross attention input to the layer of shape `(batch, seq_len, embed_dim)` + encoder_attention_mask (`torch.FloatTensor`): + encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by + very large negative values. + past_key_value (`Tuple(torch.FloatTensor)`): + cached past key and value projection states + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. 
+ """ + residual = hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + # Self Attention + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + # add present self-attn cache to positions 1,2 of present_key_value tuple + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + past_key_value=self_attn_past_key_value, + attention_mask=attention_mask, + output_attentions=output_attentions, + ) + hidden_states = self.attn_dropout(hidden_states) + hidden_states = residual + hidden_states + + # Cross-Attention Block + cross_attn_present_key_value = None + cross_attn_weights = None + if encoder_hidden_states is not None: + residual = hidden_states + hidden_states = self.cross_attention_layer_norm(hidden_states) + + # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + + hidden_states, cross_attn_weights, cross_attn_present_key_value = self.cross_attention( + hidden_states=hidden_states, + encoder_hidden_states=encoder_hidden_states, + past_key_value=cross_attn_past_key_value, + attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + ) + hidden_states = self.attn_dropout(hidden_states) + hidden_states = residual + hidden_states + + # add cross-attn to positions 3,4 of present_key_value tuple + present_key_value += cross_attn_present_key_value + + # Fully Connected + residual = hidden_states + + hidden_states = self.ffn_layer_norm(hidden_states) + + hidden_states = self.ffn(hidden_states) + hidden_states = self.ffn_dropout(hidden_states) + + hidden_states = residual + hidden_states + + outputs = (hidden_states, present_key_value) + + if output_attentions: + outputs += (self_attn_weights, cross_attn_weights) + + return outputs + + +class SeamlessM4Tv2TextToUnitDecoderLayer(nn.Module): + def __init__(self, config: SeamlessM4Tv2Config, decoder_ffn_dim=None, decoder_attention_heads=None): + super().__init__() + decoder_ffn_dim = config.decoder_ffn_dim if decoder_ffn_dim is None else decoder_ffn_dim + decoder_attention_heads = ( + config.decoder_attention_heads if decoder_attention_heads is None else decoder_attention_heads + ) + self.dropout = config.dropout + self.embed_dim = config.hidden_size + + self.self_attn = SeamlessM4Tv2Attention( + embed_dim=self.embed_dim, + num_heads=decoder_attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + ) + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + + self.conv1 = nn.Conv1d(self.embed_dim, self.embed_dim, kernel_size=7, stride=1, padding="same") + self.activation_fn = ACT2FN[config.activation_function] + self.conv2 = nn.Conv1d(self.embed_dim, self.embed_dim, kernel_size=7, stride=1, padding="same") + + self.conv_layer_norm = nn.LayerNorm(config.hidden_size) + self.conv_dropout = nn.Dropout(self.dropout) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + padding_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = False, + ) -> torch.Tensor: + """ + Args: + hidden_states (`torch.FloatTensor`): + input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`): + attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very + large negative values. 
+ padding_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* + or 0 for *masked* + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + output_attentions=output_attentions, + ) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + # Conv + residual = hidden_states + + # Apply padding mask to avoid leaking padded positions in the convolution layer + if padding_mask is not None: + hidden_states = hidden_states.masked_fill(~padding_mask.bool().unsqueeze(-1), 0.0) + hidden_states = self.conv1(hidden_states.transpose(1, 2)).transpose(1, 2) + + if padding_mask is not None: + hidden_states = hidden_states.masked_fill(~padding_mask.bool().unsqueeze(-1), 0.0) + + hidden_states = self.activation_fn(hidden_states) + hidden_states = self.conv2(hidden_states.transpose(1, 2)).transpose(1, 2) + + hidden_states = self.conv_dropout(hidden_states) + hidden_states = residual + hidden_states + hidden_states = self.conv_layer_norm(hidden_states) + + outputs = (hidden_states, present_key_value) + + if output_attentions: + outputs += self_attn_weights + + return outputs + + +############ SUB-MODELS related code ################ + + +class SeamlessM4Tv2PreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
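# Illustrative sketch (editorial annotation, not part of the upstream modeling file).
# The convolutional sub-block above zeroes padded time steps before each Conv1d so that a
# kernel sliding along the sequence never mixes real frames with padding. Same idea in
# isolation (padding_mask: 1 = keep, 0 = padded):
import torch
import torch.nn as nn


def masked_conv1d(hidden_states, padding_mask, conv):
    """hidden_states: (batch, seq, channels); padding_mask: (batch, seq) of 0/1."""
    hidden_states = hidden_states.masked_fill(~padding_mask.bool().unsqueeze(-1), 0.0)
    # Conv1d expects (batch, channels, seq), hence the two transposes
    return conv(hidden_states.transpose(1, 2)).transpose(1, 2)


conv = nn.Conv1d(4, 4, kernel_size=7, padding="same")
x = torch.randn(2, 10, 4)
mask = torch.ones(2, 10, dtype=torch.long)
mask[1, 6:] = 0                                    # second sample has 4 padded steps
print(masked_conv1d(x, mask, conv).shape)          # torch.Size([2, 10, 4])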
+ """ + + config_class = SeamlessM4Tv2Config + base_model_prefix = "seamless_m4t_v2" + supports_gradient_checkpointing = True + _no_split_modules = [ + "SeamlessM4Tv2EncoderLayer", + "SeamlessM4Tv2DecoderLayer", + "SeamlessM4Tv2ConformerEncoderLayer", + "SeamlessM4Tv2TextToUnitDecoderLayer", + ] + + def _init_weights(self, module): + """Initialize the weights""" + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, SeamlessM4Tv2ConformerSelfAttention): + if hasattr(module, "pos_bias_u"): + nn.init.xavier_uniform_(module.pos_bias_u) + if hasattr(module, "pos_bias_v"): + nn.init.xavier_uniform_(module.pos_bias_v) + elif isinstance(module, SeamlessM4Tv2ConformerFeatureProjection): + k = math.sqrt(1 / module.projection.in_features) + nn.init.uniform_(module.projection.weight, a=-k, b=k) + nn.init.uniform_(module.projection.bias, a=-k, b=k) + elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + elif isinstance(module, (nn.Conv1d, nn.ConvTranspose1d)): + nn.init.kaiming_normal_(module.weight) + if module.bias is not None: + k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) + nn.init.uniform_(module.bias, a=-k, b=k) + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TPreTrainedModel._compute_sub_sample_lengths_from_attention_mask + def _compute_sub_sample_lengths_from_attention_mask(self, attention_mask): + kernel_size, stride = self.config.adaptor_kernel_size, self.config.adaptor_stride + pad = kernel_size // 2 + seq_lens = attention_mask.size(1) - (1 - attention_mask.int()).sum(1) + + seq_lens = ((seq_lens + 2 * pad - kernel_size) / stride) + 1 + + return seq_lens.floor() + + def _indices_to_subwords(self, input_ids): + """ + Returns the corresponding text string for each input id. + """ + if not hasattr(self.generation_config, "id_to_text"): + raise ValueError( + """This model generation config doesn't have a `id_to_text` key which maps + token ids to subwords. Make sure to load the right generation config.""" + ) + batch_size, sequence_len = input_ids.shape + + subwords_batch = [] + for batch_id in range(batch_size): + subwords = [] + for i in range(sequence_len): + subword = self.generation_config.id_to_text.get(str(input_ids[batch_id, i].item())) + subwords.append(str(subword)) + subwords_batch.append(subwords) + return subwords_batch + + def _count_character_length_in_subword( + self, + input_ids, + subwords_batch, + merge_space_with_prev_subword=False, + pad_token_id=0, + unk_token_id=1, + space="▁", + ): + """ + Counts the number of characters per text string associated with the input token id. + + Args: + input_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. + subwords_batch (`List[List[str]]` of shape `(batch_size, sequence_length)`): + Corresponding text string for each input id. + merge_space_with_prev_subword (`bool`, *optional*, defaults to `False`): + Indicates if the space character is merged with the previous subword. If `False`, it will be merged + with the next subword. + pad_token_id (`int`, *optional*, defaults to 0): + The id of the _padding_ text token. 
If it is encountered when calculating the length of a subword + sample, the lengths of subsequent subwords will be set to 0. + unk_token_id (`int`, *optional*, defaults to 1): + The id of the _unknown_ text token. Associated to a subword of length 1. + space (`str`, *optional*, defaults to `"▁"`): + The space character. + """ + batch_size, _ = input_ids.shape + + char_count_per_id = input_ids.new_zeros(input_ids.size()) + + subword_lens = input_ids.ne(pad_token_id).sum(1) + + for batch_id in range(batch_size): + # We slice out the tensor till the padding index. + subword_indices = input_ids[batch_id, : subword_lens[batch_id]] + subwords = subwords_batch[batch_id][: subword_lens[batch_id]] + + is_next_start_with_space = [ + len(subwords[i + 1]) > 1 and subwords[i + 1][0] == space if i < len(subwords) - 1 else False + for i in range(len(subwords)) + ] + is_punc = [ + len(subwords[i]) == 1 + and not subwords[i].isalpha() + and not subwords[i].isnumeric() + and subwords[i] != space + for i in range(len(subwords)) + ] + for i, (subword_idx, subword) in enumerate(zip(subword_indices, subwords)): + if subword_idx == pad_token_id: + break + + if subword_idx == unk_token_id: + # We set char_len to 1 for an unk token. + char_len = 1 + + if merge_space_with_prev_subword and is_next_start_with_space[i]: + char_len += 1 + else: + # By default, spaces are merged with the next subword. + # char_len includes the space. + char_len = len(subword) + + if merge_space_with_prev_subword: + # Add the space for the next subword. + if is_next_start_with_space[i]: + char_len += 1 + # Subtract the space for the current subword. + if i > 0 and is_next_start_with_space[i - 1]: + char_len -= 1 + else: + # Merge space with punctuation mark by default. + if is_punc[i] and is_next_start_with_space[i]: + char_len += 1 + # Subtract the space for the subword succeeding the punctuation mark. + elif i > 0 and is_punc[i - 1] and is_next_start_with_space[i - 1]: + char_len -= 1 + + char_count_per_id[batch_id, i] = char_len + + return char_count_per_id + + def _get_char_input_ids(self, input_ids, subwords_batch, char_count_per_id, pad_token_id=0, unk_token_id=1): + """ + Returns the corresponding character input id for each character of `subwords_batch`. + + Args: + input_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. + subwords_batch (`List[List[str]]` of shape `(batch_size, sequence_length)`): + Corresponding text string for each input id. + char_count_per_id (`torch.Tensor` of shape `(batch_size, sequence_length)`): + Number of characters per input id. + pad_token_id (`int`, *optional*, defaults to 0): + The id of the _padding_ text token. If it is encountered when calculating the length of a subword + sample, the lengths of subsequent subwords will be set to 0. + unk_token_id (`int`, *optional*, defaults to 1): + The id of the _unknown_ text token. Associated to a subword of length 1. + Returns: + `torch.Tensor`: Tensor of shape `(batch_size, char_sequence_length)` containing the id of each character. + """ + if not hasattr(self.generation_config, "char_to_id"): + raise ValueError( + """This model generation config doesn't have a `char_to_id` key which maps + characters to character ids. 
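# Illustrative sketch (editorial annotation, not part of the upstream modeling file).
# `_count_character_length_in_subword` above charges each SentencePiece word-boundary
# marker "▁" either to the subword that carries it (the default) or to the previous one,
# so the per-token character counts sum to the length of the concatenated pieces.
# Heavily simplified default case, ignoring the punctuation and
# merge_space_with_prev_subword adjustments handled above:
pieces = ["▁Hel", "lo", "▁world", "!"]
char_counts = [len(piece) for piece in pieces]     # "▁" is counted with its own piece
print(char_counts)                                 # [4, 2, 6, 1]
print(sum(char_counts), len("".join(pieces)))      # 13 13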
Make sure to load the right generation config.""" + ) + + batch_size = input_ids.shape[0] + max_len = int(char_count_per_id.sum(1).max().item()) + + char_seqs = input_ids.new_zeros((batch_size, max_len)).fill_(pad_token_id) + + subword_lens = input_ids.ne(pad_token_id).sum(1) + + for batch_id in range(batch_size): + total = 0 + subword_indices = input_ids[batch_id, : subword_lens[batch_id]] + subwords = subwords_batch[batch_id][: subword_lens[batch_id]] + for subword_idx, subword in zip(subword_indices, subwords): + if subword_idx == unk_token_id: + char_ids = [unk_token_id] + else: + # Get char token indices corresponding to the subwords. + char_ids = [self.generation_config.char_to_id.get(ch, unk_token_id) for ch in list(subword)] + char_seq_len = len(char_ids) + char_seqs[batch_id, total : total + char_seq_len] = torch.tensor(char_ids).to(char_seqs) + total += char_seq_len + return char_seqs + + def _hard_upsample(self, hidden_states, durations): + """ + Repeats the time dimension of each sample in the batch based on the corresponding duration. + + Args: + hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, *)`, *optional*): + The sequence to repeat, where `*` is any number of sequence-specific dimensions including none. + durations (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Indicates how many times to repeat time segments. + """ + if hidden_states.size(0) == 1: + hidden_states = torch.repeat_interleave(hidden_states, durations.view(-1), dim=1) + else: + # if batched sample, need to interleave per sample, and pad -> loss of parallelism + if hidden_states.shape[0] > 1 and self.training: + logger.warning_once( + """`self.training=True` and you use batching. You lose parallelism during the hifigan + forward pass because the samples are interleaved.""" + ) + hidden_states = [ + torch.repeat_interleave(hidden_state, duration, dim=0) + for (hidden_state, duration) in zip(hidden_states, durations) + ] + + hidden_states = nn.utils.rnn.pad_sequence(hidden_states, batch_first=True) + + return hidden_states + + +@add_start_docstrings( + """Transformer speech encoder consisting of *config.speech_encoder_layers* conformer self attention layers. 
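# Illustrative sketch (editorial annotation, not part of the upstream modeling file).
# `_hard_upsample` above repeats every time step by its predicted duration. For a single
# sample this is torch.repeat_interleave along the time axis; in a batch each sample grows
# to a different length, so the expanded sequences are padded back into one tensor:
import torch
import torch.nn as nn

hidden = torch.arange(6.0).view(1, 3, 2)           # (batch=1, seq=3, dim=2)
durations = torch.tensor([[1, 3, 2]])
print(torch.repeat_interleave(hidden, durations.view(-1), dim=1).shape)   # torch.Size([1, 6, 2])

# batched case: interleave per sample, then pad to the longest result
batch = [torch.randn(3, 2), torch.randn(4, 2)]
durs = [torch.tensor([2, 1, 1]), torch.tensor([1, 1, 1, 2])]
expanded = [torch.repeat_interleave(h, d, dim=0) for h, d in zip(batch, durs)]
print(nn.utils.rnn.pad_sequence(expanded, batch_first=True).shape)        # torch.Size([2, 5, 2])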
+ Each layer is a [`SeamlessM4Tv2ConformerEncoderLayer`].""", + SEAMLESS_M4T_V2_START_DOCSTRING, +) +# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TSpeechEncoder with SeamlessM4T->SeamlessM4Tv2 +class SeamlessM4Tv2SpeechEncoder(SeamlessM4Tv2PreTrainedModel): + main_input_name = "input_features" + + def __init__(self, config: SeamlessM4Tv2Config): + super().__init__(config) + + self.feature_projection = SeamlessM4Tv2ConformerFeatureProjection(config) + self.encoder = SeamlessM4Tv2ConformerEncoder(config) + self.intermediate_ffn = SeamlessM4Tv2ConformerFeedForward(config, act_fn="relu", dropout=0.0) + self.adapter = SeamlessM4Tv2ConformerAdapter(config) if config.add_adapter else None + self.inner_layer_norm = nn.LayerNorm(config.hidden_size) + + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + input_features: Optional[torch.Tensor], + attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **kwargs, + ) -> Union[Tuple, Wav2Vec2BaseModelOutput]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_features is None: + raise ValueError( + """Both `input_features` and `inputs_embeds` are `None` in `SeamlessM4Tv2SpeechEncoder.forward`. + Make sure one of them is not `None`.""" + ) + + hidden_states = self.feature_projection(input_features) + + encoder_outputs = self.encoder( + hidden_states, + attention_mask=attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = encoder_outputs[0] + + expanded_hidden_states = self.intermediate_ffn(hidden_states) + hidden_states = hidden_states + 0.5 * expanded_hidden_states + + if self.adapter is not None: + hidden_states = self.adapter(hidden_states, attention_mask=attention_mask) + + hidden_states = self.inner_layer_norm(hidden_states) + + if not return_dict: + return (hidden_states,) + encoder_outputs[1:] + + return Wav2Vec2BaseModelOutput( + last_hidden_state=hidden_states, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +# inspired from MBart and NllbMoe +@add_start_docstrings( + "Transformer encoder consisting of *config.encoder_layers* self attention layers. 
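# Illustrative sketch (editorial annotation, not part of the upstream modeling file).
# The speech encoder above adds only half of the intermediate feed-forward output back onto
# the hidden states (h + 0.5 * ffn(h)), a half-step residual in the spirit of the Conformer
# "macaron" feed-forward blocks. Toy version with assumed sizes:
import torch
import torch.nn as nn

ffn = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 8))
h = torch.randn(2, 5, 8)
h = h + 0.5 * ffn(h)
print(h.shape)                                     # torch.Size([2, 5, 8])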
Each layer is a [`SeamlessM4Tv2EncoderLayer`].", + SEAMLESS_M4T_V2_START_DOCSTRING, + """ + embed_tokens (`nn.Embedding`, *optional*): + Input embedding + is_t2u_encoder (`bool`, *optional*, defaults to `False`): + indicates if it belongs to the text-to-units model, in which case it won't have input embeddings + """, +) +# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TEncoder with SeamlessM4T->SeamlessM4Tv2 +class SeamlessM4Tv2Encoder(SeamlessM4Tv2PreTrainedModel): + def __init__( + self, + config: SeamlessM4Tv2Config, + embed_tokens: Optional[nn.Embedding] = None, + is_t2u_encoder: bool = False, + ): + super().__init__(config) + + self.dropout = config.dropout + self.layerdrop = config.encoder_layerdrop + self.padding_idx = config.pad_token_id + embed_dim = config.hidden_size + + self.is_t2u_encoder = is_t2u_encoder + self.max_source_positions = config.max_position_embeddings + + if not self.is_t2u_encoder: + self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 + + self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx) + + if embed_tokens is not None: + self.embed_tokens.weight = embed_tokens.weight + + self.embed_positions = SeamlessM4Tv2SinusoidalPositionalEmbedding( + self.max_source_positions, + embed_dim, + self.padding_idx, + ) + + layers = [] + for _ in range(config.encoder_layers): + layers.append( + SeamlessM4Tv2EncoderLayer( + config, + encoder_attention_heads=config.encoder_attention_heads, + encoder_ffn_dim=config.encoder_ffn_dim, + ) + ) + + self.layers = nn.ModuleList(layers) + + self.layer_norm = nn.LayerNorm(config.hidden_size) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **kwargs, + ) -> Union[Tuple, BaseModelOutput]: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. 
+ return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and self.is_t2u_encoder: + raise ValueError( + "You cannot pass input_ids to the encoder of the text_to_units model. Pass inputs_embeds instead." + ) + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input = input_ids + input_shape = input.shape + input_ids = input_ids.view(-1, input_shape[-1]) + elif inputs_embeds is not None: + input = inputs_embeds[:, :, -1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + + if not self.is_t2u_encoder: + embed_pos = self.embed_positions(input) + + hidden_states = inputs_embeds + embed_pos.to(inputs_embeds.device) + else: + hidden_states = inputs_embeds + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + # expand attention_mask + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + to_drop = False + if self.training: + dropout_probability = torch.rand([]) + if dropout_probability < self.layerdrop: # skip the layer + to_drop = True + + if to_drop: + layer_outputs = (None, None) + else: + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + encoder_layer.forward, + hidden_states, + attention_mask, + output_attentions, + ) + else: + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + hidden_states = self.layer_norm(hidden_states) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +@add_start_docstrings( + "Transformer decoder consisting of *config.decoder_layers* layers. 
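# Illustrative sketch (editorial annotation, not part of the upstream modeling file).
# The encoder loop above uses LayerDrop (https://arxiv.org/abs/1909.11556): during training
# each layer is skipped outright with probability `layerdrop`, which regularizes the network
# and makes it robust to dropping layers at inference time. The control flow in isolation:
import torch
import torch.nn as nn


def run_with_layerdrop(layers, x, p, training):
    for layer in layers:
        if training and torch.rand([]) < p:
            continue                               # skip the whole layer
        x = layer(x)
    return x


layers = nn.ModuleList([nn.Linear(8, 8) for _ in range(4)])
print(run_with_layerdrop(layers, torch.randn(2, 8), p=0.5, training=True).shape)  # torch.Size([2, 8])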
Each layer is a [`SeamlessM4Tv2DecoderLayer`].", + SEAMLESS_M4T_V2_START_DOCSTRING, + """ + embed_tokens (`nn.Embedding`, *optional*): + Input embedding + """, +) +# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TDecoder with SeamlessM4T->SeamlessM4Tv2 +class SeamlessM4Tv2Decoder(SeamlessM4Tv2PreTrainedModel): + def __init__( + self, + config: SeamlessM4Tv2Config, + embed_tokens: Optional[nn.Embedding] = None, + ): + super().__init__(config) + self.dropout = config.dropout + self.layerdrop = config.decoder_layerdrop + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + self.max_target_positions = config.max_position_embeddings + self.embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0 + + if embed_tokens is not None: + # if embed_tokens defined, use its shape instead + self.embed_tokens = nn.Embedding(embed_tokens.num_embeddings, embed_tokens.embedding_dim, self.padding_idx) + self.embed_tokens.weight = embed_tokens.weight + else: + self.embed_tokens = nn.Embedding(self.vocab_size, config.hidden_size, self.padding_idx) + + self.embed_positions = SeamlessM4Tv2SinusoidalPositionalEmbedding( + self.max_target_positions, + config.hidden_size, + padding_idx=self.padding_idx, + ) + + layers = [] + for _ in range(config.decoder_layers): + layers.append( + SeamlessM4Tv2DecoderLayer( + config, + decoder_attention_heads=config.decoder_attention_heads, + decoder_ffn_dim=config.decoder_ffn_dim, + ) + ) + self.layers = nn.ModuleList(layers) + self.layer_norm = nn.LayerNorm(config.hidden_size) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.LongTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + of the decoder. 
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): + Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values + selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of + shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the + cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those + that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of + all `decoder_input_ids` of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+ """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + elif input_ids is not None: + input = input_ids + input_shape = input.size() + input_ids = input_ids.view(-1, input_shape[-1]) + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + input = inputs_embeds[:, :, -1] + else: + raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + + attention_mask = _prepare_4d_causal_attention_mask( + attention_mask, input_shape, inputs_embeds, past_key_values_length + ) + + # expand encoder attention mask + if encoder_hidden_states is not None and encoder_attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + encoder_attention_mask = _prepare_4d_attention_mask( + encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] + ) + + # embed positions + positions = self.embed_positions(input, past_key_values_length=past_key_values_length) + + hidden_states = inputs_embeds + positions.to(inputs_embeds.device) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`..." 
+ ) + use_cache = False + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None + next_decoder_cache = () if use_cache else None + + for idx, decoder_layer in enumerate(self.layers): + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + if output_hidden_states: + all_hidden_states += (hidden_states,) + if self.training: + dropout_probability = torch.rand([]) + if dropout_probability < self.layerdrop: + continue + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + attention_mask, + encoder_hidden_states, + encoder_attention_mask, + None, + output_attentions, + use_cache, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[1],) + + if output_attentions: + all_self_attns += (layer_outputs[2],) + + if encoder_hidden_states is not None: + all_cross_attentions += (layer_outputs[3],) + + hidden_states = self.layer_norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple( + v + for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + cross_attentions=all_cross_attentions, + ) + + +@add_start_docstrings( + "Transformer decoder consisting of *config.decoder_layers* layers. 
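# Illustrative sketch (editorial annotation, not part of the upstream modeling file).
# The decoder above builds an additive causal mask that is offset by `past_key_values_length`
# when a cache is present: each new token may attend to every cached position plus the
# already-generated part of the current chunk. Independent sketch of such a mask (it does
# not reproduce the private `_prepare_4d_causal_attention_mask` helper used above):
import torch


def causal_mask(tgt_len, past_len, dtype=torch.float32):
    """Return a (tgt_len, past_len + tgt_len) additive mask: 0 = visible, large negative = hidden."""
    full_len = past_len + tgt_len
    mask = torch.full((tgt_len, full_len), torch.finfo(dtype).min, dtype=dtype)
    visible = torch.arange(full_len) <= (torch.arange(tgt_len).unsqueeze(-1) + past_len)
    return mask.masked_fill(visible, 0.0)


print(causal_mask(3, past_len=2))                  # row i sees past_len + i + 1 key positions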
Each layer is a [`SeamlessM4Tv2DecoderLayer`].", + SEAMLESS_M4T_V2_START_DOCSTRING, + """ + embed_tokens (`nn.Embedding`, *optional*): + Input embedding + """, +) +class SeamlessM4Tv2TextToUnitDecoder(SeamlessM4Tv2PreTrainedModel): + def __init__( + self, + config: SeamlessM4Tv2Config, + embed_tokens: Optional[nn.Embedding] = None, + ): + super().__init__(config) + self.dropout = config.dropout + self.layerdrop = config.decoder_layerdrop + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + self.max_target_positions = config.max_position_embeddings + self.embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0 + + if embed_tokens is not None: + # if embed_tokens defined, use its shape instead + self.embed_tokens = nn.Embedding(embed_tokens.num_embeddings, embed_tokens.embedding_dim, self.padding_idx) + self.embed_tokens.weight = embed_tokens.weight + else: + self.embed_tokens = nn.Embedding(self.vocab_size, config.hidden_size, self.padding_idx) + + self.embed_char = nn.Embedding(config.char_vocab_size, config.hidden_size) + self.embed_char_positions = SeamlessM4Tv2SinusoidalPositionalEmbedding( + self.max_target_positions, + config.hidden_size, + padding_idx=self.padding_idx, + ) + + self.pos_emb_alpha_char = nn.Parameter(torch.ones(1)) + self.pos_emb_alpha = nn.Parameter(torch.ones(1)) + self.duration_predictor = SeamlessM4Tv2VariancePredictor( + config.variance_predictor_embed_dim, + config.variance_predictor_hidden_dim, + config.variance_predictor_kernel_size, + config.variance_pred_dropout, + ) + + self.embed_positions = SeamlessM4Tv2SinusoidalPositionalEmbedding( + self.max_target_positions, + config.hidden_size, + padding_idx=self.padding_idx, + ) + + layers = [] + for _ in range(config.decoder_layers): + layers.append( + SeamlessM4Tv2TextToUnitDecoderLayer( + config, + decoder_attention_heads=config.decoder_attention_heads, + decoder_ffn_dim=config.decoder_ffn_dim, + ) + ) + self.layers = nn.ModuleList(layers) + self.layer_norm = nn.LayerNorm(config.hidden_size) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + def forward( + self, + char_input_ids: torch.LongTensor = None, + char_count_per_id: torch.LongTensor = None, + encoder_hidden_states: torch.FloatTensor = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SeamlessM4Tv2TextToUnitDecoderOutput]: + r""" + Args: + char_input_ids (`torch.LongTensor` of shape `(batch_size, char_sequence_length)`): + Character indices. The correspondence between characters and indices can be found in `char_to_id`, a + dictionary in the generation configuration. + char_count_per_id (`torch.Tensor` of shape `(batch_size, encoder_sequence_length)`): + Number of characters per text input id. + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + of the decoder. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # create padding mask for character lengths + char_padding_mask = _compute_new_attention_mask(char_input_ids, char_count_per_id.sum(1)) + + # upsample hidden states according to characters sequence lengths + char_hidden_states = self._hard_upsample(encoder_hidden_states, char_count_per_id) + # embed char positions + char_positions = self.pos_emb_alpha_char * self.embed_char_positions(inputs_embeds=char_hidden_states) + # update char hidden states with positions and char embeddings + char_hidden_states = self.embed_char(char_input_ids) * self.embed_scale + char_positions + char_hidden_states + + # predict duration + log_dur_pred = self.duration_predictor(char_hidden_states, padding_mask=char_padding_mask) + dur_out = torch.clamp(torch.round((torch.exp(log_dur_pred) - 1)).long(), min=1) + dur_out = dur_out.masked_fill(~char_padding_mask.bool(), 0.0) + + # upsample char hidden states according to predicted duration + char_hidden_states = self._hard_upsample(char_hidden_states, dur_out) + + positions = self.pos_emb_alpha * self.embed_positions(inputs_embeds=char_hidden_states) + hidden_states = char_hidden_states + positions + + padding_mask = _compute_new_attention_mask(hidden_states, dur_out.sum(1)) + attention_mask = _prepare_4d_attention_mask(padding_mask, hidden_states.dtype) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + + for idx, decoder_layer in enumerate(self.layers): + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + if output_hidden_states: + all_hidden_states += (hidden_states,) + if self.training: + dropout_probability = torch.rand([]) + if dropout_probability < self.layerdrop: + continue + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + attention_mask, + padding_mask, + output_attentions, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + padding_mask=padding_mask, + output_attentions=output_attentions, + ) + hidden_states = layer_outputs[0] + + if output_attentions: + all_self_attns += (layer_outputs[2],) + + hidden_states = self.layer_norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states, all_self_attns, padding_mask] if v is not None) + return SeamlessM4Tv2TextToUnitDecoderOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attns, + padding_mask=padding_mask, + ) + + +@add_start_docstrings( + "Transformer bare text-to-unit encoder-decoder. 
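# Illustrative sketch (editorial annotation, not part of the upstream modeling file).
# The text-to-unit decoder above turns per-character log-duration predictions into integer
# frame counts with clamp(round(exp(x) - 1), min=1), zeroes the padded positions, and then
# hard-upsamples the character states by those counts. The duration arithmetic alone:
import torch

log_dur_pred = torch.tensor([[0.0, 1.2, -0.5, 2.0]])    # toy predictor output
padding_mask = torch.tensor([[1, 1, 1, 0]])             # last position is padding
dur = torch.clamp(torch.round(torch.exp(log_dur_pred) - 1).long(), min=1)
dur = dur.masked_fill(~padding_mask.bool(), 0)
print(dur)                                              # tensor([[1, 2, 1, 0]])
print(dur.sum(1))                                       # frames to generate per sample: tensor([4])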
The encoder is a [`SeamlessM4Tv2Encoder`] without embeddings and the decoder is a [`SeamlessM4Tv2TextToUnitDecoder`].", + SEAMLESS_M4T_V2_START_DOCSTRING, + """ + embed_tokens_decoder (`nn.Embedding`, *optional*): input embedding of the decoder. + """, +) +class SeamlessM4Tv2TextToUnitModel(SeamlessM4Tv2PreTrainedModel): + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TTextToUnitModel.__init__ with SeamlessM4T->SeamlessM4Tv2, Decoder->TextToUnitDecoder + def __init__( + self, + config: SeamlessM4Tv2Config, + embed_tokens_decoder: Optional[nn.Embedding] = None, + ): + super().__init__(config) + + self.encoder = SeamlessM4Tv2Encoder(config, is_t2u_encoder=True) + self.decoder = SeamlessM4Tv2TextToUnitDecoder(config, embed_tokens_decoder) + + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + char_input_ids: torch.LongTensor = None, + char_count_per_id: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], Seq2SeqModelOutput]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if encoder_outputs is None: + encoder_outputs = self.encoder( + input_ids=input_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True + elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): + encoder_outputs = BaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + + # decoder outputs consists of (dec_features, dec_hidden, dec_attn, padding_mask) + decoder_outputs = self.decoder( + char_input_ids=char_input_ids, + char_count_per_id=char_count_per_id, + encoder_hidden_states=encoder_outputs[0], + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + return decoder_outputs + encoder_outputs + + return SeamlessM4Tv2TextToUnitOutput( + last_hidden_state=decoder_outputs.last_hidden_state, + padding_mask=decoder_outputs.padding_mask, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + +@add_start_docstrings( + "Transformer text-to-unit encoder-decoder with a language model head. The base encoder-decoder model is a [`SeamlessM4Tv2TextToUnitModel`].", + SEAMLESS_M4T_V2_START_DOCSTRING, + """ + embed_tokens_decoder (`nn.Embedding`, *optional*): input embedding of the decoder. 
+ """, +) +class SeamlessM4Tv2TextToUnitForConditionalGeneration(SeamlessM4Tv2PreTrainedModel): + _keys_to_ignore_on_load_missing = [ + "vocoder", + "speech_encoder", + "text_encoder", + "text_decoder", + ] + _tied_weights_keys = ["decoder.embed_tokens.weight", "lm_head.weight"] + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TTextToUnitForConditionalGeneration.__init__ with SeamlessM4T->SeamlessM4Tv2 + def __init__( + self, + config: SeamlessM4Tv2Config, + embed_tokens_decoder: Optional[nn.Embedding] = None, + ): + # update config - used principaly for bos_token_id etc. + config = copy.deepcopy(config) + for param, val in config.to_dict().items(): + if param.startswith("t2u_"): + config.__setattr__(param[4:], val) + super().__init__(config) + + self.model = SeamlessM4Tv2TextToUnitModel(config, embed_tokens_decoder) + + self.lm_head = nn.Linear(config.hidden_size, config.t2u_vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TTextToUnitForConditionalGeneration.get_encoder + def get_encoder(self): + return self.model.encoder + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TTextToUnitForConditionalGeneration.get_decoder + def get_decoder(self): + return self.model.decoder + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TTextToUnitForConditionalGeneration.get_output_embeddings + def get_output_embeddings(self): + return self.lm_head + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TTextToUnitForConditionalGeneration.set_output_embeddings + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TTextToUnitForConditionalGeneration.get_input_embeddings + def get_input_embeddings(self): + return self.model.decoder.embed_tokens + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TTextToUnitForConditionalGeneration.set_input_embeddings + def set_input_embeddings(self, value): + self.model.decoder.embed_tokens = value + + @add_start_docstrings_to_model_forward(M4T_TEXT_TO_UNITS_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + char_input_ids: torch.LongTensor = None, + char_count_per_id: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **kwargs, + ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]: + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.model( + input_ids, + char_input_ids=char_input_ids, + char_count_per_id=char_count_per_id, + attention_mask=attention_mask, + encoder_outputs=encoder_outputs, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + lm_logits = self.lm_head(outputs[0]) + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + labels = labels.to(lm_logits.device) + masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) + 
+ if not return_dict: + output = (lm_logits,) + outputs[1:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + + return SeamlessM4Tv2TextToUnitOutput( + last_hidden_state=lm_logits, + padding_mask=outputs.padding_mask, + decoder_hidden_states=outputs.decoder_hidden_states, + decoder_attentions=outputs.decoder_attentions, + encoder_last_hidden_state=outputs.encoder_last_hidden_state, + encoder_hidden_states=outputs.encoder_hidden_states, + encoder_attentions=outputs.encoder_attentions, + loss=masked_lm_loss, + ) + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TTextToUnitForConditionalGeneration._tie_weights + def _tie_weights(self) -> None: + if getattr(self.config, "tie_word_embeddings", True): + output_embeddings = self.get_output_embeddings() + if output_embeddings is not None: + self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings()) + + +############ VOCODER related code ################ + + +HIFIGAN_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`SeamlessM4Tv2Config`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
+""" + + +# Copied from transformers.models.speecht5.modeling_speecht5.HifiGanResidualBlock +class HifiGanResidualBlock(nn.Module): + def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), leaky_relu_slope=0.1): + super().__init__() + self.leaky_relu_slope = leaky_relu_slope + + self.convs1 = nn.ModuleList( + [ + nn.Conv1d( + channels, + channels, + kernel_size, + stride=1, + dilation=dilation[i], + padding=self.get_padding(kernel_size, dilation[i]), + ) + for i in range(len(dilation)) + ] + ) + self.convs2 = nn.ModuleList( + [ + nn.Conv1d( + channels, + channels, + kernel_size, + stride=1, + dilation=1, + padding=self.get_padding(kernel_size, 1), + ) + for _ in range(len(dilation)) + ] + ) + + def get_padding(self, kernel_size, dilation=1): + return (kernel_size * dilation - dilation) // 2 + + def apply_weight_norm(self): + for layer in self.convs1: + nn.utils.weight_norm(layer) + for layer in self.convs2: + nn.utils.weight_norm(layer) + + def remove_weight_norm(self): + for layer in self.convs1: + nn.utils.remove_weight_norm(layer) + for layer in self.convs2: + nn.utils.remove_weight_norm(layer) + + def forward(self, hidden_states): + for conv1, conv2 in zip(self.convs1, self.convs2): + residual = hidden_states + hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) + hidden_states = conv1(hidden_states) + hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) + hidden_states = conv2(hidden_states) + hidden_states = hidden_states + residual + return hidden_states + + +class SeamlessM4Tv2VariancePredictor(nn.Module): + def __init__(self, embed_dim, hidden_dim, kernel_size, var_pred_dropout): + super().__init__() + + self.conv1 = nn.Conv1d( + embed_dim, + hidden_dim, + kernel_size=kernel_size, + padding="same", + ) + self.activation_fuction = nn.ReLU() + self.ln1 = nn.LayerNorm(hidden_dim) + self.dropout_module = nn.Dropout(p=var_pred_dropout) + self.conv2 = nn.Conv1d( + hidden_dim, + hidden_dim, + kernel_size=kernel_size, + padding="same", + ) + self.ln2 = nn.LayerNorm(hidden_dim) + self.proj = nn.Linear(hidden_dim, 1) + + def forward(self, hidden_states: Tensor, padding_mask: Tensor = None) -> Tensor: + # Input: B x T x C; Output: B x T + if padding_mask is not None: + hidden_states = hidden_states.masked_fill(~padding_mask.bool().unsqueeze(-1), 0.0) + hidden_states = self.conv1(hidden_states.transpose(1, 2)) + hidden_states = self.activation_fuction(hidden_states).transpose(1, 2) + hidden_states = self.dropout_module(self.ln1(hidden_states)) + if padding_mask is not None: + hidden_states = hidden_states.masked_fill(~padding_mask.bool().unsqueeze(-1), 0.0) + hidden_states = self.conv2(hidden_states.transpose(1, 2)) + hidden_states = self.activation_fuction(hidden_states).transpose(1, 2) + hidden_states = self.dropout_module(self.ln2(hidden_states)) + return self.proj(hidden_states).squeeze(dim=2) + + +# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4THifiGan with SeamlessM4T->SeamlessM4Tv2 +class SeamlessM4Tv2HifiGan(nn.Module): + def __init__(self, config: SeamlessM4Tv2Config): + super().__init__() + model_in_dim = config.unit_embed_dim + config.lang_embed_dim + config.spkr_embed_dim + self.leaky_relu_slope = config.leaky_relu_slope + self.num_kernels = len(config.resblock_kernel_sizes) + self.num_upsamples = len(config.upsample_rates) + self.conv_pre = nn.Conv1d( + model_in_dim, + config.upsample_initial_channel, + kernel_size=7, + stride=1, + padding=3, + ) + + self.upsampler = nn.ModuleList() + 
for i, (upsample_rate, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes)): + self.upsampler.append( + nn.ConvTranspose1d( + config.upsample_initial_channel // (2**i), + config.upsample_initial_channel // (2 ** (i + 1)), + kernel_size=kernel_size, + stride=upsample_rate, + padding=(kernel_size - upsample_rate) // 2, + ) + ) + + self.resblocks = nn.ModuleList() + for i in range(len(self.upsampler)): + channels = config.upsample_initial_channel // (2 ** (i + 1)) + for kernel_size, dilation in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes): + self.resblocks.append(HifiGanResidualBlock(channels, kernel_size, dilation, config.leaky_relu_slope)) + + self.conv_post = nn.Conv1d(channels, 1, kernel_size=7, stride=1, padding=3) + + def forward(self, input_embeds: torch.FloatTensor) -> torch.FloatTensor: + r""" + Converts a log-mel spectrogram into a speech waveform. Passing a batch of log-mel spectrograms returns a batch + of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a single, un-batched speech + waveform. + + Args: + spectrogram (`torch.FloatTensor`): + Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length, + model_in_dim)`, or un-batched and of shape `(sequence_length, model_in_dim)`. Note that `model_in_dim` + is the sum of `config.unit_embed_dim`, `config.lang_embed_dim` and `config.spkr_embed_dim`. + + Returns: + `torch.FloatTensor`: Tensor containing the speech waveform. If the input spectrogram is batched, will be of + shape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`. + """ + + hidden_states = self.conv_pre(input_embeds) + for i in range(self.num_upsamples): + hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) + hidden_states = self.upsampler[i](hidden_states) + + res_state = self.resblocks[i * self.num_kernels](hidden_states) + for j in range(1, self.num_kernels): + res_state += self.resblocks[i * self.num_kernels + j](hidden_states) + hidden_states = res_state / self.num_kernels + + hidden_states = nn.functional.leaky_relu(hidden_states) + hidden_states = self.conv_post(hidden_states) + hidden_states = torch.tanh(hidden_states) + + # remove seq-len dim since this collapses to 1 + waveform = hidden_states.squeeze(1) + + return waveform + + +@add_start_docstrings( + """Code HiFi-GAN vocoder as described in this [repository](https://github.com/facebookresearch/speech-resynthesis).""", + HIFIGAN_START_DOCSTRING, +) +class SeamlessM4Tv2CodeHifiGan(PreTrainedModel): + config_class = SeamlessM4Tv2Config + main_input_name = "input_embeds" + _no_split_modules = [] + + def __init__(self, config): + super().__init__(config) + + self.pad_token_id = config.t2u_pad_token_id + embed_dim = config.unit_embed_dim + kernel_size = config.variance_predictor_kernel_size + var_pred_dropout = config.var_pred_dropout + self.dur_predictor = SeamlessM4Tv2VariancePredictor(embed_dim, embed_dim, kernel_size, var_pred_dropout) + + self.unit_embedding = nn.Embedding(config.unit_hifi_gan_vocab_size, config.unit_embed_dim) + self.speaker_embedding = nn.Embedding(config.vocoder_num_spkrs, config.spkr_embed_dim) + self.language_embedding = nn.Embedding(config.vocoder_num_langs, config.lang_embed_dim) + + self.hifi_gan = SeamlessM4Tv2HifiGan(config) + + # Initialize weights and apply final processing + self.post_init() + + # Copied from 
transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TCodeHifiGan._get_dur_output_lengths + def _get_dur_output_lengths(self, input_ids, dur_out): + """ + Computes the output length after the duration layer. + """ + unit_lengths = (input_ids != self.pad_token_id).sum(1) + + # take care of edge cases where no padding or too many padding + unit_lengths = torch.clamp(unit_lengths, 0, dur_out.shape[1] - 1) + + cumulative_dur_out = torch.cumsum(dur_out, dim=1) + unit_lengths = cumulative_dur_out.gather(dim=1, index=unit_lengths.unsqueeze(1)).squeeze() + + return unit_lengths + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TCodeHifiGan._get_output_hifigan_lengths + def _get_output_hifigan_lengths(self, input_lengths: Union[torch.LongTensor, int]): + """ + Computes the output length of the hifigan convolutional layers + """ + + def _conv_out_length(input_length, kernel_size, stride, pad, dilation=1): + # 1D convolutional layer output length formula taken + # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html + return ( + torch.div(input_length + 2 * pad - dilation * (kernel_size - 1) - 1, stride, rounding_mode="floor") + 1 + ) + + def _transpose_conv_out_length(input_length, kernel_size, stride, pad, dilation=1): + return (input_length - 1) * stride - 2 * pad + dilation * (kernel_size - 1) + 1 + + # conv_pre + input_lengths = _conv_out_length(input_lengths, 7, 1, 3) + + # upsampler + for i, (upsample_rate, kernel_size) in enumerate( + zip(self.config.upsample_rates, self.config.upsample_kernel_sizes) + ): + input_lengths = _transpose_conv_out_length( + input_lengths, kernel_size, upsample_rate, (kernel_size - upsample_rate) // 2 + ) + + # resblock + for i in range(len(self.config.upsample_rates)): + for kernel_size, dilation in zip(self.config.resblock_kernel_sizes, self.config.resblock_dilation_sizes): + for dil in dilation: + input_lengths = _conv_out_length( + input_lengths, kernel_size, 1, (kernel_size - 1) * dil // 2, dilation=dil + ) + + for dil in dilation: + input_lengths = _conv_out_length(input_lengths, kernel_size, 1, (kernel_size - 1) // 2, dilation=1) + + # conv_post + input_lengths = _conv_out_length(input_lengths, 7, 1, 3) + + return input_lengths + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TCodeHifiGan.forward with SeamlessM4T->SeamlessM4Tv2, spkr_id->speaker_id + def forward( + self, input_ids: torch.LongTensor, speaker_id: torch.Tensor, lang_id: torch.Tensor + ) -> Tuple[torch.Tensor]: + """ + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`SeamlessM4Tv2TextToUnitForConditionalGeneration`]. [What are input + IDs?](../glossary#input-ids) + speaker_id (`int`, *optional*): + The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`. + tgt_lang (`str`, *optional*): + The language id to use as target language for translation. 
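# Illustrative sketch (editorial annotation, not part of the upstream modeling file).
# The two helpers above are the standard 1-D length formulas:
#   Conv1d:          L_out = floor((L_in + 2*pad - dilation*(kernel-1) - 1) / stride) + 1
#   ConvTranspose1d: L_out = (L_in - 1)*stride - 2*pad + dilation*(kernel-1) + 1
# Quick check against PyTorch with assumed toy shapes:
import torch
import torch.nn as nn


def conv_out_length(length, kernel, stride, pad, dilation=1):
    return (length + 2 * pad - dilation * (kernel - 1) - 1) // stride + 1


def transpose_conv_out_length(length, kernel, stride, pad, dilation=1):
    return (length - 1) * stride - 2 * pad + dilation * (kernel - 1) + 1


x = torch.randn(1, 1, 37)
conv = nn.Conv1d(1, 1, kernel_size=7, stride=2, padding=3)
up = nn.ConvTranspose1d(1, 1, kernel_size=8, stride=4, padding=2)
assert conv(x).shape[-1] == conv_out_length(37, 7, 2, 3)          # 19
assert up(x).shape[-1] == transpose_conv_out_length(37, 8, 4, 2)  # 148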
+ """ + hidden_states = self.unit_embedding(input_ids).transpose(1, 2) + spkr = self.speaker_embedding(speaker_id).transpose(1, 2) + lang = self.language_embedding(lang_id).transpose(1, 2) + + log_dur_pred = self.dur_predictor(hidden_states.transpose(1, 2)) + dur_out = torch.clamp(torch.round((torch.exp(log_dur_pred) - 1)).long(), min=1) + # B x C x T + if hidden_states.size(0) == 1: + hidden_states = torch.repeat_interleave(hidden_states, dur_out.view(-1), dim=2) + else: + # if batched sample, need to interleave per sample, and pad -> loss of parallelism + if hidden_states.shape[0] > 1 and self.training: + logger.warning( + """`self.training=True` and you use batching. You lose parallelism during the hifigan + forward pass because the samples are interleaved.""" + ) + hidden_states = [ + torch.repeat_interleave(hidden_state, duration, dim=-1).transpose(0, 1) + for (hidden_state, duration) in zip(hidden_states, dur_out) + ] + + hidden_states = nn.utils.rnn.pad_sequence(hidden_states, batch_first=True).transpose(1, 2) + + spkr = spkr.repeat(1, 1, hidden_states.shape[-1]) + lang = lang.repeat(1, 1, hidden_states.shape[-1]) + hidden_states = torch.cat([lang, hidden_states, spkr], dim=1) + + hidden_states = self.hifi_gan(hidden_states) + + unit_lengths = self._get_dur_output_lengths(input_ids, dur_out) + lengths = self._get_output_hifigan_lengths(unit_lengths) + + return hidden_states, lengths + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TCodeHifiGan._init_weights + def _init_weights(self, module): + """Initialize the weights.""" + if isinstance(module, (nn.Linear, nn.Conv1d, nn.ConvTranspose1d)): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TCodeHifiGan.apply_weight_norm + def apply_weight_norm(self): + nn.utils.weight_norm(self.hifi_gan.conv_pre) + for layer in self.hifi_gan.upsampler: + nn.utils.weight_norm(layer) + for layer in self.hifi_gan.resblocks: + layer.apply_weight_norm() + nn.utils.weight_norm(self.hifi_gan.conv_post) + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TCodeHifiGan.remove_weight_norm + def remove_weight_norm(self): + nn.utils.remove_weight_norm(self.hifi_gan.conv_pre) + for layer in self.hifi_gan.upsampler: + nn.utils.remove_weight_norm(layer) + for layer in self.hifi_gan.resblocks: + layer.remove_weight_norm() + nn.utils.remove_weight_norm(self.hifi_gan.conv_post) + + +############ WHOLE MODEL related code ################ + + +@add_start_docstrings( + "The text-to-text SeamlessM4Tv2 Model transformer which can be used for T2TT.", + SEAMLESS_M4T_V2_START_DOCSTRING, +) +# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToText with SeamlessM4T->SeamlessM4Tv2,SeamlessM4Tv2Tokenizer->SeamlessM4TTokenizer, SeamlessM4Tv2Processor->SeamlessM4TProcessor +class SeamlessM4Tv2ForTextToText(SeamlessM4Tv2PreTrainedModel): + _keys_to_ignore_on_load_missing = ["speech_encoder", "t2u_model", "vocoder"] + main_input_name = "input_ids" + + _tied_weights_keys = [ + "lm_head.weight", + "text_encoder.embed_tokens.weight", + "text_decoder.embed_tokens.weight", + ] + + def __init__(self, config: SeamlessM4Tv2Config): + 
super().__init__(config) + + self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id) + + self.text_encoder = SeamlessM4Tv2Encoder(config, self.shared) + self.text_decoder = SeamlessM4Tv2Decoder(config, self.shared) + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_encoder(self): + return self.text_encoder + + def get_decoder(self): + return self.text_decoder + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def get_input_embeddings(self): + return self.text_decoder.embed_tokens + + def set_input_embeddings(self, value): + self.text_encoder.embed_tokens = value + self.text_decoder.embed_tokens = value + self.shared = value + + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.text_encoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.lm_head, self.shared) + + @add_start_docstrings_to_model_forward(M4T_TEXT_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.LongTensor] = None, + encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **kwargs, + ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]: + if labels is not None: + if use_cache: + logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") + use_cache = False + if decoder_input_ids is None and decoder_inputs_embeds is None: + decoder_input_ids = shift_tokens_right( + labels, self.config.pad_token_id, self.config.decoder_start_token_id + ) + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if encoder_outputs is None: + encoder_outputs = self.text_encoder( + input_ids=input_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True + elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): + encoder_outputs = BaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + + encoder_attention_mask = attention_mask + + # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) + decoder_outputs = 
self.text_decoder( + input_ids=decoder_input_ids, + attention_mask=decoder_attention_mask, + encoder_hidden_states=encoder_outputs[0], + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + lm_logits = self.lm_head(decoder_outputs[0]) + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + labels = labels.to(lm_logits.device) + masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + outputs = decoder_outputs + encoder_outputs + output = (lm_logits,) + outputs[1:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + + return Seq2SeqLMOutput( + loss=masked_lm_loss, + logits=lm_logits, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + def generate( + self, + input_ids=None, + tgt_lang=None, + generation_config=None, + logits_processor=None, + stopping_criteria=None, + prefix_allowed_tokens_fn=None, + synced_gpus=False, + **kwargs, + ): + """ + Generates sequences of token ids. + + + + Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the + model's default generation configuration. You can override any `generation_config` by passing the corresponding + parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`. + + For an overview of generation strategies and code examples, check out the [following + guide](./generation_strategies). + + + + Parameters: + input_ids (`torch.Tensor` of varying shape depending on the modality, *optional*): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See + [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + tgt_lang (`str`, *optional*): + The language to use as target language for translation. + generation_config (`~generation.GenerationConfig`, *optional*): + The generation configuration to be used as base parametrization for the generation call. `**kwargs` + passed to generate matching the attributes of `generation_config` will override them. If + `generation_config` is not provided, the default will be used, which had the following loading + priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model + configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s + default values, whose documentation should be checked to parameterize generation. + logits_processor (`LogitsProcessorList`, *optional*): + Custom logits processors that complement the default logits processors built from arguments and + generation config. If a logit processor is passed that is already created with the arguments or a + generation config an error is thrown. This feature is intended for advanced users. 
+ stopping_criteria (`StoppingCriteriaList`, *optional*): + Custom stopping criteria that complement the default stopping criteria built from arguments and a + generation config. If a stopping criteria is passed that is already created with the arguments or a + generation config an error is thrown. This feature is intended for advanced users. + prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*): + If provided, this function constraints the beam search to allowed tokens only at each step. If not + provided no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and + `input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned + on the batch ID `batch_id` and the previously generated tokens `inputs_ids`. This argument is useful + for constrained generation conditioned on the prefix, as described in [Autoregressive Entity + Retrieval](https://arxiv.org/abs/2010.00904). + synced_gpus (`bool`, *optional*, defaults to `False`): + Whether to continue running the while loop until max_length (needed for ZeRO stage 3) + kwargs (`Dict[str, Any]`, *optional*): + Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be + forwarded to the `forward` function of the model. + + Return: + [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` + or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. The possible + [`~utils.ModelOutput`] types are: + - [`~generation.GenerateEncoderDecoderOutput`], + - [`~generation.GenerateBeamEncoderDecoderOutput`] + """ + # prepare text_decoder_input_ids + text_decoder_input_ids = kwargs.pop("decoder_input_ids", None) + # overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids. + if tgt_lang is not None: + batch_size = len(input_ids) if input_ids is not None else len(kwargs.get("inputs_embeds")) + + if hasattr(self.generation_config, "text_decoder_lang_to_code_id"): + # also accept __xxx__ + tgt_lang = tgt_lang.replace("__", "") + if tgt_lang not in self.generation_config.text_decoder_lang_to_code_id: + raise ValueError( + f"""`tgt_lang={tgt_lang}` is not supported by this model. Please specify a `tgt_lang` in + {', '.join(self.generation_config.text_decoder_lang_to_code_id.keys())}""" + ) + # tgt_lang gets priority over decoder input ids + text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang) + text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size).to(self.device) + else: + raise ValueError( + """This model generation config doesn't have a `text_decoder_lang_to_code_id` key which maps + the target language to the right token id. 
Make sure to load the right generation config.""" + ) + else: + # only a warning, otherwise errors appear in the tests + logger.warning( + """You must either specify a `tgt_lang` or pass a correct `text_decoder_input_ids` to get + a correct generation, otherwise the generation will probably make no sense.""" + ) + + return super().generate( + input_ids, + generation_config, + logits_processor, + stopping_criteria, + prefix_allowed_tokens_fn, + synced_gpus, + decoder_input_ids=text_decoder_input_ids, + **kwargs, + ) + + def prepare_inputs_for_generation( + self, + decoder_input_ids, + past_key_values=None, + attention_mask=None, + use_cache=None, + encoder_outputs=None, + **kwargs, + ): + # cut decoder_input_ids if past is used + if past_key_values is not None: + decoder_input_ids = decoder_input_ids[:, -1:] + + return { + "input_ids": None, # encoder_outputs is defined. input_ids not needed + "encoder_outputs": encoder_outputs, + "past_key_values": past_key_values, + "decoder_input_ids": decoder_input_ids, + "attention_mask": attention_mask, + "use_cache": use_cache, + } + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + # cached cross_attention states don't have to be reordered -> they are always the same + reordered_past += ( + tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], + ) + return reordered_past + + +@add_start_docstrings( + "The speech-to-text SeamlessM4Tv2 Model transformer which can be used for S2TT.", + SEAMLESS_M4T_V2_START_DOCSTRING, +) +class SeamlessM4Tv2ForSpeechToText(SeamlessM4Tv2PreTrainedModel): + _keys_to_ignore_on_load_missing = ["text_decoder", "t2u_model", "vocoder"] + main_input_name = "input_features" + + _tied_weights_keys = [ + "lm_head.weight", + "text_decoder.embed_tokens.weight", + ] + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.__init__ with SeamlessM4T->SeamlessM4Tv2 + def __init__(self, config: SeamlessM4Tv2Config): + super().__init__(config) + + self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id) + self.speech_encoder = SeamlessM4Tv2SpeechEncoder(config) + self.text_decoder = SeamlessM4Tv2Decoder(config, self.shared) + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.get_encoder + def get_encoder(self): + return self.speech_encoder + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.get_decoder + def get_decoder(self): + return self.text_decoder + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.get_output_embeddings + def get_output_embeddings(self): + return self.lm_head + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.set_output_embeddings + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.get_input_embeddings + def get_input_embeddings(self): + return self.text_decoder.embed_tokens + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.set_input_embeddings + def set_input_embeddings(self, value): + 
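+ # note: unlike the text-to-text model, this speech-to-text model has no text encoder,
+ # so only the decoder token embeddings are replaced here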
self.text_decoder.embed_tokens = value + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText._tie_weights + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.lm_head, self.shared) + + @add_start_docstrings_to_model_forward(M4T_SPEECH_INPUTS_DOCSTRING) + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.forward + def forward( + self, + input_features: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.LongTensor] = None, + encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **kwargs, + ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]: + if labels is not None: + if use_cache: + logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") + use_cache = False + if decoder_input_ids is None and decoder_inputs_embeds is None: + decoder_input_ids = shift_tokens_right( + labels, self.config.pad_token_id, self.config.decoder_start_token_id + ) + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if encoder_outputs is None: + encoder_outputs = self.speech_encoder( + input_features=input_features, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True + elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): + encoder_outputs = BaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + + encoder_attention_mask = attention_mask + if attention_mask is not None: + sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to( + encoder_outputs[0].device + ) + encoder_attention_mask = _compute_new_attention_mask( + hidden_states=encoder_outputs[0], seq_lens=sub_sampled_lengths + ) + + # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) + decoder_outputs = self.text_decoder( + input_ids=decoder_input_ids, + attention_mask=decoder_attention_mask, + encoder_hidden_states=encoder_outputs[0], + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + 
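+ # project the decoder's last hidden states onto the vocabulary to obtain per-token logits
+ # (the lm_head weight is tied to the shared token embedding when config.tie_word_embeddings is set)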
+ lm_logits = self.lm_head(decoder_outputs[0]) + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + labels = labels.to(lm_logits.device) + masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + outputs = decoder_outputs + encoder_outputs + output = (lm_logits,) + outputs[1:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + + return Seq2SeqLMOutput( + loss=masked_lm_loss, + logits=lm_logits, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.generate + def generate( + self, + input_features=None, + tgt_lang=None, + generation_config=None, + logits_processor=None, + stopping_criteria=None, + prefix_allowed_tokens_fn=None, + synced_gpus=False, + **kwargs, + ): + """ + Generates sequences of token ids. + + + + Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the + model's default generation configuration. You can override any `generation_config` by passing the corresponding + parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`. + + For an overview of generation strategies and code examples, check out the [following + guide](./generation_strategies). + + + + Parameters: + input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`): + Input audio features. This should be returnes by the [`SeamlessM4TFeatureExtractor`] class or the + [`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details. + + tgt_lang (`str`, *optional*): + The language to use as target language for translation. + generation_config (`~generation.GenerationConfig`, *optional*): + The generation configuration to be used as base parametrization for the generation call. `**kwargs` + passed to generate matching the attributes of `generation_config` will override them. If + `generation_config` is not provided, the default will be used, which had the following loading + priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model + configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s + default values, whose documentation should be checked to parameterize generation. + logits_processor (`LogitsProcessorList`, *optional*): + Custom logits processors that complement the default logits processors built from arguments and + generation config. If a logit processor is passed that is already created with the arguments or a + generation config an error is thrown. This feature is intended for advanced users. + stopping_criteria (`StoppingCriteriaList`, *optional*): + Custom stopping criteria that complement the default stopping criteria built from arguments and a + generation config. If a stopping criteria is passed that is already created with the arguments or a + generation config an error is thrown. This feature is intended for advanced users. 
+ prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*): + If provided, this function constraints the beam search to allowed tokens only at each step. If not + provided no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and + `input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned + on the batch ID `batch_id` and the previously generated tokens `inputs_ids`. This argument is useful + for constrained generation conditioned on the prefix, as described in [Autoregressive Entity + Retrieval](https://arxiv.org/abs/2010.00904). + synced_gpus (`bool`, *optional*, defaults to `False`): + Whether to continue running the while loop until max_length (needed for ZeRO stage 3) + kwargs (`Dict[str, Any]`, *optional*): + Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be + forwarded to the `forward` function of the model. + + Return: + [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` + or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. The possible + [`~utils.ModelOutput`] types are: + - [`~generation.GenerateEncoderDecoderOutput`], + - [`~generation.GenerateBeamEncoderDecoderOutput`] + """ + text_decoder_input_ids = kwargs.pop("decoder_input_ids", None) + # overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids. + if tgt_lang is not None: + inputs = kwargs.get("input_embeds") if input_features is None else input_features + inputs = ( + inputs + if inputs is not None + else kwargs.get("encoder_outputs", {"last_hidden_state": None})["last_hidden_state"] + ) + batch_size = len(inputs) + + if hasattr(self.generation_config, "text_decoder_lang_to_code_id"): + # also accept __xxx__ + tgt_lang = tgt_lang.replace("__", "") + if tgt_lang not in self.generation_config.text_decoder_lang_to_code_id: + raise ValueError( + f"""`tgt_lang={tgt_lang}` is not supported by this model. Please specify a `tgt_lang` in + {', '.join(self.generation_config.text_decoder_lang_to_code_id.keys())}""" + ) + # tgt_lang gets priority over decoder input ids + text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang) + text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size).to(self.device) + else: + raise ValueError( + """This model generation config doesn't have a `text_decoder_lang_to_code_id` key which maps + the target language to the right token id. 
Make sure to load the right generation config.""" + ) + else: + # only a warning, otherwise errors appear in the tests + logger.warning( + """You must either specify a `tgt_lang` or pass a correct `text_decoder_input_ids` to get + a correct generation, otherwise the generation will probably make no sense.""" + ) + return super().generate( + input_features, + generation_config, + logits_processor, + stopping_criteria, + prefix_allowed_tokens_fn, + synced_gpus, + decoder_input_ids=text_decoder_input_ids, + **kwargs, + ) + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.prepare_inputs_for_generation + def prepare_inputs_for_generation( + self, + decoder_input_ids, + past_key_values=None, + attention_mask=None, + use_cache=None, + encoder_outputs=None, + **kwargs, + ): + # cut decoder_input_ids if past is used + if past_key_values is not None: + decoder_input_ids = decoder_input_ids[:, -1:] + + return { + "input_ids": None, # encoder_outputs is defined. input_ids not needed + "encoder_outputs": encoder_outputs, + "past_key_values": past_key_values, + "decoder_input_ids": decoder_input_ids, + "attention_mask": attention_mask, + "use_cache": use_cache, + } + + @staticmethod + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText._reorder_cache + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + # cached cross_attention states don't have to be reordered -> they are always the same + reordered_past += ( + tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], + ) + return reordered_past + + +@add_start_docstrings( + "The text-to-speech SeamlessM4Tv2 Model transformer which can be used for T2ST.", + SEAMLESS_M4T_V2_START_DOCSTRING, +) +class SeamlessM4Tv2ForTextToSpeech(SeamlessM4Tv2PreTrainedModel): + _keys_to_ignore_on_load_missing = ["speech_encoder"] + main_input_name = "input_ids" + + _tied_weights_keys = [ + "lm_head.weight", + "text_encoder.embed_tokens.weight", + "text_decoder.embed_tokens.weight", + ] + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.__init__ with SeamlessM4T->SeamlessM4Tv2 + def __init__(self, config: SeamlessM4Tv2Config): + super().__init__(config) + + self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id) + + self.text_encoder = SeamlessM4Tv2Encoder(config, self.shared) + self.text_decoder = SeamlessM4Tv2Decoder(config, self.shared) + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + self.t2u_model = SeamlessM4Tv2TextToUnitForConditionalGeneration(config) + self.vocoder = SeamlessM4Tv2CodeHifiGan(config) + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.get_encoder + def get_encoder(self): + return self.text_encoder + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.get_decoder + def get_decoder(self): + return self.text_decoder + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.get_output_embeddings + def get_output_embeddings(self): + return self.lm_head + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.set_output_embeddings + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + 
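+ # note: on top of the text encoder/decoder, this model also owns `self.t2u_model` and `self.vocoder`;
+ # they are only used by `.generate` (text -> tokens -> units -> waveform), not by `forward`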
+ # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.get_input_embeddings + def get_input_embeddings(self): + return self.text_decoder.embed_tokens + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.set_input_embeddings + def set_input_embeddings(self, value): + self.text_encoder.embed_tokens = value + self.text_decoder.embed_tokens = value + self.shared = value + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech._tie_weights + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.text_encoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.lm_head, self.shared) + + @add_start_docstrings_to_model_forward(M4T_TEXT_INPUTS_DOCSTRING) + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.forward with SeamlessM4T->SeamlessM4Tv2 + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.LongTensor] = None, + encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]: + if labels is not None: + if use_cache: + logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") + use_cache = False + if decoder_input_ids is None and decoder_inputs_embeds is None: + decoder_input_ids = shift_tokens_right( + labels, self.config.pad_token_id, self.config.decoder_start_token_id + ) + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if encoder_outputs is None: + # if encoder_outputs is not None, it's probably used within a .generate method so no need to warn + logger.warning( + "This is the same forward method as `SeamlessM4Tv2ForTextToText`." + "It doesn't use the text-to-unit model `SeamlessM4Tv2TextToUnitForConditionalGeneration`." + "If you want to generate speech, use the `.generate` method." 
+ ) + encoder_outputs = self.text_encoder( + input_ids=input_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True + elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): + encoder_outputs = BaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + + encoder_attention_mask = attention_mask + + # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) + decoder_outputs = self.text_decoder( + input_ids=decoder_input_ids, + attention_mask=decoder_attention_mask, + encoder_hidden_states=encoder_outputs[0], + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + lm_logits = self.lm_head(decoder_outputs[0]) + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + labels = labels.to(lm_logits.device) + masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + outputs = decoder_outputs + encoder_outputs + output = (lm_logits,) + outputs[1:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + + return Seq2SeqLMOutput( + loss=masked_lm_loss, + logits=lm_logits, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + @torch.no_grad() + def generate( + self, + input_ids: Optional[torch.Tensor] = None, + return_intermediate_token_ids: Optional[bool] = None, + tgt_lang: Optional[str] = None, + speaker_id: Optional[int] = 0, + **kwargs, + ) -> Union[torch.Tensor, SeamlessM4Tv2GenerationOutput]: + """ + Generates translated audio waveforms. + + + + This method successively calls the `.generate` function of two different sub-models. You can specify keyword + arguments at two different levels: general arguments that will be passed to both models, or prefixed arguments + that will be passed to one of them. + + For example, calling `.generate(input_ids, num_beams=4, speech_do_sample=True)` will successively perform + beam-search decoding on the text model, and multinomial beam-search sampling on the speech model. + + For an overview of generation strategies and code examples, check out the [following + guide](./generation_strategies). + + + + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See + [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + return_intermediate_token_ids (`bool`, *optional*): + If `True`, also returns the intermediate generated text and unit tokens. 
Set to `True` if you also want + to get translated text alongside the audio. + tgt_lang (`str`, *optional*): + The language to use as target language for translation. + speaker_id (`int`, *optional*, defaults to 0): + The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`. + kwargs (*optional*): + Remaining dictionary of keyword arguments that will be passed to [`GenerationMixin.generate`]. Keyword + arguments are of two types: + + - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model, + except for `decoder_input_ids` which will only be passed through the text components. + - With a *text_* or *speech_* prefix, they will be input for the `generate` method of the + text model and speech model respectively. It has the priority over the keywords without a prefix. + + This means you can, for example, specify a generation strategy for one generation but not for the + other. + + + Returns: + `Union[SeamlessM4Tv2GenerationOutput, Tuple[Tensor]]`: + - If `return_intermediate_token_ids`, returns [`SeamlessM4Tv2GenerationOutput`]. + - If not `return_intermediate_token_ids`, returns a tuple composed of waveforms of shape `(batch_size, + sequence_length)`and and `waveform_lengths` which gives the length of each sample. + """ + batch_size = len(input_ids) if input_ids is not None else len(kwargs.get("inputs_embeds")) + + if tgt_lang is None: + raise ValueError("You must specify a `tgt_lang` to generate translated speech.") + else: + # also accept __xxx__ + tgt_lang = tgt_lang.replace("__", "") + for key in ["text_decoder_lang_to_code_id", "t2u_lang_code_to_id", "vocoder_lang_code_to_id"]: + lang_code_to_id = getattr(self.generation_config, key, None) + if lang_code_to_id is None: + raise ValueError( + f"""This model generation config doesn't have a `{key}` key which maps the target language + to the right token id. Make sure to load the right generation config.""" + ) + elif tgt_lang not in lang_code_to_id: + raise ValueError( + f"""`tgt_lang={tgt_lang}` is not supported by this model. + Please specify a `tgt_lang` in {','.join(lang_code_to_id.keys())}. Note that SeamlessM4Tv2 supports + more languages for text translation than for speech synthesis.""" + ) + + kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs) + kwargs_text["output_hidden_states"] = True + kwargs_text["return_dict_in_generate"] = True + kwargs_text["output_scores"] = True + + text_decoder_input_ids = kwargs_text.get("decoder_input_ids") + + # overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids. 
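+ # (the target-language code id becomes the single forced decoder token, i.e. `decoder_input_ids`
+ # of shape (batch_size, 1), so the first text-generation pass starts from that language token)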
+ text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang) + text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size).to(self.device) + + kwargs_text["decoder_input_ids"] = text_decoder_input_ids + + # first generation + text_generation_output = super().generate(input_ids, **kwargs_text) + sequences = text_generation_output.sequences + + # prepare second generation + num_return_sequences = len(sequences) // batch_size + attention_mask = kwargs_speech.get("attention_mask", kwargs_text.get("attention_mask", None)) + + if attention_mask is not None: + # repeat attention mask alongside batch dimension + attention_mask = torch.repeat_interleave(attention_mask, num_return_sequences, dim=0) + encoder_hidden_states = text_generation_output.encoder_hidden_states[-1] + + # repeat attention mask alongside batch dimension + encoder_hidden_states = torch.repeat_interleave(encoder_hidden_states, num_return_sequences, dim=0) + + # get decoder last hidden state - must do a pass through the text decoder + t2u_input_embeds = self.text_decoder( + input_ids=sequences[:, :-1], # Manually trim the final EOS token + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=attention_mask, + ).last_hidden_state + + pad_token_id = self.generation_config.pad_token_id + + # Compute new attention mask + seq_lens = (sequences[:, :-1] != pad_token_id).int().sum(1) + t2u_model_attention_mask = _compute_new_attention_mask(t2u_input_embeds, seq_lens) + kwargs_speech["attention_mask"] = t2u_model_attention_mask + + # REMOVE EOS and lang_id + t2u_input_ids = sequences[:, 2:-1] + # replace every other EOS + t2u_input_ids = torch.masked_fill( + t2u_input_ids, t2u_input_ids == self.generation_config.eos_token_id, pad_token_id + ) + + # compute t2u_char_input_ids + t2u_subwords = self._indices_to_subwords(t2u_input_ids) + t2u_char_count_per_id = self._count_character_length_in_subword( + t2u_input_ids, t2u_subwords, pad_token_id=pad_token_id + ) + + # Add pads for lang, EOS tokens as per NLLB "source" tokenizer mode. + pad_zero = t2u_char_count_per_id.new_zeros((t2u_char_count_per_id.shape[0], 1)) + t2u_char_count_per_id = torch.cat([pad_zero, t2u_char_count_per_id, pad_zero], dim=1) + t2u_char_input_ids = self._get_char_input_ids( + t2u_input_ids, t2u_subwords, t2u_char_count_per_id, pad_token_id=pad_token_id + ) + + # second pass + t2u_output = self.t2u_model( + inputs_embeds=t2u_input_embeds, + char_input_ids=t2u_char_input_ids, + char_count_per_id=t2u_char_count_per_id, + **kwargs_speech, + ) + + t2u_logits = t2u_output[0] + padding_mask = t2u_output[1].bool() + + # The text-to-unit model is non auto-regressive. 
We keep the ability to use sampling with temperature + temperature = kwargs_speech.get("temperature", None) + if (temperature is None or temperature == 1.0) or not kwargs_speech.get("do_sample", False): + unit_ids = t2u_logits.argmax(dim=-1) + else: + t2u_logits = t2u_logits / temperature + # apply softmax + probs = nn.functional.softmax(t2u_logits, dim=-1) + # reshape to 2D: (batch_size, seq_len, t2u_vocab_size) -> (batch_size*seq_len, t2u_vocab_size) + probs = probs.reshape((-1, probs.shape[2])) + # multinomial then reshape : (batch_size*seq_len)-> (batch_size,seq_len) + unit_ids = torch.multinomial(probs, num_samples=1).view(t2u_logits.shape[0], -1) + + output_unit_ids = unit_ids.detach().clone() + + replace_mask = (unit_ids == self.config.t2u_eos_token_id) | (~padding_mask) + # replace eos per pad + unit_ids = unit_ids.masked_fill(replace_mask, self.config.t2u_pad_token_id) + + # offset of control symbols + unit_ids = torch.where( + unit_ids == self.config.t2u_pad_token_id, unit_ids, unit_ids - self.config.vocoder_offset + ) + + vocoder_tgt_lang_id = self.generation_config.vocoder_lang_code_to_id.get(tgt_lang) + vocoder_tgt_lang_id = torch.tensor([[vocoder_tgt_lang_id]] * len(unit_ids)).to(self.device) + + speaker_id = torch.tensor([[speaker_id]] * len(unit_ids)).to(self.device) + + waveform, waveform_lengths = self.vocoder( + input_ids=unit_ids, speaker_id=speaker_id, lang_id=vocoder_tgt_lang_id + ) + + if return_intermediate_token_ids: + return SeamlessM4Tv2GenerationOutput( + waveform=waveform, + waveform_lengths=waveform_lengths, + sequences=sequences, + unit_sequences=output_unit_ids, + ) + + return waveform, waveform_lengths + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech.prepare_inputs_for_generation + def prepare_inputs_for_generation( + self, + decoder_input_ids, + past_key_values=None, + attention_mask=None, + use_cache=None, + encoder_outputs=None, + **kwargs, + ): + # cut decoder_input_ids if past is used + if past_key_values is not None: + decoder_input_ids = decoder_input_ids[:, -1:] + + return { + "input_ids": None, # encoder_outputs is defined. 
input_ids not needed + "encoder_outputs": encoder_outputs, + "past_key_values": past_key_values, + "decoder_input_ids": decoder_input_ids, + "attention_mask": attention_mask, + "use_cache": use_cache, + } + + @staticmethod + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToSpeech._reorder_cache + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + # cached cross_attention states don't have to be reordered -> they are always the same + reordered_past += ( + tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], + ) + return reordered_past + + +@add_start_docstrings( + "The speech-to-speech SeamlessM4Tv2 Model transformer which can be used for S2ST.", + SEAMLESS_M4T_V2_START_DOCSTRING, +) +class SeamlessM4Tv2ForSpeechToSpeech(SeamlessM4Tv2PreTrainedModel): + _keys_to_ignore_on_load_missing = ["text_encoder"] + main_input_name = "input_features" + + _tied_weights_keys = [ + "lm_head.weight", + "text_decoder.embed_tokens.weight", + ] + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech.__init__ with SeamlessM4T->SeamlessM4Tv2 + def __init__(self, config): + super().__init__(config) + + self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id) + self.speech_encoder = SeamlessM4Tv2SpeechEncoder(config) + self.text_decoder = SeamlessM4Tv2Decoder(config, self.shared) + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + self.t2u_model = SeamlessM4Tv2TextToUnitForConditionalGeneration(config) + self.vocoder = SeamlessM4Tv2CodeHifiGan(config) + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech.get_encoder + def get_encoder(self): + return self.speech_encoder + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech.get_decoder + def get_decoder(self): + return self.text_decoder + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech.get_output_embeddings + def get_output_embeddings(self): + return self.lm_head + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech.set_output_embeddings + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech.get_input_embeddings + def get_input_embeddings(self): + return self.text_decoder.embed_tokens + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech.set_input_embeddings + def set_input_embeddings(self, value): + self.text_decoder.embed_tokens = value + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech._tie_weights + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.lm_head, self.shared) + + @add_start_docstrings_to_model_forward(M4T_SPEECH_INPUTS_DOCSTRING) + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech.forward with SeamlessM4T->SeamlessM4Tv2 + def forward( + self, + input_features: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + decoder_input_ids: 
Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.LongTensor] = None, + encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **kwargs, + ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]: + if labels is not None: + if use_cache: + logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") + use_cache = False + if decoder_input_ids is None and decoder_inputs_embeds is None: + decoder_input_ids = shift_tokens_right( + labels, self.config.pad_token_id, self.config.decoder_start_token_id + ) + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if encoder_outputs is None: + # if encoder_outputs is not None, it's probably used within a .generate method so no need to warn + logger.warning( + "This is the same forward method as `SeamlessM4Tv2ForSpeechToText`. It doesn't use `self.t2u_model`." + "If you want to generate speech, use the `generate` method." + ) + + encoder_outputs = self.speech_encoder( + input_features=input_features, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True + elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): + encoder_outputs = BaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + + encoder_attention_mask = attention_mask + if attention_mask is not None: + sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to( + encoder_outputs[0].device + ) + encoder_attention_mask = _compute_new_attention_mask( + hidden_states=encoder_outputs[0], seq_lens=sub_sampled_lengths + ) + + # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) + decoder_outputs = self.text_decoder( + input_ids=decoder_input_ids, + attention_mask=decoder_attention_mask, + encoder_hidden_states=encoder_outputs[0], + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + lm_logits = self.lm_head(decoder_outputs[0]) + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + labels = labels.to(lm_logits.device) + masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + outputs = decoder_outputs + encoder_outputs + output = (lm_logits,) + outputs[1:] + 
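+ # tuple output (return_dict=False): the decoder's last hidden state is replaced by the LM logits,
+ # and the loss is prepended below only when `labels` were provided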
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + + return Seq2SeqLMOutput( + loss=masked_lm_loss, + logits=lm_logits, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + @torch.no_grad() + def generate( + self, + input_features: Optional[torch.Tensor] = None, + return_intermediate_token_ids: Optional[bool] = None, + tgt_lang: Optional[str] = None, + speaker_id: Optional[int] = 0, + **kwargs, + ) -> Union[torch.Tensor, SeamlessM4Tv2GenerationOutput]: + """ + Generates translated audio waveforms. + + + + This method successively calls the `.generate` function of two different sub-models. You can specify keyword + arguments at two different levels: general arguments that will be passed to both models, or prefixed arguments + that will be passed to one of them. + + For example, calling `.generate(input_features, num_beams=4, speech_do_sample=True)` will successively perform + beam-search decoding on the text model, and multinomial beam-search sampling on the speech model. + + For an overview of generation strategies and code examples, check out the [following + guide](./generation_strategies). + + + + Args: + input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`): + Input audio features. This should be returnes by the [`SeamlessM4TFeatureExtractor`] class or the + [`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details. + return_intermediate_token_ids (`bool`, *optional*): + If `True`, also returns the intermediate generated text and unit tokens. Set to `True` if you also want + to get translated text alongside the audio. + tgt_lang (`str`, *optional*): + The language to use as target language for translation. + speaker_id (`int`, *optional*, defaults to 0): + The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`. + + kwargs (*optional*): + Remaining dictionary of keyword arguments that will be passed to [`GenerationMixin.generate`]. Keyword + arguments are of two types: + + - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model, + except for `decoder_input_ids` which will only be passed through the text components. + - With a *text_* or *speech_* prefix, they will be input for the `generate` method of the + text model and speech model respectively. It has the priority over the keywords without a prefix. + + This means you can, for example, specify a generation strategy for one generation but not for the + other. + + + Returns: + `Union[SeamlessM4Tv2GenerationOutput, Tuple[Tensor]]`: + - If `return_intermediate_token_ids`, returns [`SeamlessM4Tv2GenerationOutput`]. + - If not `return_intermediate_token_ids`, returns a tuple composed of waveforms of shape `(batch_size, + sequence_length)`and and `waveform_lengths` which gives the length of each sample. 
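+
+ Example (illustrative sketch only; assumes `model` is a loaded `SeamlessM4Tv2ForSpeechToSpeech`
+ and `inputs` comes from a `SeamlessM4TProcessor` call on 16 kHz audio; the prefixed keyword
+ arguments simply follow the `text_*`/`speech_*` routing described above):
+
+ >>> # `text_*` kwargs only affect the first (speech-to-text) pass,
+ >>> # `speech_*` kwargs only affect the text-to-unit pass
+ >>> waveform, waveform_lengths = model.generate(
+ ...     input_features=inputs["input_features"],
+ ...     tgt_lang="fra",
+ ...     speaker_id=0,
+ ...     text_num_beams=4,
+ ...     speech_do_sample=True,
+ ...     speech_temperature=0.8,
+ ... )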
+ """ + batch_size = len(input_features) if input_features is not None else len(kwargs.get("inputs_embeds")) + + if tgt_lang is None: + raise ValueError("You must specify a `tgt_lang` to generate translated speech.") + else: + # also accept __xxx__ + tgt_lang = tgt_lang.replace("__", "") + for key in ["text_decoder_lang_to_code_id", "t2u_lang_code_to_id", "vocoder_lang_code_to_id"]: + lang_code_to_id = getattr(self.generation_config, key, None) + if lang_code_to_id is None: + raise ValueError( + f"""This model generation config doesn't have a `{key}` key which maps the target language + to the right token id. Make sure to load the right generation config.""" + ) + elif tgt_lang not in lang_code_to_id: + raise ValueError( + f"""`tgt_lang={tgt_lang}` is not supported by this model. + Please specify a `tgt_lang` in {','.join(lang_code_to_id.keys())}. Note that SeamlessM4Tv2 supports + more languages for text translation than for speech synthesis.""" + ) + + kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs) + kwargs_text["output_hidden_states"] = True + kwargs_text["return_dict_in_generate"] = True + kwargs_text["output_scores"] = True + + text_decoder_input_ids = kwargs_text.get("decoder_input_ids") + # overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids. + text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang) + text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size).to(self.device) + + kwargs_text["decoder_input_ids"] = text_decoder_input_ids + + # first generation + text_generation_output = super().generate(input_features, **kwargs_text) + sequences = text_generation_output.sequences + + # prepare second generation + num_return_sequences = len(sequences) // batch_size + attention_mask = kwargs_speech.get("attention_mask", kwargs_text.get("attention_mask", None)) + + # get last_hidden_state from encoder + encoder_hidden_states = self.speech_encoder(input_features=input_features, attention_mask=attention_mask)[0] + + # input modality = speech so new attention mask for the decoder + if attention_mask is not None: + sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to( + encoder_hidden_states.device + ) + attention_mask = _compute_new_attention_mask( + hidden_states=encoder_hidden_states, seq_lens=sub_sampled_lengths + ) + + # repeat attention mask alongside batch dimension + attention_mask = torch.repeat_interleave(attention_mask, num_return_sequences, dim=0) + + # repeat attention mask alongside batch dimension + encoder_hidden_states = torch.repeat_interleave(encoder_hidden_states, num_return_sequences, dim=0) + + # get decoder last hidden state - must do a pass through the text decoder + t2u_input_embeds = self.text_decoder( + input_ids=sequences[:, :-1], # Manually trim the final EOS token + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=attention_mask, + ).last_hidden_state + + pad_token_id = self.generation_config.pad_token_id + + # Compute new attention mask + seq_lens = (sequences[:, :-1] != pad_token_id).int().sum(1) + t2u_model_attention_mask = _compute_new_attention_mask(t2u_input_embeds, seq_lens) + kwargs_speech["attention_mask"] = t2u_model_attention_mask + + # REMOVE EOS and lang_id + t2u_input_ids = sequences[:, 2:-1] + # replace every other EOS + t2u_input_ids = torch.masked_fill( + t2u_input_ids, t2u_input_ids == self.generation_config.eos_token_id, pad_token_id + ) + + # compute 
t2u_char_input_ids + t2u_subwords = self._indices_to_subwords(t2u_input_ids) + t2u_char_count_per_id = self._count_character_length_in_subword( + t2u_input_ids, t2u_subwords, pad_token_id=pad_token_id + ) + + # Add pads for lang, EOS tokens as per NLLB "source" tokenizer mode. + pad_zero = t2u_char_count_per_id.new_zeros((t2u_char_count_per_id.shape[0], 1)) + t2u_char_count_per_id = torch.cat([pad_zero, t2u_char_count_per_id, pad_zero], dim=1) + t2u_char_input_ids = self._get_char_input_ids( + t2u_input_ids, t2u_subwords, t2u_char_count_per_id, pad_token_id=pad_token_id + ) + + # second pass + t2u_output = self.t2u_model( + inputs_embeds=t2u_input_embeds, + char_input_ids=t2u_char_input_ids, + char_count_per_id=t2u_char_count_per_id, + **kwargs_speech, + ) + + t2u_logits = t2u_output[0] + padding_mask = t2u_output[1].bool() + + # The text-to-unit model is non auto-regressive. We keep the ability to use sampling with temperature + temperature = kwargs_speech.get("temperature", None) + if (temperature is None or temperature == 1.0) or not kwargs_speech.get("do_sample", False): + unit_ids = t2u_logits.argmax(dim=-1) + else: + t2u_logits = t2u_logits / temperature + # apply softmax + probs = nn.functional.softmax(t2u_logits, dim=-1) + # reshape to 2D: (batch_size, seq_len, t2u_vocab_size) -> (batch_size*seq_len, t2u_vocab_size) + probs = probs.reshape((-1, probs.shape[2])) + # multinomial then reshape : (batch_size*seq_len)-> (batch_size,seq_len) + unit_ids = torch.multinomial(probs, num_samples=1).view(t2u_logits.shape[0], -1) + + output_unit_ids = unit_ids.detach().clone() + + replace_mask = (unit_ids == self.config.t2u_eos_token_id) | (~padding_mask) + # replace eos per pad + unit_ids = unit_ids.masked_fill(replace_mask, self.config.t2u_pad_token_id) + + # offset of control symbols + unit_ids = torch.where( + unit_ids == self.config.t2u_pad_token_id, unit_ids, unit_ids - self.config.vocoder_offset + ) + + vocoder_tgt_lang_id = self.generation_config.vocoder_lang_code_to_id.get(tgt_lang) + vocoder_tgt_lang_id = torch.tensor([[vocoder_tgt_lang_id]] * len(unit_ids)).to(self.device) + + speaker_id = torch.tensor([[speaker_id]] * len(unit_ids)).to(self.device) + + waveform, waveform_lengths = self.vocoder( + input_ids=unit_ids, speaker_id=speaker_id, lang_id=vocoder_tgt_lang_id + ) + + if return_intermediate_token_ids: + return SeamlessM4Tv2GenerationOutput( + waveform=waveform, + waveform_lengths=waveform_lengths, + sequences=sequences, + unit_sequences=output_unit_ids, + ) + + return waveform, waveform_lengths + + @staticmethod + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech._reorder_cache + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + # cached cross_attention states don't have to be reordered -> they are always the same + reordered_past += ( + tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], + ) + return reordered_past + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToSpeech.prepare_inputs_for_generation + def prepare_inputs_for_generation( + self, + decoder_input_ids, + past_key_values=None, + attention_mask=None, + use_cache=None, + encoder_outputs=None, + **kwargs, + ): + # cut decoder_input_ids if past is used + if past_key_values is not None: + decoder_input_ids = decoder_input_ids[:, -1:] + + return { + "input_ids": None, # encoder_outputs is defined. 
input_ids not needed + "encoder_outputs": encoder_outputs, + "past_key_values": past_key_values, + "decoder_input_ids": decoder_input_ids, + "attention_mask": attention_mask, + "use_cache": use_cache, + } + + +@add_start_docstrings( + "The original SeamlessM4Tv2 Model transformer which can be used for every tasks available (S2ST, S2TT, T2TT, T2ST).", + SEAMLESS_M4T_V2_START_DOCSTRING, + """ + current_modality (`str`, *optional*, defaults to `"text"`): + Default modality. Used only to initialize the model. It can be set to `"text"` or `"speech"`. + This will be updated automatically according to the modality passed to the forward and generate passes (`input_ids` for text and `input_features` for audio). + """, +) +class SeamlessM4Tv2Model(SeamlessM4Tv2PreTrainedModel): + _tied_weights_keys = [ + "lm_head.weight", + "text_encoder.embed_tokens.weight", + "text_decoder.embed_tokens.weight", + ] + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.__init__ with SeamlessM4T->SeamlessM4Tv2 + def __init__(self, config, current_modality="text"): + super().__init__(config) + + self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id) + + self.text_encoder = SeamlessM4Tv2Encoder(config, self.shared) + self.speech_encoder = SeamlessM4Tv2SpeechEncoder(config) + self.text_decoder = SeamlessM4Tv2Decoder(config, self.shared) + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + self.current_modality = current_modality + if current_modality == "speech": + self.main_input_name = "input_features" + + # these models already call post_init in their initialization + self.t2u_model = SeamlessM4Tv2TextToUnitForConditionalGeneration(config) + self.vocoder = SeamlessM4Tv2CodeHifiGan(config) + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.set_modality + def set_modality(self, modality="text"): + if modality == "text": + self.main_input_name = "input_ids" + self.current_modality = "text" + elif modality == "speech": + self.main_input_name = "input_features" + self.current_modality = "speech" + else: + raise ValueError(f"`modality={modality}` is not a valid modality. 
It must be `text` or `speech`.") + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.get_encoder + def get_encoder(self): + if self.current_modality == "text": + return self.text_encoder + else: + return self.speech_encoder + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.get_output_embeddings + def get_output_embeddings(self): + return self.lm_head + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.set_output_embeddings + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.get_input_embeddings + def get_input_embeddings(self): + return self.text_decoder.embed_tokens + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.set_input_embeddings + def set_input_embeddings(self, value): + self.text_encoder.embed_tokens = value + self.text_decoder.embed_tokens = value + self.shared = value + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel._tie_weights + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.text_encoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.text_decoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.lm_head, self.shared) + + @add_start_docstrings_to_model_forward(M4T_MODEL_INPUTS_DOCSTRING) + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.forward with SeamlessM4T->SeamlessM4Tv2 + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + input_features: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.LongTensor] = None, + encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **kwargs, + ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if labels is not None: + if use_cache: + logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") + use_cache = False + if decoder_input_ids is None and decoder_inputs_embeds is None: + decoder_input_ids = shift_tokens_right( + labels, self.config.pad_token_id, self.config.decoder_start_token_id + ) + + if input_ids is None and input_features is None and inputs_embeds is None and encoder_outputs is None: + raise ValueError( + "`input_ids`,`input_features`, `inputs_embeds` and `encoder_outputs` are all empty. Make sure at least one of them is not." 
+ ) + elif input_features is not None: + if input_ids is not None: + logger.warning( + "`input_ids` is not `None` but `input_features` has been given." + "`input_features` will be used in priority through the `speech_encoder`. " + "Make sure that `input_features` and `input_ids` are mutually exclusive." + ) + + if inputs_embeds is not None: + logger.warning( + "`inputs_embeds` is not `None` but `input_features` has been given." + "`input_features` will be used in priority through `speech_encoder`. " + "`inputs_embeds` will be ignored." + ) + + # if encoder_outputs is not None, it's probably used within a .generate method so no need to warn + logger.warning( + "This calls the same method `forward` as `SeamlessM4Tv2ForTextToText` and `SeamlessM4Tv2ForSpeechToText`" + "depending on the input modality. If you want to generate speech, use the `generate` method." + ) + + self.set_modality("speech") + + encoder_outputs = self.speech_encoder( + input_features=input_features, + attention_mask=attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + elif input_ids is not None or inputs_embeds is not None: + # if encoder_outputs is not None, it's probably used within a .generate method so no need to warn + logger.warning( + "This calls the same method `forward` as `SeamlessM4Tv2ForTextToText` and `SeamlessM4Tv2ForSpeechToText`" + "depending on the input modality. If you want to generate speech, use the `generate` method." + ) + self.set_modality("text") + encoder_outputs = self.text_encoder( + input_ids=input_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True + elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): + encoder_outputs = BaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + + encoder_attention_mask = attention_mask + # input modality = speech so new attention mask + if self.current_modality == "speech" and attention_mask is not None: + sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to( + encoder_outputs[0].device + ) + encoder_attention_mask = _compute_new_attention_mask( + hidden_states=encoder_outputs[0], seq_lens=sub_sampled_lengths + ) + + # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) + decoder_outputs = self.text_decoder( + input_ids=decoder_input_ids, + attention_mask=decoder_attention_mask, + encoder_hidden_states=encoder_outputs[0], + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + lm_logits = self.lm_head(decoder_outputs[0]) + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + labels = labels.to(lm_logits.device) + masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + outputs = decoder_outputs + encoder_outputs + output = (lm_logits,) + outputs[1:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else 
output + + return Seq2SeqLMOutput( + loss=masked_lm_loss, + logits=lm_logits, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + @torch.no_grad() + def generate( + self, + input_ids: Optional[torch.Tensor] = None, + input_features: Optional[torch.Tensor] = None, + return_intermediate_token_ids: Optional[bool] = None, + tgt_lang: Optional[str] = None, + speaker_id: Optional[int] = 0, + generate_speech: Optional[bool] = True, + **kwargs, + ) -> Union[torch.Tensor, SeamlessM4Tv2GenerationOutput]: + """ + Generates translated token ids and/or translated audio waveforms. + + + + This method successively calls the `.generate` function of two different sub-models. You can specify keyword + arguments at two different levels: general arguments that will be passed to both models, or prefixed arguments + that will be passed to one of them. + + For example, calling `.generate(input_ids=input_ids, num_beams=4, speech_do_sample=True)` will successively + perform beam-search decoding on the text model, and multinomial sampling on the speech model. + + For an overview of generation strategies and code examples, check out the [following + guide](./generation_strategies). + + + + + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See + [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`, *optional*): + Input audio features. These should be returned by the [`SeamlessM4TFeatureExtractor`] class or the + [`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details. + return_intermediate_token_ids (`bool`, *optional*): + If `True`, also returns the intermediate generated text and unit tokens. Set to `True` if you also want + to get translated text alongside the audio. Note that if `generate_speech=False`, this parameter will be + ignored. + tgt_lang (`str`, *optional*): + The language to use as target language for translation. + speaker_id (`int`, *optional*, defaults to 0): + The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`. + generate_speech (`bool`, *optional*, defaults to `True`): + If `False`, will only return the text tokens and won't generate speech. + + kwargs (*optional*): + Remaining dictionary of keyword arguments that will be passed to [`GenerationMixin.generate`]. Keyword + arguments are of two types: + + - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model, + except for `decoder_input_ids` which will only be passed through the text components. + - With a *text_* or *speech_* prefix, they will be input for the `generate` method of the + text model and speech model respectively. They take priority over the keywords without a prefix. + + This means you can, for example, specify a generation strategy for one generation but not for the + other.
+ + Returns: + `Union[SeamlessM4Tv2GenerationOutput, Tuple[Tensor], ModelOutput]`: + - If `generate_speech` and `return_intermediate_token_ids`, returns [`SeamlessM4Tv2GenerationOutput`]. + - If `generate_speech` and not `return_intermediate_token_ids`, returns a tuple composed of waveforms of + shape `(batch_size, sequence_length)` and `waveform_lengths` which gives the length of each sample. + - If `generate_speech=False`, it will return `ModelOutput`. + """ + if input_ids is None and input_features is None and kwargs.get("inputs_embeds", None) is None: + raise ValueError( + "`input_ids`, `input_features` and `inputs_embeds` are all empty. Make sure at least one of them is not." + ) + + if generate_speech and tgt_lang is None: + raise ValueError("You must specify a `tgt_lang` to generate translated speech.") + + if tgt_lang is not None: + # also accept __xxx__ + tgt_lang = tgt_lang.replace("__", "") + if generate_speech: + keys_to_check = ["text_decoder_lang_to_code_id", "t2u_lang_code_to_id", "vocoder_lang_code_to_id"] + else: + keys_to_check = ["text_decoder_lang_to_code_id"] + for key in keys_to_check: + lang_code_to_id = getattr(self.generation_config, key, None) + if lang_code_to_id is None: + raise ValueError( + f"""This model generation config doesn't have a `{key}` key which maps the target language + to the right token id. Make sure to load the right generation config.""" + ) + elif tgt_lang not in lang_code_to_id: + raise ValueError( + f"""`tgt_lang={tgt_lang}` is not supported by this model. + Please specify a `tgt_lang` in {','.join(lang_code_to_id.keys())}. Note that SeamlessM4Tv2 supports + more languages for text translation than for speech synthesis.""" + ) + + batch_size = ( + len(input_features) + if input_features is not None + else (len(input_ids) if input_ids is not None else len(kwargs.get("inputs_embeds"))) + ) + + kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs) + kwargs_text["output_hidden_states"] = True + kwargs_text["return_dict_in_generate"] = True + kwargs_text["output_scores"] = True + + text_decoder_input_ids = kwargs_text.get("decoder_input_ids") + # overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids. + if tgt_lang is not None: + # tgt_lang gets priority over decoder input ids + text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang) + text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size).to(self.device) + + kwargs_text["decoder_input_ids"] = text_decoder_input_ids + + # first generation + if input_features is not None: + self.set_modality("speech") + if input_ids is not None: + logger.warning( + "`input_features` and `input_ids` are both non-empty. `input_features` will be used in priority " + "through the speech encoder. Make sure `input_features=None` if you want to use the text encoder."
+ ) + text_generation_output = super().generate(input_features=input_features, **kwargs_text) + else: + self.set_modality("text") + text_generation_output = super().generate(input_ids=input_ids, input_features=None, **kwargs_text) + sequences = text_generation_output.sequences + + if not generate_speech: + return text_generation_output + + # prepare second generation + num_return_sequences = len(sequences) // batch_size + attention_mask = kwargs_speech.get("attention_mask", kwargs_text.get("attention_mask", None)) + + # get encoder last hidden states + if self.current_modality == "speech": + # get last_hidden_state from encoder - must do a pass through the speech encoder + encoder_hidden_states = self.speech_encoder( + input_features=input_features, attention_mask=attention_mask + ).last_hidden_state + + # input modality = speech so new attention mask for the decoder + if attention_mask is not None: + sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to( + encoder_hidden_states.device + ) + attention_mask = _compute_new_attention_mask( + hidden_states=encoder_hidden_states, seq_lens=sub_sampled_lengths + ) + else: + encoder_hidden_states = text_generation_output.encoder_hidden_states[-1] + + if attention_mask is not None: + # repeat attention mask alongside batch dimension + attention_mask = torch.repeat_interleave(attention_mask, num_return_sequences, dim=0) + + # repeat attention mask alongside batch dimension + encoder_hidden_states = torch.repeat_interleave(encoder_hidden_states, num_return_sequences, dim=0) + + # get decoder last hidden state - must do a pass through the text decoder + t2u_input_embeds = self.text_decoder( + input_ids=sequences[:, :-1], # Manually trim the final EOS token + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=attention_mask, + ).last_hidden_state + + pad_token_id = self.generation_config.pad_token_id + + # Compute new attention mask + seq_lens = (sequences[:, :-1] != pad_token_id).int().sum(1) + t2u_model_attention_mask = _compute_new_attention_mask(t2u_input_embeds, seq_lens) + kwargs_speech["attention_mask"] = t2u_model_attention_mask + + # REMOVE EOS and lang_id + t2u_input_ids = sequences[:, 2:-1] + # replace every other EOS + t2u_input_ids = torch.masked_fill( + t2u_input_ids, t2u_input_ids == self.generation_config.eos_token_id, pad_token_id + ) + + # compute t2u_char_input_ids + t2u_subwords = self._indices_to_subwords(t2u_input_ids) + t2u_char_count_per_id = self._count_character_length_in_subword( + t2u_input_ids, t2u_subwords, pad_token_id=pad_token_id + ) + + # Add pads for lang, EOS tokens as per NLLB "source" tokenizer mode. + pad_zero = t2u_char_count_per_id.new_zeros((t2u_char_count_per_id.shape[0], 1)) + t2u_char_count_per_id = torch.cat([pad_zero, t2u_char_count_per_id, pad_zero], dim=1) + t2u_char_input_ids = self._get_char_input_ids( + t2u_input_ids, t2u_subwords, t2u_char_count_per_id, pad_token_id=pad_token_id + ) + + # second pass + t2u_output = self.t2u_model( + inputs_embeds=t2u_input_embeds, + char_input_ids=t2u_char_input_ids, + char_count_per_id=t2u_char_count_per_id, + **kwargs_speech, + ) + + t2u_logits = t2u_output[0] + padding_mask = t2u_output[1].bool() + + # The text-to-unit model is non auto-regressive. 
We keep the ability to use sampling with temperature + temperature = kwargs_speech.get("temperature", None) + if (temperature is None or temperature == 1.0) or not kwargs_speech.get("do_sample", False): + unit_ids = t2u_logits.argmax(dim=-1) + else: + t2u_logits = t2u_logits / temperature + # apply softmax + probs = nn.functional.softmax(t2u_logits, dim=-1) + # reshape to 2D: (batch_size, seq_len, t2u_vocab_size) -> (batch_size*seq_len, t2u_vocab_size) + probs = probs.reshape((-1, probs.shape[2])) + # multinomial then reshape : (batch_size*seq_len)-> (batch_size,seq_len) + unit_ids = torch.multinomial(probs, num_samples=1).view(t2u_logits.shape[0], -1) + + output_unit_ids = unit_ids.detach().clone() + + replace_mask = (unit_ids == self.config.t2u_eos_token_id) | (~padding_mask) + # replace eos per pad + unit_ids = unit_ids.masked_fill(replace_mask, self.config.t2u_pad_token_id) + + # offset of control symbols + unit_ids = torch.where( + unit_ids == self.config.t2u_pad_token_id, unit_ids, unit_ids - self.config.vocoder_offset + ) + + vocoder_tgt_lang_id = self.generation_config.vocoder_lang_code_to_id.get(tgt_lang) + vocoder_tgt_lang_id = torch.tensor([[vocoder_tgt_lang_id]] * len(unit_ids)).to(self.device) + + speaker_id = torch.tensor([[speaker_id]] * len(unit_ids)).to(self.device) + + waveform, waveform_lengths = self.vocoder( + input_ids=unit_ids, speaker_id=speaker_id, lang_id=vocoder_tgt_lang_id + ) + + if return_intermediate_token_ids: + return SeamlessM4Tv2GenerationOutput( + waveform=waveform, + waveform_lengths=waveform_lengths, + sequences=sequences, + unit_sequences=output_unit_ids, + ) + + return waveform, waveform_lengths + + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.prepare_inputs_for_generation + def prepare_inputs_for_generation( + self, + decoder_input_ids, + past_key_values=None, + attention_mask=None, + use_cache=None, + encoder_outputs=None, + **kwargs, + ): + # cut decoder_input_ids if past is used + if past_key_values is not None: + decoder_input_ids = decoder_input_ids[:, -1:] + + return { + "input_ids": None, # encoder_outputs is defined. input_ids not needed + "encoder_outputs": encoder_outputs, + "past_key_values": past_key_values, + "decoder_input_ids": decoder_input_ids, + "attention_mask": attention_mask, + "use_cache": use_cache, + } + + @staticmethod + # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel._reorder_cache + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + # cached cross_attention states don't have to be reordered -> they are always the same + reordered_past += ( + tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], + ) + return reordered_past diff --git a/venv/lib/python3.10/site-packages/transformers/models/umt5/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/umt5/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e68ae4cb3737cf7fce75b980e4fc19e6ba93361d --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/umt5/__init__.py @@ -0,0 +1,60 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available + + +_import_structure = {"configuration_umt5": ["UMT5Config", "UMT5OnnxConfig"]} + + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_umt5"] = [ + "UMT5EncoderModel", + "UMT5ForConditionalGeneration", + "UMT5ForQuestionAnswering", + "UMT5ForSequenceClassification", + "UMT5ForTokenClassification", + "UMT5Model", + "UMT5PreTrainedModel", + ] + +if TYPE_CHECKING: + from .configuration_umt5 import UMT5Config, UMT5OnnxConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_umt5 import ( + UMT5EncoderModel, + UMT5ForConditionalGeneration, + UMT5ForQuestionAnswering, + UMT5ForSequenceClassification, + UMT5ForTokenClassification, + UMT5Model, + UMT5PreTrainedModel, + ) +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/umt5/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/umt5/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..779310ab02ad362b2750b441eeb00f6337b72e33 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/umt5/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/umt5/__pycache__/configuration_umt5.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/umt5/__pycache__/configuration_umt5.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..368e4e4590a22f79aa136bdff2c6a013be8010b8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/umt5/__pycache__/configuration_umt5.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/umt5/__pycache__/convert_umt5_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/umt5/__pycache__/convert_umt5_checkpoint_to_pytorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dae076caaf08bdf180dd7c43acd15b49aa5bb6e1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/umt5/__pycache__/convert_umt5_checkpoint_to_pytorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/umt5/__pycache__/modeling_umt5.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/umt5/__pycache__/modeling_umt5.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8acd82ef41852fdf947e6bea0e499f75de399b7c Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/umt5/__pycache__/modeling_umt5.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/transformers/models/umt5/configuration_umt5.py b/venv/lib/python3.10/site-packages/transformers/models/umt5/configuration_umt5.py new file mode 100644 index 0000000000000000000000000000000000000000..9365717c282ae620e23363a12bfe7ee847d0efd8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/umt5/configuration_umt5.py @@ -0,0 +1,172 @@ +# coding=utf-8 +# Copyright 2023, The T5 Authors and HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" UMT5 model configuration""" +from typing import Mapping + +from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxSeq2SeqConfigWithPast +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +class UMT5Config(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`UMT5Model`]. It is used to instantiate a UMT5 + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the UMT5 + [google/umt5-small](https://huggingface.co/google/umt5-small) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Arguments: + vocab_size (`int`, *optional*, defaults to 250112): + Vocabulary size of the UMT5 model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`UMT5Model`] or [`TFUMT5Model`]. + d_model (`int`, *optional*, defaults to 512): + Size of the encoder layers and the pooler layer. + d_kv (`int`, *optional*, defaults to 64): + Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model // + num_heads`. + d_ff (`int`, *optional*, defaults to 1024): + Size of the intermediate feed forward layer in each `UMT5Block`. + num_layers (`int`, *optional*, defaults to 8): + Number of hidden layers in the Transformer encoder. + num_decoder_layers (`int`, *optional*): + Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set. + num_heads (`int`, *optional*, defaults to 6): + Number of attention heads for each attention layer in the Transformer encoder. + relative_attention_num_buckets (`int`, *optional*, defaults to 32): + The number of buckets to use for each attention layer. + relative_attention_max_distance (`int`, *optional*, defaults to 128): + The maximum distance of the longer sequences for the bucket separation. + dropout_rate (`float`, *optional*, defaults to 0.1): + The ratio for all dropout layers. + classifier_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for classifier. + layer_norm_eps (`float`, *optional*, defaults to 1e-6): + The epsilon used by the layer normalization layers. 
+ initializer_factor (`float`, *optional*, defaults to 1): + A factor for initializing all weight matrices (should be kept to 1, used internally for initialization + testing). + feed_forward_proj (`string`, *optional*, defaults to `"gated-gelu"`): + Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). + """ + + model_type = "umt5" + keys_to_ignore_at_inference = ["past_key_values"] + attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} + + def __init__( + self, + vocab_size=250112, + d_model=512, + d_kv=64, + d_ff=1024, + num_layers=8, + num_decoder_layers=None, + num_heads=6, + relative_attention_num_buckets=32, + relative_attention_max_distance=128, + dropout_rate=0.1, + layer_norm_epsilon=1e-6, + initializer_factor=1.0, + feed_forward_proj="gated-gelu", + is_encoder_decoder=True, + use_cache=True, + tokenizer_class="T5Tokenizer", + tie_word_embeddings=True, + pad_token_id=0, + eos_token_id=1, + decoder_start_token_id=0, + classifier_dropout=0.0, + **kwargs, + ): + self.vocab_size = vocab_size + self.d_model = d_model + self.d_kv = d_kv + self.d_ff = d_ff + self.num_layers = num_layers + self.num_decoder_layers = ( + num_decoder_layers if num_decoder_layers is not None else self.num_layers + ) # default = symmetry + self.num_heads = num_heads + self.relative_attention_num_buckets = relative_attention_num_buckets + self.relative_attention_max_distance = relative_attention_max_distance + self.dropout_rate = dropout_rate + self.classifier_dropout = classifier_dropout + self.layer_norm_epsilon = layer_norm_epsilon + self.initializer_factor = initializer_factor + self.feed_forward_proj = feed_forward_proj + self.use_cache = use_cache + + act_info = self.feed_forward_proj.split("-") + self.dense_act_fn = act_info[-1] + self.is_gated_act = act_info[0] == "gated" + + if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2: + raise ValueError( + f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. " + "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
" + "'gated-gelu' or 'relu'" + ) + + if feed_forward_proj == "gated-gelu": + self.dense_act_fn = "gelu_new" + + super().__init__( + is_encoder_decoder=is_encoder_decoder, + tokenizer_class=tokenizer_class, + tie_word_embeddings=tie_word_embeddings, + pad_token_id=pad_token_id, + eos_token_id=eos_token_id, + decoder_start_token_id=decoder_start_token_id, + **kwargs, + ) + + +class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast): + @property + # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs + def inputs(self) -> Mapping[str, Mapping[int, str]]: + common_inputs = { + "input_ids": {0: "batch", 1: "encoder_sequence"}, + "attention_mask": {0: "batch", 1: "encoder_sequence"}, + } + if self.use_past: + common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence" + common_inputs["decoder_input_ids"] = {0: "batch"} + common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"} + else: + common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"} + common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"} + + if self.use_past: + self.fill_with_past_key_values_(common_inputs, direction="inputs") + + return common_inputs + + @property + # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset + def default_onnx_opset(self) -> int: + return 13 + + @property + def atol_for_validation(self) -> float: + return 5e-4 diff --git a/venv/lib/python3.10/site-packages/transformers/models/umt5/convert_umt5_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/umt5/convert_umt5_checkpoint_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..eeb5b3eb400ea6e64b83cd7fcabbc97eb7d0445d --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/umt5/convert_umt5_checkpoint_to_pytorch.py @@ -0,0 +1,274 @@ +# coding=utf-8 +# Copyright 2023 Google LLC and HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Convert T5X checkpoint to PyTorch + +Steps: +- Install gsutil according to https://cloud.google.com/storage/docs/gsutil_install +- Get a T5X checkpoint at https://github.com/google-research/t5x/blob/main/docs/models.md#t5-11-checkpoints Example: + `gsutil -m cp -r gs://t5-data/pretrained_models/t5x/t5_1_1_small $HOME/` +- Create or download a corresponding config for the downloaded model. E.g. 
for T5 v1.1 small, you can use + https://huggingface.co/google/t5-v1_1-small/blob/main/config.json +- Convert: + ``` + python3 convert_t5x_checkpoint_to_pytorch.py --t5x_checkpoint_path=$HOME/t5_1_1_small --config_file=config.json\ + --pytorch_dump_path=$HOME/t5_1_1_small_pt + ``` +""" + +import argparse +import collections + +import numpy as np +import torch +from flax import traverse_util +from t5x import checkpoints + +from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration +from transformers.utils import logging + + +logging.set_verbosity_info() + + +def t5x_relpos_bias_lookup(params, i, prefix): + """Returns the Relative Position Bias parameters of a layer. Does not transpose.""" + return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :] + + +def t5x_attention_lookup(params, i, prefix, layer_name="attention"): + """Returns the KOQV parameters of (self-)attention. Does not transpose.""" + k_tmp = k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :]) + k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2]) + o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :]) + o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2]) + q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :]) + q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2]) + v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :]) + v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2]) + return k, o, q, v + + +def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False): + """Returns the MLP parameters of a layer. Does not transpose.""" + if split_mlp_wi: + wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :] + wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :] + wi = (wi_0, wi_1) + else: + wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :] + + wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :] + return wi, wo + + +def t5x_layer_norm_lookup(params, i, prefix, layer_name): + """Returns the layer norm param of a layer.""" + return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i] + + +def convert_t5x_to_pytorch( + variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False +): + """Converts the parameters from T5X-Flax to Transformers-PyTorch.""" + old = traverse_util.flatten_dict(variables["target"]) + old = {"/".join(k): v for k, v in old.items()} + + # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi + split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old + print("Split MLP:", split_mlp_wi) + + new = collections.OrderedDict() + + # Shared embeddings. + new["shared.weight"] = old["token_embedder/embedding"] + + # Encoder. + for i in range(num_layers): + # Block i, layer 0 (Self Attention). + layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm") + k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention") + new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm + new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T + new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T + new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T + new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T + + # Block i, layer 1 (MLP). 
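+        # Commentary (added; an interpretation of the lookups in this loop, not a note from the original script):
+        # T5X/Flax stores each dense kernel as an (in_features, out_features) array stacked along a layer axis,
+        # whereas `torch.nn.Linear.weight` is laid out as (out_features, in_features), which is why every kernel
+        # copied in this loop is transposed with `.T` before being written into the new state dict.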
+ layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm") + wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi) + new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm + if split_mlp_wi: + new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T + new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T + else: + new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T + new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T + if scalable_attention: + # convert the rel_embedding of each layer + new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup( + old, i, "encoder" + ).T + + new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"] + + if not scalable_attention: + new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup( + old, 0, "encoder" + ).T + new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup( + old, 0, "decoder" + ).T + + if not is_encoder_only: + # Decoder. + for i in range(num_layers): + # Block i, layer 0 (Self Attention). + layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm") + k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention") + new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm + new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T + new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T + new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T + new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T + + # Block i, layer 1 (Cross Attention). + layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm") + k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention") + new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm + new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T + new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T + new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T + new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T + + # Block i, layer 2 (MLP). + layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm") + wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi) + new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm + if split_mlp_wi: + new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T + new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T + else: + new[f"encoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T + new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T + + if scalable_attention: + # convert the rel_embedding of each layer + new[ + f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight" + ] = t5x_relpos_bias_lookup(old, i, "decoder").T + + new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"] + + # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) + if "decoder/logits_dense/kernel" in old: + new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T + + return new + + +def make_state_dict(converted_params, is_encoder_only: bool): + """Prepares a state dict for the PyTorch model.""" + # Make a state dict with torch tensors. 
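+    # Added commentary: the converted values are still numpy arrays at this point; they are wrapped into torch
+    # tensors below, and the shared token embedding is reused for `encoder.embed_tokens` / `decoder.embed_tokens`
+    # (and as `lm_head` for v1.0-style checkpoints without a separate logits kernel), mirroring T5's weight tying.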
+ state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()]) + + # Add what is missing. + if "encoder.embed_tokens.weight" not in state_dict: + state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"] + + if not is_encoder_only: + if "decoder.embed_tokens.weight" not in state_dict: + state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"] + + if "lm_head.weight" not in state_dict: # For old 1.0 models. + print("Using shared word embeddings as lm_head.") + state_dict["lm_head.weight"] = state_dict["shared.weight"] + + return state_dict + + +def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention): + """Replaces the params in model witht the T5X converted params.""" + variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path) + converted = convert_t5x_to_pytorch( + variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention + ) + state_dict = make_state_dict(converted, is_encoder_only) + model.load_state_dict(state_dict, strict=True) + + +def convert_t5x_checkpoint_to_pytorch( + t5x_checkpoint_path, + config_file, + pytorch_dump_path, + is_encoder_only: bool = False, + scalable_attention: bool = False, +): + """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint.""" + # Initialise PyTorch model + config = MT5Config.from_json_file(config_file) + print(f"Building PyTorch model from configuration: {config}") + # Non-v1.1 checkpoints could also use T5Model, but this works for all. + # The v1.0 checkpoints will simply have an LM head that is the word embeddings. + if is_encoder_only: + model = UMT5EncoderModel(config) + else: + model = UMT5ForConditionalGeneration(config) + + # Load weights from tf checkpoint + load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention) + + # Save pytorch-model + print(f"Save PyTorch model to {pytorch_dump_path}") + model.save_pretrained(pytorch_dump_path) + + # Verify that we can load the checkpoint. + model.from_pretrained(pytorch_dump_path) + print("Done") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.") + # Required parameters + parser.add_argument( + "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint." + ) + parser.add_argument( + "--config_file", + default=None, + type=str, + required=True, + help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.", + ) + parser.add_argument( + "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." 
+ ) + parser.add_argument( + "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False + ) + parser.add_argument( + "--scalable_attention", + action="store_true", + help="Whether the model uses scaled attention (umt5 model)", + default=False, + ) + args = parser.parse_args() + convert_t5x_checkpoint_to_pytorch( + args.t5x_checkpoint_path, + args.config_file, + args.pytorch_dump_path, + args.is_encoder_only, + args.scalable_attention, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/umt5/modeling_umt5.py b/venv/lib/python3.10/site-packages/transformers/models/umt5/modeling_umt5.py new file mode 100644 index 0000000000000000000000000000000000000000..1bf8469f77e66de71b2484805fdcd77f2e2338fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/umt5/modeling_umt5.py @@ -0,0 +1,1857 @@ +# coding=utf-8 +# Copyright 2023 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch UMT5 model.""" + +import copy +import math +from typing import List, Optional, Tuple, Union + +import torch +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...modeling_outputs import ( + BaseModelOutput, + BaseModelOutputWithPastAndCrossAttentions, + Seq2SeqLMOutput, + Seq2SeqModelOutput, + Seq2SeqQuestionAnsweringModelOutput, + Seq2SeqSequenceClassifierOutput, + TokenClassifierOutput, +) +from ...modeling_utils import PreTrainedModel +from ...utils import ( + DUMMY_INPUTS, + DUMMY_MASK, + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_torch_fx_proxy, + logging, + replace_return_docstrings, +) +from .configuration_umt5 import UMT5Config + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "UMT5Config" +_CHECKPOINT_FOR_DOC = "google/umt5-small" + + +# Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->UMT5 +class UMT5LayerNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + Construct a layernorm module in the UMT5 style. No bias and no subtraction of mean. + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + # UMT5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean + # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus varience is calculated + # w/o mean and there is no bias. 
Additionally we want to make sure that the accumulation for + # half-precision inputs is done in fp32 + + variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + + # convert into half-precision if necessary + if self.weight.dtype in [torch.float16, torch.bfloat16]: + hidden_states = hidden_states.to(self.weight.dtype) + + return self.weight * hidden_states + + +# Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->UMT5 +class UMT5DenseActDense(nn.Module): + def __init__(self, config: UMT5Config): + super().__init__() + self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) + self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) + self.dropout = nn.Dropout(config.dropout_rate) + self.act = ACT2FN[config.dense_act_fn] + + def forward(self, hidden_states): + hidden_states = self.wi(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.dropout(hidden_states) + if ( + isinstance(self.wo.weight, torch.Tensor) + and hidden_states.dtype != self.wo.weight.dtype + and self.wo.weight.dtype != torch.int8 + ): + hidden_states = hidden_states.to(self.wo.weight.dtype) + hidden_states = self.wo(hidden_states) + return hidden_states + + +# Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->UMT5 +class UMT5DenseGatedActDense(nn.Module): + def __init__(self, config: UMT5Config): + super().__init__() + self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) + self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) + self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) + self.dropout = nn.Dropout(config.dropout_rate) + self.act = ACT2FN[config.dense_act_fn] + + def forward(self, hidden_states): + hidden_gelu = self.act(self.wi_0(hidden_states)) + hidden_linear = self.wi_1(hidden_states) + hidden_states = hidden_gelu * hidden_linear + hidden_states = self.dropout(hidden_states) + + # To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32. + # See https://github.com/huggingface/transformers/issues/20287 + # we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None`` + if ( + isinstance(self.wo.weight, torch.Tensor) + and hidden_states.dtype != self.wo.weight.dtype + and self.wo.weight.dtype != torch.int8 + ): + hidden_states = hidden_states.to(self.wo.weight.dtype) + + hidden_states = self.wo(hidden_states) + return hidden_states + + +# Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->UMT5 +class UMT5LayerFF(nn.Module): + def __init__(self, config: UMT5Config): + super().__init__() + if config.is_gated_act: + self.DenseReluDense = UMT5DenseGatedActDense(config) + else: + self.DenseReluDense = UMT5DenseActDense(config) + + self.layer_norm = UMT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) + self.dropout = nn.Dropout(config.dropout_rate) + + def forward(self, hidden_states): + forwarded_states = self.layer_norm(hidden_states) + forwarded_states = self.DenseReluDense(forwarded_states) + hidden_states = hidden_states + self.dropout(forwarded_states) + return hidden_states + + +class UMT5Attention(nn.Module): + """ + T5's attention using relative_attention_bias. 
+ """ + + def __init__(self, config, has_relative_attention_bias=False): + super().__init__() + self.is_decoder = config.is_decoder + self.has_relative_attention_bias = has_relative_attention_bias + self.relative_attention_num_buckets = config.relative_attention_num_buckets + self.relative_attention_max_distance = config.relative_attention_max_distance + self.d_model = config.d_model + self.key_value_proj_dim = config.d_kv + self.n_heads = config.num_heads + self.dropout = config.dropout_rate + self.inner_dim = self.n_heads * self.key_value_proj_dim + + # Mesh TensorFlow initialization to avoid scaling before softmax + self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) + self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) + self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) + self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) + + if self.has_relative_attention_bias: + self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) + self.pruned_heads = set() + + def _shape(self, projection: torch.Tensor) -> torch.Tensor: + new_projection_shape = projection.size()[:-1] + (self.n_heads, self.key_value_proj_dim) + # move heads to 2nd position (B, T, H * D) -> (B, T, H, D) -> (B, H, T, D) + new_projection = projection.view(new_projection_shape).permute(0, 2, 1, 3) + return new_projection + + def _relative_position_bucket(self, relative_position): + """ + Adapted from Mesh Tensorflow: + https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 + + Translate relative position to a bucket number for relative attention. The relative position is defined as + memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to + position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for + small absolute relative_position and larger buckets for larger absolute relative_positions. All relative + positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
+ This should allow for more graceful generalization to longer sequences than the model has been trained on + + Args: + relative_position: an int32 Tensor + bidirectional: a boolean - whether the attention is bidirectional + num_buckets: an integer + max_distance: an integer + + Returns: + a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) + """ + relative_buckets = 0 + num_buckets = self.relative_attention_num_buckets + max_distance = self.relative_attention_max_distance + if not self.is_decoder: + num_buckets //= 2 + relative_buckets += (relative_position > 0).to(torch.long) * num_buckets + relative_position = torch.abs(relative_position) + else: + relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) + # now relative_position is in the range [0, inf) + + # half of the buckets are for exact increments in positions + max_exact = num_buckets // 2 + is_small = relative_position < max_exact + + # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance + log_ratio = torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) + log_ratio = log_ratio * (num_buckets - max_exact) + relative_position_if_large = max_exact + log_ratio.to(torch.long) + relative_position_if_large = torch.min( + relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) + ) + + relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) + return relative_buckets + + def compute_bias(self, query_length, key_length, device=None): + """Compute binned relative position bias""" + if device is None: + device = self.relative_attention_bias.weight.device + context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] + memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] + relative_position = memory_position - context_position # shape (query_length, key_length) + relative_position_bucket = self._relative_position_bucket(relative_position) + values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) + values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) + return values + + def forward( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + ): + is_cross_attention = encoder_hidden_states is not None + batch_size, seq_length = hidden_states.shape[:2] + + # use encoder_hidden_states if cross attention + current_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states + # checking that the `sequence_length` of the `past_key_value` is the same as the he provided + # `encoder_hidden_states` to support prefix tuning + if is_cross_attention and past_key_value and past_key_value[0].shape[2] == current_states.shape[1]: + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + else: + key_states = self._shape(self.k(current_states)) + value_states = self._shape(self.v(current_states)) + if past_key_value is not None and not is_cross_attention: + # reuse k, v, self_attention + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + + 
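+        # Added shape commentary: key_states / value_states are (batch_size, n_heads, key_length, d_kv) at this
+        # point; the queries projected below are (batch_size, n_heads, seq_length, d_kv), so the matmul yields raw
+        # attention scores of shape (batch_size, n_heads, seq_length, key_length). As in T5, no 1/sqrt(d_kv)
+        # scaling is applied here; the Mesh TensorFlow-style initialization of q/k/v/o is relied on instead.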
query_states = self._shape(self.q(hidden_states)) + attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2)) + + # compute positional bias + if self.has_relative_attention_bias: + query_length = seq_length + if past_key_value is not None: + query_length += past_key_value[0].shape[2] + position_bias = self.compute_bias(query_length, key_states.size(2), device=attention_scores.device) + else: + position_bias = torch.zeros( + (1, self.n_heads, seq_length, key_states.size(2)), + device=attention_scores.device, + dtype=attention_scores.dtype, + requires_grad=self.training, + ) + if past_key_value is not None: + position_bias = position_bias[:, :, -hidden_states.size(1) :, :] + if attention_mask is not None: + position_bias = position_bias + attention_mask # (batch_size, n_heads, seq_length, key_length) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + attention_scores += position_bias + # (batch_size, n_heads, seq_length, key_length) + attn_weights = nn.functional.softmax(attention_scores.float(), dim=-1).type_as(attention_scores) + attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + # Mask heads if we want to + if layer_head_mask is not None: + attn_weights = attn_weights * layer_head_mask + + # attn_output = torch.bmm(attn_probs, value_states) ? + context_states = torch.matmul(attn_weights, value_states) + # attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) ? 
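+        # merge the heads back: (batch_size, n_heads, seq_length, d_kv) -> (batch_size, seq_length, n_heads * d_kv)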
+ context_states = context_states.permute(0, 2, 1, 3).contiguous().view(batch_size, seq_length, -1) + attn_output = self.o(context_states) + return attn_output, attn_weights, past_key_value + + +class UMT5LayerSelfAttention(nn.Module): + def __init__(self, config): + super().__init__() + self.SelfAttention = UMT5Attention(config, has_relative_attention_bias=True) + self.layer_norm = UMT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) + self.dropout = nn.Dropout(config.dropout_rate) + + def forward( + self, + hidden_states, + attention_mask=None, + layer_head_mask=None, + past_key_value=None, + ): + normed_hidden_states = self.layer_norm(hidden_states) + attention_output = self.SelfAttention( + normed_hidden_states, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + past_key_value=past_key_value, + ) + hidden_states = hidden_states + self.dropout(attention_output[0]) + outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them + return outputs + + +class UMT5LayerCrossAttention(nn.Module): + def __init__(self, config): + super().__init__() + self.EncDecAttention = UMT5Attention(config, has_relative_attention_bias=False) + self.layer_norm = UMT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) + self.dropout = nn.Dropout(config.dropout_rate) + + def forward( + self, + hidden_states, + encoder_hidden_states=None, + attention_mask=None, + layer_head_mask=None, + past_key_value=None, + ): + normed_hidden_states = self.layer_norm(hidden_states) + attention_output = self.EncDecAttention( + normed_hidden_states, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + past_key_value=past_key_value, + ) + layer_output = hidden_states + self.dropout(attention_output[0]) + outputs = (layer_output,) + attention_output[1:] # add attentions if we output them + return outputs + + +class UMT5Block(nn.Module): + def __init__(self, config): + super().__init__() + self.is_decoder = config.is_decoder + self.layer = nn.ModuleList() + self.layer.append(UMT5LayerSelfAttention(config)) + if self.is_decoder: + self.layer.append(UMT5LayerCrossAttention(config)) + + self.layer.append(UMT5LayerFF(config)) + + def forward( + self, + hidden_states, + attention_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + layer_head_mask=None, + cross_attn_layer_head_mask=None, + past_key_value=None, + use_cache=False, + output_attentions=False, + ): + # Self Attention + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + + hidden_states, self_attn_weights, present_key_value = self.layer[0]( + hidden_states, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + past_key_value=self_attn_past_key_value, + ) + + # clamp inf values to enable fp16 training + if hidden_states.dtype == torch.float16: + max_dtype = torch.finfo(hidden_states.dtype).max + clamp_value = torch.where(torch.isinf(hidden_states).any(), max_dtype - 1000, max_dtype) + hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) + + # Cross-Attention Block + cross_attn_present_key_value = None + cross_attn_weights = None + do_cross_attention = self.is_decoder and encoder_hidden_states is not None + if do_cross_attention: + # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if 
past_key_value is not None else None + hidden_states, cross_attn_weights, cross_attn_present_key_value = self.layer[1]( + hidden_states, + encoder_hidden_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + layer_head_mask=cross_attn_layer_head_mask, + past_key_value=cross_attn_past_key_value, + ) + # clamp inf values to enable fp16 training + if hidden_states.dtype == torch.float16: + max_dtype = torch.finfo(hidden_states.dtype).max + clamp_value = torch.where(torch.isinf(hidden_states).any(), max_dtype - 1000, max_dtype) + hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) + + present_key_value += cross_attn_present_key_value + + # Apply Feed Forward layer + hidden_states = self.layer[-1](hidden_states) + + # clamp inf values to enable fp16 training + if hidden_states.dtype == torch.float16: + max_dtype = torch.finfo(hidden_states.dtype).max + clamp_value = torch.where(torch.isinf(hidden_states).any(), max_dtype - 1000, max_dtype) + hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) + + outputs = ( + hidden_states, + present_key_value, + ) + + if output_attentions: + outputs += (self_attn_weights, cross_attn_weights) + + return outputs + + +# Copied from transformers.models.t5.modeling_t5.T5ClassificationHead with T5->UMT5 +class UMT5ClassificationHead(nn.Module): + """Head for sentence-level classification tasks.""" + + def __init__(self, config: UMT5Config): + super().__init__() + self.dense = nn.Linear(config.d_model, config.d_model) + self.dropout = nn.Dropout(p=config.classifier_dropout) + self.out_proj = nn.Linear(config.d_model, config.num_labels) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dropout(hidden_states) + hidden_states = self.dense(hidden_states) + hidden_states = torch.tanh(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.out_proj(hidden_states) + return hidden_states + + +class UMT5PreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
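+
+    It also implements the `_shift_right` helper, which builds `decoder_input_ids` from labels by prepending
+    `config.decoder_start_token_id` and dropping the final token.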
+ """ + + config_class = UMT5Config + base_model_prefix = "transformer" + supports_gradient_checkpointing = True + _no_split_modules = ["UMT5Block"] + _keep_in_fp32_modules = ["wo"] + + @property + def dummy_inputs(self): + input_ids = torch.tensor(DUMMY_INPUTS) + input_mask = torch.tensor(DUMMY_MASK) + dummy_inputs = { + "decoder_input_ids": input_ids, + "input_ids": input_ids, + "decoder_attention_mask": input_mask, + } + return dummy_inputs + + def _init_weights(self, module): + """Initialize the weights""" + factor = self.config.initializer_factor # Used for testing weights initialization + if isinstance(module, UMT5LayerNorm): + module.weight.data.fill_(factor * 1.0) + elif isinstance( + module, + ( + UMT5Model, + UMT5ForConditionalGeneration, + UMT5EncoderModel, + UMT5ForQuestionAnswering, + ), + ): + # Mesh TensorFlow embeddings initialization + # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 + module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) + if hasattr(module, "lm_head") and not self.config.tie_word_embeddings: + module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) + if hasattr(module, "qa_outputs"): + module.qa_outputs.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) + module.qa_outputs.bias.data.zero_() + elif isinstance(module, UMT5ForTokenClassification): + if hasattr(module, "classifier"): + module.classifier.weight.data.normal_(mean=0.0, std=factor * 1.0) + module.classifier.bias.data.zero_() + elif isinstance(module, UMT5ClassificationHead): + module.dense.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) + if hasattr(module.dense, "bias") and module.dense.bias is not None: + module.dense.bias.data.zero_() + module.out_proj.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) + if hasattr(module.out_proj, "bias") and module.out_proj.bias is not None: + module.out_proj.bias.data.zero_() + elif isinstance(module, UMT5DenseActDense): + # Mesh TensorFlow FF initialization + # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 + # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 + module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) + if hasattr(module.wi, "bias") and module.wi.bias is not None: + module.wi.bias.data.zero_() + module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) + if hasattr(module.wo, "bias") and module.wo.bias is not None: + module.wo.bias.data.zero_() + elif isinstance(module, UMT5DenseGatedActDense): + module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) + if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None: + module.wi_0.bias.data.zero_() + module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) + if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None: + module.wi_1.bias.data.zero_() + module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) + if hasattr(module.wo, "bias") and module.wo.bias is not None: + module.wo.bias.data.zero_() + elif isinstance(module, UMT5Attention): + # Mesh TensorFlow attention initialization to avoid scaling before softmax + # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 + d_model = 
self.config.d_model + key_value_proj_dim = self.config.d_kv + n_heads = self.config.num_heads + module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) + module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) + module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) + module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) + if module.has_relative_attention_bias: + module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) + + def _shift_right(self, input_ids): + decoder_start_token_id = self.config.decoder_start_token_id + pad_token_id = self.config.pad_token_id + + if decoder_start_token_id is None: + raise ValueError( + "self.model.config.decoder_start_token_id has to be defined. In UMT5 it is usually set to the pad_token_id. " + "See UMT5 docs for more information." + ) + + # shift inputs to the right + if is_torch_fx_proxy(input_ids): + # Item assignment is not supported natively for proxies. + shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) + shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) + else: + shifted_input_ids = input_ids.new_zeros(input_ids.shape) + shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() + shifted_input_ids[..., 0] = decoder_start_token_id + + if pad_token_id is None: + raise ValueError("self.model.config.pad_token_id has to be defined.") + # replace possible -100 values in labels by `pad_token_id` + shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) + + return shifted_input_ids + + +class UMT5Stack(UMT5PreTrainedModel): + def __init__(self, config, embed_tokens=None): + super().__init__(config) + self.embed_tokens = embed_tokens + self.is_decoder = config.is_decoder + self.block = nn.ModuleList([UMT5Block(config) for i in range(config.num_layers)]) + self.final_layer_norm = UMT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) + self.dropout = nn.Dropout(config.dropout_rate) + + # Initialize weights and apply final processing + self.gradient_checkpointing = False + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, new_embeddings): + self.embed_tokens = new_embeddings + + def forward( + self, + input_ids=None, + attention_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + inputs_embeds=None, + head_mask=None, + cross_attn_head_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + use_cache = use_cache if use_cache is not None else self.config.use_cache + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + err_msg_prefix = "decoder_" if self.is_decoder else "" + raise ValueError( + f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" + ) + elif input_ids is not None: + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + err_msg_prefix = "decoder_" if self.is_decoder else "" 
+ raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds") + + if inputs_embeds is None: + if self.embed_tokens is None: + raise ValueError("You have to initialize the model with valid token embeddings") + inputs_embeds = self.embed_tokens(input_ids) + + batch_size, seq_length = input_shape + + # required mask seq length can be calculated via length of past + mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length + + if use_cache is True: + if not self.is_decoder: + raise ValueError(f"`use_cache` can only be set to `True` if {self} is used as a decoder") + + if attention_mask is None: + attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) + if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None: + encoder_seq_length = encoder_hidden_states.shape[1] + encoder_attention_mask = torch.ones( + batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long + ) + + # initialize past_key_values with `None` if past does not exist + if past_key_values is None: + past_key_values = [None] * len(self.block) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.is_decoder and encoder_hidden_states is not None: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + if encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + # Prepare head mask if needed + head_mask = self.get_head_mask(head_mask, self.config.num_layers) + cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) + present_key_value_states = () if use_cache else None + all_hidden_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.is_decoder else None + + hidden_states = self.dropout(inputs_embeds) + + for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)): + layer_head_mask = head_mask[i] + cross_attn_layer_head_mask = cross_attn_head_mask[i] + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + layer_module.forward, + hidden_states, + extended_attention_mask, + encoder_hidden_states, + encoder_extended_attention_mask, + layer_head_mask, + cross_attn_layer_head_mask, + None, # past_key_value is always None with gradient checkpointing + use_cache, + output_attentions, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask=extended_attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + layer_head_mask=layer_head_mask, + cross_attn_layer_head_mask=cross_attn_layer_head_mask, + past_key_value=past_key_value, + use_cache=use_cache, + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + present_key_value_states += (layer_outputs[1],) + + if output_attentions: + all_attentions += (layer_outputs[2],) + if self.is_decoder: + all_cross_attentions += (layer_outputs[3],) + + hidden_states = self.final_layer_norm(hidden_states) + hidden_states = self.dropout(hidden_states) + + # Add last layer + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + present_key_value_states, + all_hidden_states, + all_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=present_key_value_states, + hidden_states=all_hidden_states, + attentions=all_attentions, + cross_attentions=all_cross_attentions, + ) + + +UMT5_START_DOCSTRING = r""" + + The UMT5 model was proposed in [Exploring the Limits of Transfer Learning with a Unified Text-to-Text + Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan + Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder decoder transformer pre-trained in a + text-to-text denoising generative setting. + + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`UMT5Config`]): Model configuration class with all the parameters of the model. 
+ Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +UMT5_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. UMT5 is a model with relative position embeddings so + you should be able to pad the inputs on both the right and the left. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for detail. + + [What are input IDs?](../glossary#input-ids) + + To know more on how to prepare `input_ids` for pretraining take a look a [UMT5 Training](./umt5#training). + attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Indices of decoder input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are decoder input IDs?](../glossary#decoder-input-ids) + + UMT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` + is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). + + To know more on how to prepare `decoder_input_ids` for pretraining take a look at [UMT5 + Training](./umt5#training). + decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also + be used by default. + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0, + 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, + 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in + `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): + Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) + `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at + the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded + representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be + input (see `past_key_values`). This is useful if you want more control over how to convert + `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. + + If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value + of `inputs_embeds`. + + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +UMT5_ENCODER_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. UMT5 is a model with relative position embeddings so + you should be able to pad the inputs on both the right and the left. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for detail. + + To know more on how to prepare `input_ids` for pretraining take a look a [UMT5 Training](./umt5#training). + attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. 
+
+        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+            model's internal embedding lookup matrix.
+        output_attentions (`bool`, *optional*):
+            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+            tensors for more detail.
+        output_hidden_states (`bool`, *optional*):
+            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+            more detail.
+        return_dict (`bool`, *optional*):
+            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+    "The bare UMT5 Model transformer outputting raw hidden-states without any specific head on top.",
+    UMT5_START_DOCSTRING,
+)
+class UMT5Model(UMT5PreTrainedModel):
+    r"""
+    Examples:
+
+    ```python
+    >>> from transformers import UMT5Model, AutoTokenizer
+
+    >>> model = UMT5Model.from_pretrained("google/umt5-small")
+    >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
+    >>> noisy_text = "UN Offizier sagt, dass weiter <extra_id_0> werden muss in Syrien."
+    >>> label = "<extra_id_0> verhandelt"
+    >>> inputs = tokenizer(noisy_text, return_tensors="pt")
+    >>> labels = tokenizer(text_target=label, return_tensors="pt")
+
+    >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=labels["input_ids"])
+    >>> hidden_states = outputs.last_hidden_state
+    ```"""
+
+    model_type = "umt5"
+    config_class = UMT5Config
+    _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.shared = nn.Embedding(config.vocab_size, config.d_model)
+
+        encoder_config = copy.deepcopy(config)
+        encoder_config.is_decoder = False
+        encoder_config.use_cache = False
+        encoder_config.is_encoder_decoder = False
+        self.encoder = UMT5Stack(encoder_config, self.shared)
+
+        decoder_config = copy.deepcopy(config)
+        decoder_config.is_decoder = True
+        decoder_config.is_encoder_decoder = False
+        decoder_config.num_layers = config.num_decoder_layers
+        self.decoder = UMT5Stack(decoder_config, self.shared)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    # Copied from transformers.models.t5.modeling_t5.T5Model.get_input_embeddings
+    def get_input_embeddings(self):
+        return self.shared
+
+    # Copied from transformers.models.t5.modeling_t5.T5Model.set_input_embeddings
+    def set_input_embeddings(self, new_embeddings):
+        self.shared = new_embeddings
+        self.encoder.set_input_embeddings(new_embeddings)
+        self.decoder.set_input_embeddings(new_embeddings)
+
+    # Copied from transformers.models.t5.modeling_t5.T5Model._tie_weights
+    def _tie_weights(self):
+        if self.config.tie_word_embeddings:
+            self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared)
+            self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared)
+
+    # Copied from transformers.models.t5.modeling_t5.T5Model.get_encoder
+    def get_encoder(self):
+        return self.encoder
+
+    # Copied from transformers.models.t5.modeling_t5.T5Model.get_decoder
+    def get_decoder(self):
+        return self.decoder
+
+    def _prune_heads(self, heads_to_prune):
+        """
+        Prunes heads of the model.
+        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base class `PreTrainedModel`.
+        """
+        for layer, heads in heads_to_prune.items():
+            self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads)
+
+    @add_start_docstrings_to_model_forward(UMT5_INPUTS_DOCSTRING)
+    @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        decoder_input_ids: Optional[torch.LongTensor] = None,
+        decoder_attention_mask: Optional[torch.BoolTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        decoder_head_mask: Optional[torch.FloatTensor] = None,
+        cross_attn_head_mask: Optional[torch.Tensor] = None,
+        encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        decoder_inputs_embeds: Optional[torch.Tensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
+        r"""
+        Returns:
+
+        Example:
+
+        ```python
+        >>> from transformers import AutoTokenizer, UMT5Model
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
+        >>> model = UMT5Model.from_pretrained("google/umt5-small")
+
+        >>> input_ids = tokenizer(
+        ...     "Studies have been shown that owning a dog is good for you", return_tensors="pt"
+        ... ).input_ids  # Batch size 1
+        >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids  # Batch size 1
+
+        >>> # preprocess: prepend decoder_input_ids with the start token, which is the pad token for UMT5Model.
+        >>> # This is not needed for torch's UMT5ForConditionalGeneration as it does this internally using the labels arg.
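+        >>> # `_shift_right` prepends `config.decoder_start_token_id` and drops the final token, keeping the length.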
+ >>> decoder_input_ids = model._shift_right(decoder_input_ids) + + >>> # forward pass + >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) + >>> last_hidden_states = outputs.last_hidden_state + ```""" + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # Encode if needed (training, first prediction pass) + if encoder_outputs is None: + encoder_outputs = self.encoder( + input_ids=input_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): + encoder_outputs = BaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + + hidden_states = encoder_outputs[0] + + # Decode + decoder_outputs = self.decoder( + input_ids=decoder_input_ids, + attention_mask=decoder_attention_mask, + inputs_embeds=decoder_inputs_embeds, + past_key_values=past_key_values, + encoder_hidden_states=hidden_states, + encoder_attention_mask=attention_mask, + head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + return decoder_outputs + encoder_outputs + + return Seq2SeqModelOutput( + last_hidden_state=decoder_outputs.last_hidden_state, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + +@add_start_docstrings("""UMT5 Model with a `language modeling` head on top.""", UMT5_START_DOCSTRING) +class UMT5ForConditionalGeneration(UMT5PreTrainedModel): + r""" + Examples: + + ```python + >>> from transformers import UMT5ForConditionalGeneration, AutoTokenizer + + >>> model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small") + >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small") + >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." + >>> summary = "Weiter Verhandlung in Syrien." 
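+    >>> # `text_target=summary` tokenizes the summary as labels in the same call; the model shifts them right
+    >>> # internally to build `decoder_input_ids`.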
+ >>> inputs = tokenizer(article, text_target=summary, return_tensors="pt") + + >>> outputs = model(**inputs) + >>> loss = outputs.loss + ```""" + + model_type = "umt5" + _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.model_dim = config.d_model + + self.shared = nn.Embedding(config.vocab_size, config.d_model) + + encoder_config = copy.deepcopy(config) + encoder_config.is_decoder = False + encoder_config.use_cache = False + encoder_config.is_encoder_decoder = False + self.encoder = UMT5Stack(encoder_config, self.shared) + + decoder_config = copy.deepcopy(config) + decoder_config.is_decoder = True + decoder_config.is_encoder_decoder = False + decoder_config.num_layers = config.num_decoder_layers + self.decoder = UMT5Stack(decoder_config, self.shared) + + self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_input_embeddings + def get_input_embeddings(self): + return self.shared + + # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.set_input_embeddings + def set_input_embeddings(self, new_embeddings): + self.shared = new_embeddings + self.encoder.set_input_embeddings(new_embeddings) + self.decoder.set_input_embeddings(new_embeddings) + + # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration._tie_weights + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) + + # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.set_output_embeddings + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_output_embeddings + def get_output_embeddings(self): + return self.lm_head + + # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_encoder + def get_encoder(self): + return self.encoder + + # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.get_decoder + def get_decoder(self): + return self.decoder + + @add_start_docstrings_to_model_forward(UMT5_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.BoolTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + decoder_head_mask: Optional[torch.FloatTensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the 
language modeling (cross-entropy) loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`.
+            All labels set to `-100` are ignored (masked); the loss is only computed for labels in
+            `[0, ..., config.vocab_size - 1]`.
+
+        Returns:
+
+        Examples:
+
+        ```python
+        >>> from transformers import AutoTokenizer, UMT5ForConditionalGeneration
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
+        >>> model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
+
+        >>> # training
+        >>> input_ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids
+        >>> labels = tokenizer("<extra_id_0> cute dog <extra_id_1> the <extra_id_2>", return_tensors="pt").input_ids
+        >>> outputs = model(input_ids=input_ids, labels=labels)
+        >>> loss = outputs.loss
+        >>> logits = outputs.logits
+
+        >>> # inference
+        >>> input_ids = tokenizer("Studies have shown that <extra_id_0> good for you", return_tensors="pt").input_ids
+        >>> outputs = model.generate(input_ids)
+        >>> tokenizer.decode(outputs[0], skip_special_tokens=True)
+        ```"""
+        use_cache = use_cache if use_cache is not None else self.config.use_cache
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        # Encode if needed (training, first prediction pass)
+        if encoder_outputs is None:
+            # Convert encoder inputs in embeddings if needed
+            encoder_outputs = self.encoder(
+                input_ids=input_ids,
+                attention_mask=attention_mask,
+                inputs_embeds=inputs_embeds,
+                head_mask=head_mask,
+                output_attentions=output_attentions,
+                output_hidden_states=output_hidden_states,
+                return_dict=return_dict,
+            )
+        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+            encoder_outputs = BaseModelOutput(
+                last_hidden_state=encoder_outputs[0],
+                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+            )
+
+        hidden_states = encoder_outputs[0]
+
+        if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
+            # get decoder inputs from shifting lm labels to the right
+            decoder_input_ids = self._shift_right(labels)
+
+        # Decode
+        decoder_outputs = self.decoder(
+            input_ids=decoder_input_ids,
+            attention_mask=decoder_attention_mask,
+            inputs_embeds=decoder_inputs_embeds,
+            past_key_values=past_key_values,
+            encoder_hidden_states=hidden_states,
+            encoder_attention_mask=attention_mask,
+            head_mask=decoder_head_mask,
+            cross_attn_head_mask=cross_attn_head_mask,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        sequence_output = decoder_outputs[0]
+
+        if self.config.tie_word_embeddings:
+            # Rescale output before projecting on vocab
+            # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
+            sequence_output = sequence_output * (self.model_dim**-0.5)
+
+        lm_logits = self.lm_head(sequence_output)
+
+        loss = None
+        if labels is not None:
+            loss_fct = CrossEntropyLoss(ignore_index=-100)
+            # move labels to correct device to enable PP
+            labels = labels.to(lm_logits.device)
+            loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
+
+        if not return_dict:
+            output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
+            return ((loss,) + output) if loss is not None else output
+
+        return Seq2SeqLMOutput(
+            loss=loss,
+            logits=lm_logits,
+            past_key_values=decoder_outputs.past_key_values,
+            decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.prepare_inputs_for_generation + def prepare_inputs_for_generation( + self, + input_ids, + past_key_values=None, + attention_mask=None, + head_mask=None, + decoder_head_mask=None, + decoder_attention_mask=None, + cross_attn_head_mask=None, + use_cache=None, + encoder_outputs=None, + **kwargs, + ): + # cut decoder_input_ids if past_key_values is used + if past_key_values is not None: + past_length = past_key_values[0][0].shape[2] + + # Some generation methods already pass only the last input ID + if input_ids.shape[1] > past_length: + remove_prefix_length = past_length + else: + # Default to old behavior: keep only final ID + remove_prefix_length = input_ids.shape[1] - 1 + + input_ids = input_ids[:, remove_prefix_length:] + + return { + "decoder_input_ids": input_ids, + "past_key_values": past_key_values, + "encoder_outputs": encoder_outputs, + "attention_mask": attention_mask, + "head_mask": head_mask, + "decoder_head_mask": decoder_head_mask, + "decoder_attention_mask": decoder_attention_mask, + "cross_attn_head_mask": cross_attn_head_mask, + "use_cache": use_cache, + } + + # Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration.prepare_decoder_input_ids_from_labels + def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): + return self._shift_right(labels) + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past + + +@add_start_docstrings( + "The bare UMT5 Model transformer outputting encoder's raw hidden-states without any specific head on top.", + UMT5_START_DOCSTRING, +) +class UMT5EncoderModel(UMT5PreTrainedModel): + r""" + Examples: + + ```python + >>> from transformers import UMT5EncoderModel, AutoTokenizer + + >>> model = UMT5EncoderModel.from_pretrained("google/umt5-small") + >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small") + >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." 
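+    >>> # encoder-only: the output is the encoder's last hidden state, no decoder is run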
+ >>> input_ids = tokenizer(article, return_tensors="pt").input_ids + >>> outputs = model(input_ids) + >>> hidden_state = outputs.last_hidden_state + ```""" + + model_type = "umt5" + # config_class = UMT5Config + _tied_weights_keys = ["encoder.embed_tokens.weight"] + + def __init__(self, config): + super().__init__(config) + self.shared = nn.Embedding(config.vocab_size, config.d_model) + + encoder_config = copy.deepcopy(config) + encoder_config.use_cache = False + encoder_config.is_encoder_decoder = False + self.encoder = UMT5Stack(encoder_config, self.shared) + + # Initialize weights and apply final processing + self.post_init() + + # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.get_input_embeddings + def get_input_embeddings(self): + return self.shared + + # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.set_input_embeddings + def set_input_embeddings(self, new_embeddings): + self.shared = new_embeddings + self.encoder.set_input_embeddings(new_embeddings) + + # Copied from transformers.models.t5.modeling_t5.T5EncoderModel._tie_weights + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) + + # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.get_encoder + def get_encoder(self): + return self.encoder + + # Copied from transformers.models.t5.modeling_t5.T5EncoderModel._prune_heads + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(UMT5_ENCODER_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) + # Copied from transformers.models.t5.modeling_t5.T5EncoderModel.forward with T5->UMT5, google-t5/t5-small->google/umt5-small + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: + r""" + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, UMT5EncoderModel + + >>> tokenizer = AutoTokenizer.from_pretrained("google/umt5-small") + >>> model = UMT5EncoderModel.from_pretrained("google/umt5-small") + >>> input_ids = tokenizer( + ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" + ... ).input_ids # Batch size 1 + >>> outputs = model(input_ids=input_ids) + >>> last_hidden_states = outputs.last_hidden_state + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + encoder_outputs = self.encoder( + input_ids=input_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + return encoder_outputs + + +@add_start_docstrings( + """ + UMT5 model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE + tasks. 
+ """, + UMT5_START_DOCSTRING, +) +class UMT5ForSequenceClassification(UMT5PreTrainedModel): + _keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"] + _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] + + # Copied from transformers.models.t5.modeling_t5.T5ForSequenceClassification.__init__ with T5->UMT5 + def __init__(self, config: UMT5Config): + super().__init__(config) + self.transformer = UMT5Model(config) + self.classification_head = UMT5ClassificationHead(config) + + # Initialize weights and apply final processing + self.post_init() + + self.model_parallel = False + + @add_start_docstrings_to_model_forward(UMT5_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + decoder_head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, Seq2SeqSequenceClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + Returns: + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if labels is not None: + use_cache = False + + if input_ids is None and inputs_embeds is not None: + raise NotImplementedError( + f"Passing input embeddings is currently not supported for {self.__class__.__name__}" + ) + + # Copied from models.bart.modeling_bart.BartModel.forward different to other models, T5 automatically creates + # decoder_input_ids from input_ids if no decoder_input_ids are provided + if decoder_input_ids is None and decoder_inputs_embeds is None: + if input_ids is None: + raise ValueError( + "If no `decoder_input_ids` or `decoder_inputs_embeds` are " + "passed, `input_ids` cannot be `None`. Please pass either " + "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`." 
+ ) + decoder_input_ids = self._shift_right(input_ids) + + outputs = self.transformer( + input_ids, + attention_mask=attention_mask, + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + head_mask=head_mask, + decoder_head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + encoder_outputs=encoder_outputs, + inputs_embeds=inputs_embeds, + decoder_inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = outputs[0] + + eos_mask = input_ids.eq(self.config.eos_token_id).to(sequence_output.device) + + if len(torch.unique_consecutive(eos_mask.sum(1))) > 1: + raise ValueError("All examples must have the same number of tokens.") + batch_size, _, hidden_size = sequence_output.shape + sentence_representation = sequence_output[eos_mask, :].view(batch_size, -1, hidden_size)[:, -1, :] + logits = self.classification_head(sentence_representation) + + loss = None + if labels is not None: + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.config.num_labels == 1: + self.config.problem_type = "regression" + elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.config.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + if not return_dict: + output = (logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return Seq2SeqSequenceClassifierOutput( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + decoder_hidden_states=outputs.decoder_hidden_states, + decoder_attentions=outputs.decoder_attentions, + cross_attentions=outputs.cross_attentions, + encoder_last_hidden_state=outputs.encoder_last_hidden_state, + encoder_hidden_states=outputs.encoder_hidden_states, + encoder_attentions=outputs.encoder_attentions, + ) + + +@add_start_docstrings( + """ + UMT5 Encoder Model with a token classification head on top (a linear layer on top of the hidden-states output) + e.g. for Named-Entity-Recognition (NER) tasks. 
+ """, + UMT5_START_DOCSTRING, +) +class UMT5ForTokenClassification(UMT5PreTrainedModel): + _keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"] + _tied_weights_keys = ["transformer.encoder.embed_tokens.weight"] + + # Copied from transformers.models.t5.modeling_t5.T5ForTokenClassification.__init__ with T5->UMT5 + def __init__(self, config: UMT5Config): + super().__init__(config) + self.num_labels = config.num_labels + + self.transformer = UMT5EncoderModel(config) + self.dropout = nn.Dropout(config.classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(UMT5_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) + # Copied from transformers.models.t5.modeling_t5.T5ForTokenClassification.forward with T5->UMT5 + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. + Returns: + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.transformer( + input_ids, + attention_mask=attention_mask, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + hidden_states = self.dropout(hidden_states) + logits = self.classifier(hidden_states) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits, outputs[2:-1]) + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + UMT5 Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear layers + on top of the hidden-states output to compute `span start logits` and `span end logits`). 
+ """, + UMT5_START_DOCSTRING, +) +class UMT5ForQuestionAnswering(UMT5PreTrainedModel): + _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] + + def __init__(self, config): + super().__init__(config) + self.model_dim = config.d_model + + self.shared = nn.Embedding(config.vocab_size, config.d_model) + + encoder_config = copy.deepcopy(config) + encoder_config.is_decoder = False + encoder_config.use_cache = False + encoder_config.is_encoder_decoder = False + self.encoder = UMT5Stack(encoder_config, self.shared) + + decoder_config = copy.deepcopy(config) + decoder_config.is_decoder = True + decoder_config.is_encoder_decoder = False + decoder_config.num_layers = config.num_decoder_layers + self.decoder = UMT5Stack(decoder_config, self.shared) + + self.num_labels = config.num_labels + self.qa_outputs = nn.Linear(config.d_model, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.get_input_embeddings + def get_input_embeddings(self): + return self.shared + + # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.set_input_embeddings + def set_input_embeddings(self, new_embeddings): + self.shared = new_embeddings + self.encoder.set_input_embeddings(new_embeddings) + self.decoder.set_input_embeddings(new_embeddings) + + # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering._tie_weights + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) + self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) + + # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.get_encoder + def get_encoder(self): + return self.encoder + + # Copied from transformers.models.t5.modeling_t5.T5ForQuestionAnswering.get_decoder + def get_decoder(self): + return self.decoder + + @add_start_docstrings_to_model_forward(UMT5_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=Seq2SeqQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.BoolTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + decoder_head_mask: Optional[torch.FloatTensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, + start_positions: Optional[torch.LongTensor] = None, + end_positions: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.FloatTensor], Seq2SeqQuestionAnsweringModelOutput]: + r""" + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence + are not taken into account for computing the loss. 
+        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (*sequence_length*). Positions outside of the sequence
+            are not taken into account for computing the loss.
+        Returns:
+        """
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+        use_cache = use_cache if use_cache is not None else self.config.use_cache
+        if start_positions is not None and end_positions is not None:
+            use_cache = False
+
+        # Copied from models.bart.modeling_bart.BartModel.forward
+        # Unlike other models, T5 automatically creates decoder_input_ids from
+        # input_ids if no decoder_input_ids are provided
+        if decoder_input_ids is None and decoder_inputs_embeds is None:
+            if input_ids is None:
+                raise ValueError(
+                    "If no `decoder_input_ids` or `decoder_inputs_embeds` are "
+                    "passed, `input_ids` cannot be `None`. Please pass either "
+                    "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`."
+                )
+            decoder_input_ids = self._shift_right(input_ids)
+
+        # Encode if needed (training, first prediction pass)
+        if encoder_outputs is None:
+            encoder_outputs = self.encoder(
+                input_ids=input_ids,
+                attention_mask=attention_mask,
+                inputs_embeds=inputs_embeds,
+                head_mask=head_mask,
+                output_attentions=output_attentions,
+                output_hidden_states=output_hidden_states,
+                return_dict=return_dict,
+            )
+        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+            encoder_outputs = BaseModelOutput(
+                last_hidden_state=encoder_outputs[0],
+                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+            )
+
+        hidden_states = encoder_outputs[0]
+
+        # Decode
+        decoder_outputs = self.decoder(
+            input_ids=decoder_input_ids,
+            attention_mask=decoder_attention_mask,
+            inputs_embeds=decoder_inputs_embeds,
+            past_key_values=None,
+            encoder_hidden_states=hidden_states,
+            encoder_attention_mask=attention_mask,
+            head_mask=decoder_head_mask,
+            cross_attn_head_mask=cross_attn_head_mask,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        sequence_output = decoder_outputs[0]
+
+        logits = self.qa_outputs(sequence_output)
+        start_logits, end_logits = logits.split(1, dim=-1)
+        start_logits = start_logits.squeeze(-1).contiguous()
+        end_logits = end_logits.squeeze(-1).contiguous()
+
+        total_loss = None
+        if start_positions is not None and end_positions is not None:
+            # If we are on multi-GPU, the positions may carry an extra dimension; squeeze it
+            if len(start_positions.size()) > 1:
+                start_positions = start_positions.squeeze(-1).to(start_logits.device)
+            if len(end_positions.size()) > 1:
+                end_positions = end_positions.squeeze(-1).to(end_logits.device)
+            # Sometimes the start/end positions are outside our model inputs; we ignore these terms
+            ignored_index = start_logits.size(1)
+            start_positions = start_positions.clamp(0, ignored_index)
+            end_positions = end_positions.clamp(0, ignored_index)
+
+            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+            start_loss = loss_fct(start_logits, start_positions)
+            end_loss = loss_fct(end_logits, end_positions)
+            total_loss = (start_loss + end_loss) / 2
+
+        if not return_dict:
+            output = (start_logits, end_logits) + decoder_outputs[1:] + encoder_outputs
+            return ((total_loss,) + output) if total_loss is not None else output
+
+        return Seq2SeqQuestionAnsweringModelOutput(
+            loss=total_loss,
+            start_logits=start_logits,
+            end_logits=end_logits,
+            past_key_values=decoder_outputs.past_key_values,
+            decoder_hidden_states=decoder_outputs.hidden_states,
+            decoder_attentions=decoder_outputs.attentions,
+            cross_attentions=decoder_outputs.cross_attentions,
+            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+            encoder_hidden_states=encoder_outputs.hidden_states,
+            encoder_attentions=encoder_outputs.attentions,
+        )
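# ---------------------------------------------------------------------------
# Minimal usage sketch for the two UMT5 heads defined above (illustrative only,
# not part of the diffed modeling file). It assumes the public
# "google/umt5-small" checkpoint and the top-level exports
# UMT5ForTokenClassification / UMT5ForQuestionAnswering. That checkpoint ships
# no task-specific head, so the classification and QA layers below start from
# random initialization and need fine-tuning before the outputs are meaningful.
import torch
from transformers import AutoTokenizer, UMT5ForQuestionAnswering, UMT5ForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")

# Token classification: the encoder-only `transformer` produces per-token hidden
# states, and `classifier` maps them to logits of shape (batch, seq_len, num_labels).
tok_model = UMT5ForTokenClassification.from_pretrained("google/umt5-small", num_labels=5)
enc = tokenizer("UMT5 was pretrained on the multilingual mC4 corpus.", return_tensors="pt")
with torch.no_grad():
    tok_out = tok_model(**enc)
print(tok_out.logits.shape)  # torch.Size([1, sequence_length, 5])

# Extractive QA: decoder_input_ids are derived from input_ids via `_shift_right`,
# and `qa_outputs` splits the decoder hidden states into start/end logits.
qa_model = UMT5ForQuestionAnswering.from_pretrained("google/umt5-small")
qa_enc = tokenizer("Who released UMT5?", "UMT5 was released by Google.", return_tensors="pt")
with torch.no_grad():
    qa_out = qa_model(**qa_enc)
print(qa_out.start_logits.shape, qa_out.end_logits.shape)  # both (1, sequence_length)
# ---------------------------------------------------------------------------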