diff --git a/ckpts/universal/global_step20/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..22285e02f64cbc1ad19696bde224de07335a541c --- /dev/null +++ b/ckpts/universal/global_step20/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:deac2a55fcd86f5cc694c603b449726cab93ec94aed43ae043638f87f246f947 +size 33555612 diff --git a/ckpts/universal/global_step20/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..d7b0cd747bc57e78fd9b761f92c1f6f85779b525 --- /dev/null +++ b/ckpts/universal/global_step20/zero/19.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d183029d9400f181c59684ec18be25a2b52abdeeae1b1fcb089b165c71c7d80 +size 33555627 diff --git a/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/output.log b/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..7db00bf672984ee904b90611f37383d3140dcefa --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/output.log @@ -0,0 +1,28 @@ + +2024-05-14:16:34:29,286 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:16:34:33,982 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi'] +2024-05-14:16:34:33,984 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:16:34:33,984 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step100'} +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/core/register.py:145: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch intergration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.) + return func(*args, **kwargs) +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. + warnings.warn( +[2024-05-14 16:34:41,205] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect) +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead. + warnings.warn( +You are using the default legacy behaviour of the . This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565 +2024-05-14:16:34:41,615 WARNING [task.py:763] [Task: indiccopa-hi] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-14:16:34:41,616 WARNING [task.py:775] [Task: indiccopa-hi] metric acc is defined, but higher_is_better is not. 
using default higher_is_better=True +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for ai4bharat/IndicCOPA contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/ai4bharat/IndicCOPA +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +2024-05-14:16:34:42,901 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:16:34:42,901 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended. +2024-05-14:16:34:42,920 INFO [task.py:395] Building contexts for indiccopa-hi on rank 3... +Passed argument batch_size = auto:1. Detecting largest batch size +100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 56/56 [00:00<00:00, 107991.28it/s] +2024-05-14:16:34:44,718 INFO [evaluator.py:379] Running loglikelihood requests +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. + warnings.warn( +Determined largest batch size: 64 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..afc5c180b7a17ef9fc6dcf426dc1393566cf24d4 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T16:34:29.154200", + "startedAt": "2024-05-14T16:34:28.708355", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3396.061605263158, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3280.656, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3284.865, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3223.887, + "min": 
800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3244.473, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3291.536, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3206.218, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3220.102, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, 
+ { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 863.5445137023926 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..e776ad5e9dd16ad8bbb479aaf2ef49394318a964 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_163428-dlwc10vq/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 23}} \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/transformers/models/altclip/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/altclip/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5fc02b192b256b620d9e590a22ff0e1ca8dbd6d6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/altclip/__init__.py @@ -0,0 +1,71 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available + + +_import_structure = { + "configuration_altclip": [ + "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "AltCLIPConfig", + "AltCLIPTextConfig", + "AltCLIPVisionConfig", + ], + "processing_altclip": ["AltCLIPProcessor"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_altclip"] = [ + "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", + "AltCLIPPreTrainedModel", + "AltCLIPModel", + "AltCLIPTextModel", + "AltCLIPVisionModel", + ] + + +if TYPE_CHECKING: + from .configuration_altclip import ( + ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, + AltCLIPConfig, + AltCLIPTextConfig, + AltCLIPVisionConfig, + ) + from .processing_altclip import AltCLIPProcessor + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_altclip import ( + ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, + AltCLIPModel, + AltCLIPPreTrainedModel, + AltCLIPTextModel, + AltCLIPVisionModel, + ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/altclip/configuration_altclip.py b/venv/lib/python3.10/site-packages/transformers/models/altclip/configuration_altclip.py new file mode 100644 index 0000000000000000000000000000000000000000..590f2b526e8c4b9f074ce96605e556e6a1f2c6ef --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/altclip/configuration_altclip.py @@ -0,0 +1,402 @@ +# coding=utf-8 +# Copyright 2022 WenXiang ZhongzhiCheng LedellWu LiuGuang BoWenZhang and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" AltCLIP model configuration""" +import os +from typing import Union + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class AltCLIPTextConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`AltCLIPTextModel`]. It is used to instantiate a + AltCLIP text model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the AltCLIP + [BAAI/AltCLIP](https://huggingface.co/BAAI/AltCLIP) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 250002): + Vocabulary size of the AltCLIP model. 
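Because this `__init__.py` routes everything through `_LazyModule`, the torch-heavy `modeling_altclip` submodule is only imported when one of its symbols is first accessed. A small illustrative check, assuming a local transformers install with torch available:

```python
import sys

# Importing the package does not pull in the modeling code yet.
from transformers.models import altclip

assert "transformers.models.altclip.modeling_altclip" not in sys.modules

# First attribute access triggers the lazy import registered above.
_ = altclip.AltCLIPModel
assert "transformers.models.altclip.modeling_altclip" in sys.modules
```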
Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`AltCLIPTextModel`]. + hidden_size (`int`, *optional*, defaults to 1024): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 24): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 4096): + Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + max_position_embeddings (`int`, *optional*, defaults to 514): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + type_vocab_size (`int`, *optional*, defaults to 1): + The vocabulary size of the `token_type_ids` passed when calling [`AltCLIPTextModel`] + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + initializer_factor (`float`, *optional*, defaults to 0.02): + A factor for initializing all weight matrices (should be kept to 1, used internally for initialization + testing). + layer_norm_eps (`float`, *optional*, defaults to 1e-05): + The epsilon used by the layer normalization layers. + pad_token_id (`int`, *optional*, defaults to 1): The id of the *padding* token. + bos_token_id (`int`, *optional*, defaults to 0): The id of the *beginning-of-sequence* token. + eos_token_id (`Union[int, List[int]]`, *optional*, defaults to 2): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + position_embedding_type (`str`, *optional*, defaults to `"absolute"`): + Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For + positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to + [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). + For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models + with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + project_dim (`int`, *optional*, defaults to 768): + The dimentions of the teacher model before the mapping layer. 
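Each entry in the argument list above corresponds directly to a keyword of `AltCLIPTextConfig.__init__` further below; the class docstring's own example builds a default configuration, so the short sketch here only overrides a few values (the numbers are arbitrary, for illustration only):

```python
from transformers import AltCLIPTextConfig

# Arbitrary overrides; unspecified arguments keep the documented defaults
# (vocab_size=250002, hidden_size=1024, project_dim=768, ...).
text_config = AltCLIPTextConfig(
    hidden_size=512,
    num_hidden_layers=6,
    num_attention_heads=8,
    project_dim=256,
)
print(text_config.hidden_size, text_config.project_dim)  # 512 256
```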
+ + Examples: + + ```python + >>> from transformers import AltCLIPTextModel, AltCLIPTextConfig + + >>> # Initializing a AltCLIPTextConfig with BAAI/AltCLIP style configuration + >>> configuration = AltCLIPTextConfig() + + >>> # Initializing a AltCLIPTextModel (with random weights) from the BAAI/AltCLIP style configuration + >>> model = AltCLIPTextModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "altclip_text_model" + + def __init__( + self, + vocab_size=250002, + hidden_size=1024, + num_hidden_layers=24, + num_attention_heads=16, + intermediate_size=4096, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=514, + type_vocab_size=1, + initializer_range=0.02, + initializer_factor=0.02, + layer_norm_eps=1e-05, + pad_token_id=1, + bos_token_id=0, + eos_token_id=2, + position_embedding_type="absolute", + use_cache=True, + project_dim=768, + **kwargs, + ): + super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + self.initializer_factor = initializer_factor + self.layer_norm_eps = layer_norm_eps + self.position_embedding_type = position_embedding_type + self.use_cache = use_cache + self.project_dim = project_dim + + +class AltCLIPVisionConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`AltCLIPModel`]. It is used to instantiate an + AltCLIP model according to the specified arguments, defining the model architecture. Instantiating a configuration + with the defaults will yield a similar configuration to that of the AltCLIP + [BAAI/AltCLIP](https://huggingface.co/BAAI/AltCLIP) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + projection_dim (`int`, *optional*, defaults to 512): + Dimentionality of text and vision projection layers. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + image_size (`int`, *optional*, defaults to 224): + The size (resolution) of each image. + patch_size (`int`, *optional*, defaults to 32): + The size (resolution) of each patch. + hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. 
If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported. + layer_norm_eps (`float`, *optional*, defaults to 1e-05): + The epsilon used by the layer normalization layers. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + initializer_factor (`float`, *optional*, defaults to 1.0): + A factor for initializing all weight matrices (should be kept to 1, used internally for initialization + testing). + + Example: + + ```python + >>> from transformers import AltCLIPVisionConfig, AltCLIPVisionModel + + >>> # Initializing a AltCLIPVisionConfig with BAAI/AltCLIP style configuration + >>> configuration = AltCLIPVisionConfig() + + >>> # Initializing a AltCLIPVisionModel (with random weights) from the BAAI/AltCLIP style configuration + >>> model = AltCLIPVisionModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "altclip_vision_model" + + def __init__( + self, + hidden_size=768, + intermediate_size=3072, + projection_dim=512, + num_hidden_layers=12, + num_attention_heads=12, + num_channels=3, + image_size=224, + patch_size=32, + hidden_act="quick_gelu", + layer_norm_eps=1e-5, + attention_dropout=0.0, + initializer_range=0.02, + initializer_factor=1.0, + **kwargs, + ): + super().__init__(**kwargs) + + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.projection_dim = projection_dim + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.num_channels = num_channels + self.patch_size = patch_size + self.image_size = image_size + self.initializer_range = initializer_range + self.initializer_factor = initializer_factor + self.attention_dropout = attention_dropout + self.layer_norm_eps = layer_norm_eps + self.hidden_act = hidden_act + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": + cls._set_token_in_kwargs(kwargs) + + config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) + + # get the vision config dict if we are loading from AltCLIPConfig + if config_dict.get("model_type") == "altclip": + config_dict = config_dict["vision_config"] + + if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: + logger.warning( + f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." + ) + + return cls.from_dict(config_dict, **kwargs) + + +class AltCLIPConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`AltCLIPModel`]. It is used to instantiate an + AltCLIP model according to the specified arguments, defining the model architecture. Instantiating a configuration + with the defaults will yield a similar configuration to that of the AltCLIP + [BAAI/AltCLIP](https://huggingface.co/BAAI/AltCLIP) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. 
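The `AltCLIPVisionConfig.from_pretrained` override above special-cases checkpoints whose top-level `model_type` is `"altclip"` and loads the nested `vision_config` instead of the full composite config. A hedged sketch of that behaviour (requires Hub access, and assumes the hosted `BAAI/AltCLIP` config follows the layout defined in this file):

```python
from transformers import AltCLIPVisionConfig

# Pulls config.json from the Hub; since its model_type is "altclip", the
# classmethod above extracts the nested vision_config dictionary.
vision_config = AltCLIPVisionConfig.from_pretrained("BAAI/AltCLIP")
print(vision_config.image_size, vision_config.patch_size)
```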
+ + Args: + text_config (`dict`, *optional*): + Dictionary of configuration options used to initialize [`AltCLIPTextConfig`]. + vision_config (`dict`, *optional*): + Dictionary of configuration options used to initialize [`AltCLIPVisionConfig`]. + projection_dim (`int`, *optional*, defaults to 768): + Dimentionality of text and vision projection layers. + logit_scale_init_value (`float`, *optional*, defaults to 2.6592): + The inital value of the *logit_scale* paramter. Default is used as per the original CLIP implementation. + kwargs (*optional*): + Dictionary of keyword arguments. + + Example: + + ```python + >>> from transformers import AltCLIPConfig, AltCLIPModel + + >>> # Initializing a AltCLIPConfig with BAAI/AltCLIP style configuration + >>> configuration = AltCLIPConfig() + + >>> # Initializing a AltCLIPModel (with random weights) from the BAAI/AltCLIP style configuration + >>> model = AltCLIPModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + + >>> # We can also initialize a AltCLIPConfig from a AltCLIPTextConfig and a AltCLIPVisionConfig + + >>> # Initializing a AltCLIPText and AltCLIPVision configuration + >>> config_text = AltCLIPTextConfig() + >>> config_vision = AltCLIPVisionConfig() + + >>> config = AltCLIPConfig.from_text_vision_configs(config_text, config_vision) + ```""" + + model_type = "altclip" + + def __init__( + self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs + ): + # If `_config_dict` exist, we use them for the backward compatibility. + # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot + # of confusion!). + text_config_dict = kwargs.pop("text_config_dict", None) + vision_config_dict = kwargs.pop("vision_config_dict", None) + + super().__init__(**kwargs) + + # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in + # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most + # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. + if text_config_dict is not None: + if text_config is None: + text_config = {} + + # This is the complete result when using `text_config_dict`. + _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict() + + # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. + for key, value in _text_config_dict.items(): + if key in text_config and value != text_config[key] and key not in ["transformers_version"]: + # If specified in `text_config_dict` + if key in text_config_dict: + message = ( + f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. " + f'The value `text_config_dict["{key}"]` will be used instead.' + ) + # If inferred from default argument values (just to be super careful) + else: + message = ( + f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The " + f'value `text_config["{key}"]` will be overriden.' + ) + logger.info(message) + + # Update all values in `text_config` with the ones in `_text_config_dict`. + text_config.update(_text_config_dict) + + if vision_config_dict is not None: + if vision_config is None: + vision_config = {} + + # This is the complete result when using `vision_config_dict`. 
+ _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict() + # convert keys to string instead of integer + if "id2label" in _vision_config_dict: + _vision_config_dict["id2label"] = { + str(key): value for key, value in _vision_config_dict["id2label"].items() + } + + # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. + for key, value in _vision_config_dict.items(): + if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: + # If specified in `vision_config_dict` + if key in vision_config_dict: + message = ( + f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different " + f'values. The value `vision_config_dict["{key}"]` will be used instead.' + ) + # If inferred from default argument values (just to be super careful) + else: + message = ( + f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. " + f'The value `vision_config["{key}"]` will be overriden.' + ) + logger.info(message) + + # Update all values in `vision_config` with the ones in `_vision_config_dict`. + vision_config.update(_vision_config_dict) + + if text_config is None: + text_config = {} + logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.") + + if vision_config is None: + vision_config = {} + logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.") + + self.text_config = AltCLIPTextConfig(**text_config) + self.vision_config = AltCLIPVisionConfig(**vision_config) + + self.projection_dim = projection_dim + self.logit_scale_init_value = logit_scale_init_value + self.initializer_factor = 1.0 + + @classmethod + def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs): + r""" + Instantiate a [`AltCLIPConfig`] (or a derived class) from altclip text model configuration and altclip vision + model configuration. + + Returns: + [`AltCLIPConfig`]: An instance of a configuration object + """ + + return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) diff --git a/venv/lib/python3.10/site-packages/transformers/models/altclip/modeling_altclip.py b/venv/lib/python3.10/site-packages/transformers/models/altclip/modeling_altclip.py new file mode 100644 index 0000000000000000000000000000000000000000..0d27d87de7f4f1dba41787958db7509cb86f5ad9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/altclip/modeling_altclip.py @@ -0,0 +1,1693 @@ +# coding=utf-8 +# Copyright 2022 The BAAI Teams Authors and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
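In `AltCLIPConfig.__init__` above, the legacy `text_config_dict` / `vision_config_dict` keywords are merged into `text_config` / `vision_config`, with the `*_config_dict` values taking precedence and an info message logged for each conflicting key. A minimal sketch of that precedence, plus the `from_text_vision_configs` helper defined at the end of the file:

```python
from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig

# Legacy path: on a key conflict, the value from text_config_dict wins.
config = AltCLIPConfig(
    text_config={"hidden_size": 512},
    text_config_dict={"hidden_size": 1024},
)
print(config.text_config.hidden_size)  # 1024

# Recommended path: compose the composite config from the two sub-configs.
config = AltCLIPConfig.from_text_vision_configs(AltCLIPTextConfig(), AltCLIPVisionConfig())
print(config.projection_dim)  # 768 (default)
```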
+""" PyTorch AltCLIP model.""" +import math +from dataclasses import dataclass +from typing import Any, List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.utils.checkpoint + +from ...activations import ACT2FN +from ...modeling_outputs import ( + BaseModelOutput, + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPooling, + BaseModelOutputWithPoolingAndCrossAttentions, + BaseModelOutputWithPoolingAndProjection, +) +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from ...utils import ModelOutput, add_start_docstrings_to_model_forward, logging, replace_return_docstrings +from .configuration_altclip import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "BAAI/AltCLIP" +_CONFIG_FOR_DOC = "AltCLIPConfig" + + +from ..deprecated._archive_maps import ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +ALTCLIP_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`CLIPConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +ALTCLIP_TEXT_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +ALTCLIP_VISION_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. 
Padding will be ignored by default should you provide it. Pixel values can be obtained using + [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +ALTCLIP_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using + [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details. + return_loss (`bool`, *optional*): + Whether or not to return the contrastive loss. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +# contrastive loss function, adapted from +# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html +def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: + return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) + + +def clip_loss(similarity: torch.Tensor) -> torch.Tensor: + caption_loss = contrastive_loss(similarity) + image_loss = contrastive_loss(similarity.t()) + return (caption_loss + image_loss) / 2.0 + + +@dataclass +# Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->AltCLIP +class AltCLIPOutput(ModelOutput): + """ + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): + Contrastive loss for image-text similarity. + logits_per_image:(`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): + The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text + similarity scores. 
+ logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): + The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image + similarity scores. + text_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): + The text embeddings obtained by applying the projection layer to the pooled output of [`AltCLIPTextModel`]. + image_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): + The image embeddings obtained by applying the projection layer to the pooled output of [`AltCLIPVisionModel`]. + text_model_output(`BaseModelOutputWithPooling`): + The output of the [`AltCLIPTextModel`]. + vision_model_output(`BaseModelOutputWithPooling`): + The output of the [`AltCLIPVisionModel`]. + """ + + loss: Optional[torch.FloatTensor] = None + logits_per_image: torch.FloatTensor = None + logits_per_text: torch.FloatTensor = None + text_embeds: torch.FloatTensor = None + image_embeds: torch.FloatTensor = None + text_model_output: BaseModelOutputWithPooling = None + vision_model_output: BaseModelOutputWithPooling = None + + def to_tuple(self) -> Tuple[Any]: + return tuple( + self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() + for k in self.keys() + ) + + +# Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->AltRoberta +class AltRobertaEmbeddings(nn.Module): + """ + Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. + """ + + # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__ + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + self.register_buffer( + "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False + ) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) + + # End copy + self.padding_idx = config.pad_token_id + self.position_embeddings = nn.Embedding( + config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx + ) + + def forward( + self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 + ): + if position_ids is None: + if input_ids is not None: + # Create the position ids from the input token ids. Any padded tokens remain padded. 
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length) + else: + position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) + + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs + # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves + # issue #5664 + if token_type_ids is None: + if hasattr(self, "token_type_ids"): + buffered_token_type_ids = self.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + token_type_embeddings + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + def create_position_ids_from_inputs_embeds(self, inputs_embeds): + """ + We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. + + Args: + inputs_embeds: torch.Tensor + + Returns: torch.Tensor + """ + input_shape = inputs_embeds.size()[:-1] + sequence_length = input_shape[1] + + position_ids = torch.arange( + self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device + ) + return position_ids.unsqueeze(0).expand(input_shape) + + +# Copied from transformers.models.roberta.modeling_roberta.RobertaSelfAttention with Roberta->AltRoberta +class AltRobertaSelfAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = position_embedding_type or getattr( + config, "position_embedding_type", "absolute" + ) + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + + self.is_decoder = config.is_decoder + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + 
return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. + is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_layer = past_key_value[0] + value_layer = past_key_value[1] + attention_mask = encoder_attention_mask + elif is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + use_cache = past_key_value is not None + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. 
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + query_length, key_length = query_layer.shape[2], key_layer.shape[2] + if use_cache: + position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( + -1, 1 + ) + else: + position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in AltRobertaModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
+ attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + if self.is_decoder: + outputs = outputs + (past_key_value,) + return outputs + + +# Copied from transformers.models.roberta.modeling_roberta.RobertaSelfOutput +class AltRobertaSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.roberta.modeling_roberta.RobertaAttention with Roberta->AltRoberta +class AltRobertaAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + self.self = AltRobertaSelfAttention(config, position_embedding_type=position_embedding_type) + self.output = AltRobertaSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +# Copied from transformers.models.roberta.modeling_roberta.RobertaIntermediate with Roberta->AltRoberta +class AltRobertaIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + 
self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +# Copied from transformers.models.roberta.modeling_roberta.RobertaOutput +class AltRobertaOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.roberta.modeling_roberta.RobertaLayer with Roberta->AltRoberta +class AltRobertaLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = AltRobertaAttention(config) + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + if self.add_cross_attention: + if not self.is_decoder: + raise ValueError(f"{self} should be used as a decoder model if cross attention is added") + self.crossattention = AltRobertaAttention(config, position_embedding_type="absolute") + self.intermediate = AltRobertaIntermediate(config) + self.output = AltRobertaOutput(config) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + # if decoder, the last output is tuple of self-attn cache + if self.is_decoder: + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + else: + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + cross_attn_present_key_value = None + if self.is_decoder and encoder_hidden_states is not None: + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" + " by setting `config.add_cross_attention=True`" + ) + + # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + cross_attn_past_key_value, + output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we 
output attention weights + + # add cross-attn cache to positions 3,4 of present_key_value tuple + cross_attn_present_key_value = cross_attention_outputs[-1] + present_key_value = present_key_value + cross_attn_present_key_value + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + + # if decoder, return the attn key/values as the last output + if self.is_decoder: + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +# Copied from transformers.models.roberta.modeling_roberta.RobertaEncoder with Roberta->AltRoberta +class AltRobertaEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([AltRobertaLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = False, + output_hidden_states: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + next_decoder_cache = () if use_cache else None + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + layer_module.__call__, + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +# Copied from transformers.models.roberta.modeling_roberta.RobertaPooler +class AltRobertaPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +# Copied from transformers.models.clip.modeling_clip.CLIPAttention with CLIP->AltCLIP +class AltCLIPAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.embed_dim // self.num_heads + if self.head_dim * self.num_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" + f" {self.num_heads})." 
+ ) + self.scale = self.head_dim**-0.5 + self.dropout = config.attention_dropout + + self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) + self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) + self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) + self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + causal_attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + bsz, tgt_len, embed_dim = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scale + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_states = value_states.view(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + # apply the causal_attention_mask first + if causal_attention_mask is not None: + if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" + f" {causal_attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if output_attentions: + # this operation is a bit akward, but it's required to + # make sure that attn_weights keeps its gradient. 
+ # In order to do so, attn_weights have to reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped + + +# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->AltCLIP +class AltCLIPMLP(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.activation_fn = ACT2FN[config.hidden_act] + self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) + self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.fc1(hidden_states) + hidden_states = self.activation_fn(hidden_states) + hidden_states = self.fc2(hidden_states) + return hidden_states + + +# Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->AltCLIP +class AltCLIPEncoderLayer(nn.Module): + def __init__(self, config: AltCLIPConfig): + super().__init__() + self.embed_dim = config.hidden_size + self.self_attn = AltCLIPAttention(config) + self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + self.mlp = AltCLIPMLP(config) + self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + causal_attention_mask: torch.Tensor, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + `(config.encoder_attention_heads,)`. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + + hidden_states = self.layer_norm1(hidden_states) + hidden_states, attn_weights = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + causal_attention_mask=causal_attention_mask, + output_attentions=output_attentions, + ) + hidden_states = residual + hidden_states + + residual = hidden_states + hidden_states = self.layer_norm2(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +# Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->AltCLIP +class AltCLIPEncoder(nn.Module): + """ + Transformer encoder consisting of `config.num_hidden_layers` self attention layers. 
Each layer is a + [`AltCLIPEncoderLayer`]. + + Args: + config: AltCLIPConfig + """ + + def __init__(self, config: AltCLIPConfig): + super().__init__() + self.config = config + self.layers = nn.ModuleList([AltCLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + inputs_embeds, + attention_mask: Optional[torch.Tensor] = None, + causal_attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutput]: + r""" + Args: + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Causal mask for the text model. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+ """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + hidden_states = inputs_embeds + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + encoder_layer.__call__, + hidden_states, + attention_mask, + causal_attention_mask, + output_attentions, + ) + else: + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + causal_attention_mask, + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +# Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->AltCLIP +class AltCLIPVisionEmbeddings(nn.Module): + def __init__(self, config: AltCLIPVisionConfig): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.image_size = config.image_size + self.patch_size = config.patch_size + + self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) + + self.patch_embedding = nn.Conv2d( + in_channels=config.num_channels, + out_channels=self.embed_dim, + kernel_size=self.patch_size, + stride=self.patch_size, + bias=False, + ) + + self.num_patches = (self.image_size // self.patch_size) ** 2 + self.num_positions = self.num_patches + 1 + self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) + self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False) + + def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: + batch_size = pixel_values.shape[0] + target_dtype = self.patch_embedding.weight.dtype + patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] + patch_embeds = patch_embeds.flatten(2).transpose(1, 2) + + class_embeds = self.class_embedding.expand(batch_size, 1, -1) + embeddings = torch.cat([class_embeds, patch_embeds], dim=1) + embeddings = embeddings + self.position_embedding(self.position_ids) + return embeddings + + +class AltCLIPPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
+ """ + + config_class = AltCLIPConfig + base_model_prefix = "altclip" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights""" + factor = self.config.initializer_factor + if isinstance(module, AltCLIPVisionEmbeddings): + factor = self.config.initializer_factor + nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor) + nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) + nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) + elif isinstance(module, AltCLIPAttention): + factor = self.config.initializer_factor + in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor + out_proj_std = (module.embed_dim**-0.5) * factor + nn.init.normal_(module.q_proj.weight, std=in_proj_std) + nn.init.normal_(module.k_proj.weight, std=in_proj_std) + nn.init.normal_(module.v_proj.weight, std=in_proj_std) + nn.init.normal_(module.out_proj.weight, std=out_proj_std) + elif isinstance(module, AltCLIPMLP): + factor = self.config.initializer_factor + in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor + fc_std = (2 * module.config.hidden_size) ** -0.5 * factor + nn.init.normal_(module.fc1.weight, std=fc_std) + nn.init.normal_(module.fc2.weight, std=in_proj_std) + elif isinstance(module, AltCLIPModel): + nn.init.normal_( + module.text_projection.weight, + std=module.text_embed_dim**-0.5 * self.config.initializer_factor, + ) + module.text_projection._is_hf_initialized = True + nn.init.normal_( + module.visual_projection.weight, + std=module.vision_embed_dim**-0.5 * self.config.initializer_factor, + ) + module.visual_projection._is_hf_initialized = True + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + elif isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_factor) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_factor) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + +# Copied from transformers.models.clip.modeling_clip.CLIPVisionTransformer with CLIPVisionTransformer->AltCLIPVisionTransformer,CLIPVisionConfig->AltCLIPVisionConfig,CLIPVisionEmbeddings->AltCLIPVisionEmbeddings,CLIPEncoder->AltCLIPEncoder,CLIP_VISION_INPUTS_DOCSTRING->ALTCLIP_VISION_INPUTS_DOCSTRING +class AltCLIPVisionTransformer(nn.Module): + def __init__(self, config: AltCLIPVisionConfig): + super().__init__() + self.config = config + embed_dim = config.hidden_size + + self.embeddings = AltCLIPVisionEmbeddings(config) + self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) + self.encoder = AltCLIPEncoder(config) + self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) + + @add_start_docstrings_to_model_forward(ALTCLIP_VISION_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=AltCLIPVisionConfig) + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + r""" + Returns: + + """ + output_attentions = output_attentions if output_attentions is not None else 
self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + hidden_states = self.embeddings(pixel_values) + hidden_states = self.pre_layrnorm(hidden_states) + + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + last_hidden_state = encoder_outputs[0] + pooled_output = last_hidden_state[:, 0, :] + pooled_output = self.post_layernorm(pooled_output) + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +class AltCLIPVisionModel(AltCLIPPreTrainedModel): + config_class = AltCLIPVisionConfig + main_input_name = "pixel_values" + + def __init__(self, config: AltCLIPVisionConfig): + super().__init__(config) + self.vision_model = AltCLIPVisionTransformer(config) + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> nn.Module: + return self.vision_model.embeddings.patch_embedding + + @add_start_docstrings_to_model_forward(ALTCLIP_VISION_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=AltCLIPVisionConfig) + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + r""" + Returns: + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, AltCLIPVisionModel + + >>> model = AltCLIPVisionModel.from_pretrained("BAAI/AltCLIP") + >>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, return_tensors="pt") + + >>> outputs = model(**inputs) + >>> last_hidden_state = outputs.last_hidden_state + >>> pooled_output = outputs.pooler_output # pooled CLS states + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + return self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + +class AltRobertaModel(AltCLIPPreTrainedModel): + """ + + The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of + cross-attention is added between the self-attention layers, following the architecture described in *Attention is + all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz + Kaiser and Illia Polosukhin. + + To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set + to `True`. 
To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and + `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. + + .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762 + + """ + + config_class = AltCLIPTextConfig + + # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->AltRoberta + def __init__(self, config, add_pooling_layer=True): + super().__init__(config) + self.config = config + + self.embeddings = AltRobertaEmbeddings(config) + self.encoder = AltRobertaEncoder(config) + + self.pooler = AltRobertaPooler(config) if add_pooling_layer else None + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + # Copied from transformers.models.bert.modeling_bert.BertModel.forward + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). 
+ """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if self.config.is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + batch_size, seq_length = input_shape + device = input_ids.device if input_ids is not None else inputs_embeds.device + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) + + if token_type_ids is None: + if hasattr(self.embeddings, "token_type_ids"): + buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.config.is_decoder and encoder_hidden_states is not None: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + if encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +class AltCLIPTextModel(AltCLIPPreTrainedModel): + config_class = AltCLIPTextConfig + + def __init__(self, config): + super().__init__(config) + self.roberta = AltRobertaModel(config, add_pooling_layer=False) + self.transformation = nn.Linear(config.hidden_size, config.project_dim) + self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.post_init() + + def get_input_embeddings(self) -> nn.Module: + return self.roberta.embeddings.word_embeddings + + def set_input_embeddings(self, value: nn.Embedding) -> None: + self.roberta.embeddings.word_embeddings = value + + def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding: + return super().resize_token_embeddings(new_num_tokens) + + @add_start_docstrings_to_model_forward(ALTCLIP_TEXT_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndProjection, config_class=AltCLIPTextConfig) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + 
encoder_attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPoolingAndProjection]: + r""" + Returns: + + Examples: + + ```python + >>> from transformers import AutoProcessor, AltCLIPTextModel + + >>> model = AltCLIPTextModel.from_pretrained("BAAI/AltCLIP") + >>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP") + + >>> texts = ["it's a cat", "it's a dog"] + + >>> inputs = processor(text=texts, padding=True, return_tensors="pt") + + >>> outputs = model(**inputs) + >>> last_hidden_state = outputs.last_hidden_state + >>> pooled_output = outputs.pooler_output # pooled CLS states + ```""" + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.roberta( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + # last module outputs + sequence_output = outputs[0] + + # project every module + sequence_output = self.pre_LN(sequence_output) + + # pooler + projection_state = self.transformation(sequence_output) + pooler_output = projection_state[:, 0] + + if not return_dict: + return (projection_state, pooler_output) + outputs[2:4] + + return BaseModelOutputWithPoolingAndProjection( + last_hidden_state=projection_state, + pooler_output=pooler_output, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +class AltCLIPModel(AltCLIPPreTrainedModel): + config_class = AltCLIPConfig + + def __init__(self, config: AltCLIPConfig): + super().__init__(config) + + if not isinstance(config.vision_config, AltCLIPVisionConfig): + raise ValueError( + "config.vision_config is expected to be of type AltCLIPVisionConfig but is of type" + f" {type(config.vision_config)}." + ) + if not isinstance(config.text_config, AltCLIPTextConfig): + raise ValueError( + "config.text_config is expected to be of type AltCLIPTextConfig but is of type" + f" {type(config.text_config)}." 
+ ) + + text_config = config.text_config + vision_config = config.vision_config + + self.projection_dim = config.projection_dim + self.text_embed_dim = text_config.project_dim + self.vision_embed_dim = vision_config.hidden_size + + self.text_model = AltCLIPTextModel(text_config) + self.vision_model = AltCLIPVisionTransformer(vision_config) + + self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) + self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) + self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(ALTCLIP_TEXT_INPUTS_DOCSTRING) + def get_text_features( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + token_type_ids=None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> torch.FloatTensor: + r""" + Returns: + text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by + applying the projection layer to the pooled output of [`AltCLIPTextModel`]. + + Examples: + + ```python + >>> from transformers import AutoProcessor, AltCLIPModel + + >>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP") + >>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP") + >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") + >>> text_features = model.get_text_features(**inputs) + ```""" + # Use AltCLIP model's config for some fields (if specified) instead of those of vision & text components. + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + text_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + token_type_ids=token_type_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + pooled_output = text_outputs[1] + text_features = self.text_projection(pooled_output) + + return text_features + + @add_start_docstrings_to_model_forward(ALTCLIP_VISION_INPUTS_DOCSTRING) + def get_image_features( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> torch.FloatTensor: + r""" + Returns: + image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by + applying the projection layer to the pooled output of [`AltCLIPVisionModel`]. 
+ + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, AltCLIPModel + + >>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP") + >>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP") + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + >>> inputs = processor(images=image, return_tensors="pt") + >>> image_features = model.get_image_features(**inputs) + ```""" + # Use AltCLIP model's config for some fields (if specified) instead of those of vision & text components. + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = vision_outputs[1] # pooled_output + image_features = self.visual_projection(pooled_output) + + return image_features + + @add_start_docstrings_to_model_forward(ALTCLIP_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=AltCLIPOutput, config_class=AltCLIPConfig) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + pixel_values: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + return_loss: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, AltCLIPOutput]: + r""" + Returns: + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, AltCLIPModel + + >>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP") + >>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP") + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + >>> inputs = processor( + ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True + ... ) + >>> outputs = model(**inputs) + >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score + >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities + ```""" + # Use AltCLIP model's config for some fields (if specified) instead of those of vision & text components. 
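The forward body that follows L2-normalizes both projected embeddings and scales their cosine similarities by `logit_scale.exp()`; `clip_loss` is referenced there but defined outside this excerpt. A minimal standalone sketch of the CLIP-style symmetric contrastive objective that such a helper conventionally computes (the local `clip_loss` below is an illustration, not the file's implementation):

```python
# Standalone sketch (not part of the diff): CLIP-style contrastive scoring as used in
# the forward body below. `clip_loss` here is a local illustration, not the file's helper.
import torch
import torch.nn.functional as F

batch, projection_dim = 4, 8
text_embeds = F.normalize(torch.randn(batch, projection_dim), p=2, dim=-1)
image_embeds = F.normalize(torch.randn(batch, projection_dim), p=2, dim=-1)
logit_scale = torch.tensor(2.6592).exp()  # log(1 / 0.07), the usual CLIP initialization

logits_per_text = logit_scale * text_embeds @ image_embeds.t()   # (batch_text, batch_image)
logits_per_image = logits_per_text.t()

def clip_loss(similarity: torch.Tensor) -> torch.Tensor:
    # Matched pairs sit on the diagonal; take cross-entropy along both axes and average.
    labels = torch.arange(similarity.size(0), device=similarity.device)
    caption_loss = F.cross_entropy(similarity, labels)
    image_loss = F.cross_entropy(similarity.t(), labels)
    return (caption_loss + image_loss) / 2.0

print(clip_loss(logits_per_text))  # scalar tensor
```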
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + text_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + image_embeds = vision_outputs[1] + image_embeds = self.visual_projection(image_embeds) + + text_embeds = text_outputs[1] + text_embeds = self.text_projection(text_embeds) + + # normalized features + image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) + text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) + + # cosine similarity as logits + logit_scale = self.logit_scale.exp() + logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale + logits_per_image = logits_per_text.T + + loss = None + if return_loss: + loss = clip_loss(logits_per_text) + + if not return_dict: + output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) + return ((loss,) + output) if loss is not None else output + + return AltCLIPOutput( + loss=loss, + logits_per_image=logits_per_image, + logits_per_text=logits_per_text, + text_embeds=text_embeds, + image_embeds=image_embeds, + text_model_output=text_outputs, + vision_model_output=vision_outputs, + ) + + +# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids +def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): + """ + Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols + are ignored. This is modified from fairseq's `utils.make_positions`. + + Args: + x: torch.Tensor x: + + Returns: torch.Tensor + """ + # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. + mask = input_ids.ne(padding_idx).int() + incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask + return incremental_indices.long() + padding_idx diff --git a/venv/lib/python3.10/site-packages/transformers/models/altclip/processing_altclip.py b/venv/lib/python3.10/site-packages/transformers/models/altclip/processing_altclip.py new file mode 100644 index 0000000000000000000000000000000000000000..9518c55d40eadcd01c6523c462c7b8446f7bcc33 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/altclip/processing_altclip.py @@ -0,0 +1,131 @@ +# coding=utf-8 +# Copyright 2022 WenXiang ZhongzhiCheng LedellWu LiuGuang BoWenZhang The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Image/Text processor class for AltCLIP +""" +import warnings + +from ...processing_utils import ProcessorMixin +from ...tokenization_utils_base import BatchEncoding + + +class AltCLIPProcessor(ProcessorMixin): + r""" + Constructs a AltCLIP processor which wraps a CLIP image processor and a XLM-Roberta tokenizer into a single + processor. + + [`AltCLIPProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`XLMRobertaTokenizerFast`]. See + the [`~AltCLIPProcessor.__call__`] and [`~AltCLIPProcessor.decode`] for more information. + + Args: + image_processor ([`CLIPImageProcessor`], *optional*): + The image processor is a required input. + tokenizer ([`XLMRobertaTokenizerFast`], *optional*): + The tokenizer is a required input. + """ + + attributes = ["image_processor", "tokenizer"] + image_processor_class = "CLIPImageProcessor" + tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast") + + def __init__(self, image_processor=None, tokenizer=None, **kwargs): + feature_extractor = None + if "feature_extractor" in kwargs: + warnings.warn( + "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" + " instead.", + FutureWarning, + ) + feature_extractor = kwargs.pop("feature_extractor") + + image_processor = image_processor if image_processor is not None else feature_extractor + if image_processor is None: + raise ValueError("You need to specify an `image_processor`.") + if tokenizer is None: + raise ValueError("You need to specify a `tokenizer`.") + + super().__init__(image_processor, tokenizer) + + def __call__(self, text=None, images=None, return_tensors=None, **kwargs): + """ + Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` + and `kwargs` arguments to XLMRobertaTokenizerFast's [`~XLMRobertaTokenizerFast.__call__`] if `text` is not + `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwrags` arguments to + CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the doctsring + of the above two methods for more information. + + Args: + text (`str`, `List[str]`, `List[List[str]]`): + The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings + (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set + `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). + images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): + The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch + tensor. Both channels-first and channels-last formats are supported. + + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors of a particular framework. Acceptable values are: + + - `'tf'`: Return TensorFlow `tf.constant` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return NumPy `np.ndarray` objects. 
+ - `'jax'`: Return JAX `jnp.ndarray` objects. + + Returns: + [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: + + - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. + - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when + `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not + `None`). + - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. + """ + + if text is None and images is None: + raise ValueError("You have to specify either text or images. Both cannot be none.") + + if text is not None: + encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs) + + if images is not None: + image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs) + + if text is not None and images is not None: + encoding["pixel_values"] = image_features.pixel_values + return encoding + elif text is not None: + return encoding + else: + return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) + + def batch_decode(self, *args, **kwargs): + """ + This method forwards all its arguments to XLMRobertaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. + Please refer to the docstring of this method for more information. + """ + return self.tokenizer.batch_decode(*args, **kwargs) + + def decode(self, *args, **kwargs): + """ + This method forwards all its arguments to XLMRobertaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please + refer to the docstring of this method for more information. + """ + return self.tokenizer.decode(*args, **kwargs) + + @property + def model_input_names(self): + tokenizer_input_names = self.tokenizer.model_input_names + image_processor_input_names = self.image_processor.model_input_names + return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) diff --git a/venv/lib/python3.10/site-packages/transformers/models/cpm/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/cpm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..be6b0f66898ecbef786f311097f6a49c676762bd --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/cpm/__init__.py @@ -0,0 +1,59 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available + + +_import_structure = {} + +try: + if not is_sentencepiece_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["tokenization_cpm"] = ["CpmTokenizer"] + +try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["tokenization_cpm_fast"] = ["CpmTokenizerFast"] + + +if TYPE_CHECKING: + try: + if not is_sentencepiece_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .tokenization_cpm import CpmTokenizer + + try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .tokenization_cpm_fast import CpmTokenizerFast + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1b9ba8912c935ce5b529b343d95283bed53612b Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/tokenization_cpm.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/tokenization_cpm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efe52de3eb94be659bc51114dd4076d3c840e893 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/tokenization_cpm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/tokenization_cpm_fast.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/tokenization_cpm_fast.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bc1a4e0136439e9c92f5d9296cc6d402b75c03d Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/cpm/__pycache__/tokenization_cpm_fast.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm.py b/venv/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm.py new file mode 100644 index 0000000000000000000000000000000000000000..ac454898b5572a1bd9f1143d25ba52558f5b5ac9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm.py @@ -0,0 +1,344 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +"""Tokenization classes.""" +import os +import unicodedata +from shutil import copyfile +from typing import Any, Dict, List, Optional, Tuple + +import sentencepiece as spm + +from ...tokenization_utils import AddedToken, PreTrainedTokenizer +from ...utils import SPIECE_UNDERLINE, logging + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"} + + +class CpmTokenizer(PreTrainedTokenizer): + """Runs pre-tokenization with Jieba segmentation tool. It is used in CPM models.""" + + vocab_files_names = VOCAB_FILES_NAMES + + def __init__( + self, + vocab_file, + do_lower_case=False, + remove_space=True, + keep_accents=False, + bos_token="", + eos_token="", + unk_token="", + sep_token="", + pad_token="", + cls_token="", + mask_token="", + additional_special_tokens=["", ""], + sp_model_kwargs: Optional[Dict[str, Any]] = None, + **kwargs, + ) -> None: + """ + Construct a CPM tokenizer. Based on [Jieba](https://pypi.org/project/jieba/) and + [SentencePiece](https://github.com/google/sentencepiece). + + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should + refer to this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that + contains the vocabulary necessary to instantiate a tokenizer. + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether to lowercase the input when tokenizing. + remove_space (`bool`, *optional*, defaults to `True`): + Whether to strip the text when tokenizing (removing excess spaces before and after the string). + keep_accents (`bool`, *optional*, defaults to `False`): + Whether to keep accents when tokenizing. + bos_token (`str`, *optional*, defaults to `""`): + The beginning of sequence token that was used during pretraining. Can be used a sequence classifier + token. + + + + When building a sequence using special tokens, this is not the token that is used for the beginning of + sequence. The token used is the `cls_token`. + + + + eos_token (`str`, *optional*, defaults to `""`): + The end of sequence token. + + + + When building a sequence using special tokens, this is not the token that is used for the end of + sequence. The token used is the `sep_token`. + + + + unk_token (`str`, *optional*, defaults to `""`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be + this token instead. + sep_token (`str`, *optional*, defaults to `""`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences + for sequence classification or for a text and a question for question answering. It is also used as the + last token of a sequence built with special tokens. + pad_token (`str`, *optional*, defaults to `""`): + The token used for padding, for example when batching sequences of different lengths. + cls_token (`str`, *optional*, defaults to `""`): + The classifier token which is used when doing sequence classification (classification of the whole + sequence instead of per-token classification). It is the first token of the sequence when built with + special tokens. + mask_token (`str`, *optional*, defaults to `""`): + The token used for masking values. This is the token used when training this model with masked language + modeling. 
This is the token which the model will try to predict. + additional_special_tokens (`List[str]`, *optional*, defaults to `["", ""]`): + Additional special tokens used by the tokenizer. + + Attributes: + sp_model (`SentencePieceProcessor`): + The *SentencePiece* processor that is used for every conversion (string, tokens and IDs). + """ + # Mask token behave like a normal word, i.e. include the space before it + mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token + + self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs + + self.do_lower_case = do_lower_case + self.remove_space = remove_space + self.keep_accents = keep_accents + self.vocab_file = vocab_file + + self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) + self.sp_model.Load(vocab_file) + + try: + import jieba + except ModuleNotFoundError as error: + raise error.__class__( + "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. " + "See https://pypi.org/project/jieba/ for installation." + ) + self.jieba = jieba + self.translator = str.maketrans(" \n", "\u2582\u2583") + + super().__init__( + do_lower_case=do_lower_case, + remove_space=remove_space, + keep_accents=keep_accents, + bos_token=bos_token, + eos_token=eos_token, + unk_token=unk_token, + sep_token=sep_token, + pad_token=pad_token, + cls_token=cls_token, + mask_token=mask_token, + additional_special_tokens=additional_special_tokens, + sp_model_kwargs=self.sp_model_kwargs, + **kwargs, + ) + + self._pad_token_type_id = 3 + + @property + # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size + def vocab_size(self): + return len(self.sp_model) + + # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.get_vocab + def get_vocab(self): + vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} + vocab.update(self.added_tokens_encoder) + return vocab + + # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.__getstate__ + def __getstate__(self): + state = self.__dict__.copy() + state["sp_model"] = None + return state + + # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.__setstate__ + def __setstate__(self, d): + self.__dict__ = d + + # for backward compatibility + if not hasattr(self, "sp_model_kwargs"): + self.sp_model_kwargs = {} + + self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) + self.sp_model.Load(self.vocab_file) + + # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.preprocess_text + def preprocess_text(self, inputs): + if self.remove_space: + outputs = " ".join(inputs.strip().split()) + else: + outputs = inputs + outputs = outputs.replace("``", '"').replace("''", '"') + + if not self.keep_accents: + outputs = unicodedata.normalize("NFKD", outputs) + outputs = "".join([c for c in outputs if not unicodedata.combining(c)]) + if self.do_lower_case: + outputs = outputs.lower() + + return outputs + + # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer._tokenize + def _tokenize(self, text: str) -> List[str]: + """Tokenize a string.""" + text = self.preprocess_text(text) + pieces = self.sp_model.encode(text, out_type=str) + new_pieces = [] + for piece in pieces: + if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit(): + cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, "")) + if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: + if 
len(cur_pieces[0]) == 1: + cur_pieces = cur_pieces[1:] + else: + cur_pieces[0] = cur_pieces[0][1:] + cur_pieces.append(piece[-1]) + new_pieces.extend(cur_pieces) + else: + new_pieces.append(piece) + + return new_pieces + + # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer._convert_token_to_id + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.sp_model.PieceToId(token) + + # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer._convert_id_to_token + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.sp_model.IdToPiece(index) + + # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.convert_tokens_to_string + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (strings for sub-words) in a single string.""" + out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip() + return out_string + + # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.build_inputs_with_special_tokens + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. An XLNet sequence has the following format: + + - single sequence: `X ` + - pair of sequences: `A B ` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + if token_ids_1 is None: + return token_ids_0 + sep + cls + return token_ids_0 + sep + token_ids_1 + sep + cls + + # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.get_special_tokens_mask + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if token_ids_1 is not None: + return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1] + return ([0] * len(token_ids_0)) + [1, 1] + + # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.create_token_type_ids_from_sequences + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
An XLNet + sequence pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). + """ + sep = [self.sep_token_id] + cls_segment_id = [2] + + if token_ids_1 is None: + return len(token_ids_0 + sep) * [0] + cls_segment_id + return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id + + # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.save_vocabulary + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + out_vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + + if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): + copyfile(self.vocab_file, out_vocab_file) + elif not os.path.isfile(self.vocab_file): + with open(out_vocab_file, "wb") as fi: + content_spiece_model = self.sp_model.serialized_model_proto() + fi.write(content_spiece_model) + + return (out_vocab_file,) + + def _decode(self, *args, **kwargs): + text = super()._decode(*args, **kwargs) + text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n") + return text diff --git a/venv/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm_fast.py b/venv/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm_fast.py new file mode 100644 index 0000000000000000000000000000000000000000..9b7b6da118ab4beb162f5e73e7a2cd2d12366997 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/cpm/tokenization_cpm_fast.py @@ -0,0 +1,237 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tokenization classes.""" +import os +from shutil import copyfile +from typing import List, Optional, Tuple + +from ...tokenization_utils_fast import AddedToken, PreTrainedTokenizerFast +from ...utils import logging + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} + + +class CpmTokenizerFast(PreTrainedTokenizerFast): + """Runs pre-tokenization with Jieba segmentation tool. 
It is used in CPM models.""" + + def __init__( + self, + vocab_file=None, + tokenizer_file=None, + do_lower_case=False, + remove_space=True, + keep_accents=False, + bos_token="", + eos_token="", + unk_token="", + sep_token="", + pad_token="", + cls_token="", + mask_token="", + additional_special_tokens=["", ""], + **kwargs, + ): + """ + Construct a CPM tokenizer. Based on [Jieba](https://pypi.org/project/jieba/) and + [SentencePiece](https://github.com/google/sentencepiece). + + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should + refer to this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that + contains the vocabulary necessary to instantiate a tokenizer. + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether to lowercase the input when tokenizing. + remove_space (`bool`, *optional*, defaults to `True`): + Whether to strip the text when tokenizing (removing excess spaces before and after the string). + keep_accents (`bool`, *optional*, defaults to `False`): + Whether to keep accents when tokenizing. + bos_token (`str`, *optional*, defaults to `""`): + The beginning of sequence token that was used during pretraining. Can be used a sequence classifier + token. + + + + When building a sequence using special tokens, this is not the token that is used for the beginning of + sequence. The token used is the `cls_token`. + + + + eos_token (`str`, *optional*, defaults to `""`): + The end of sequence token. + + + + When building a sequence using special tokens, this is not the token that is used for the end of + sequence. The token used is the `sep_token`. + + + + unk_token (`str`, *optional*, defaults to `""`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be + this token instead. + sep_token (`str`, *optional*, defaults to `""`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences + for sequence classification or for a text and a question for question answering. It is also used as the + last token of a sequence built with special tokens. + pad_token (`str`, *optional*, defaults to `""`): + The token used for padding, for example when batching sequences of different lengths. + cls_token (`str`, *optional*, defaults to `""`): + The classifier token which is used when doing sequence classification (classification of the whole + sequence instead of per-token classification). It is the first token of the sequence when built with + special tokens. + mask_token (`str`, *optional*, defaults to `""`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + additional_special_tokens (`List[str]`, *optional*, defaults to `["", ""]`): + Additional special tokens used by the tokenizer. + + Attributes: + sp_model (`SentencePieceProcessor`): + The *SentencePiece* processor that is used for every conversion (string, tokens and IDs). + """ + # Mask token behave like a normal word, i.e. 
include the space before it + mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token + + super().__init__( + vocab_file=vocab_file, + tokenizer_file=tokenizer_file, + do_lower_case=do_lower_case, + remove_space=remove_space, + keep_accents=keep_accents, + bos_token=bos_token, + eos_token=eos_token, + unk_token=unk_token, + sep_token=sep_token, + pad_token=pad_token, + cls_token=cls_token, + mask_token=mask_token, + additional_special_tokens=additional_special_tokens, + **kwargs, + ) + + self._pad_token_type_id = 3 + self.do_lower_case = do_lower_case + self.remove_space = remove_space + self.keep_accents = keep_accents + self.vocab_file = vocab_file + + try: + import jieba + except ModuleNotFoundError as error: + raise error.__class__( + "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. " + "See https://pypi.org/project/jieba/ for installation." + ) + self.jieba = jieba + self.translator = str.maketrans(" \n", "\u2582\u2583") + + @property + def can_save_slow_tokenizer(self) -> bool: + return os.path.isfile(self.vocab_file) if self.vocab_file else False + + # Copied from transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.build_inputs_with_special_tokens + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. An XLNet sequence has the following format: + + - single sequence: `X ` + - pair of sequences: `A B ` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + if token_ids_1 is None: + return token_ids_0 + sep + cls + return token_ids_0 + sep + token_ids_1 + sep + cls + + # Copied from transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.create_token_type_ids_from_sequences + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet + sequence pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). 
+ """ + sep = [self.sep_token_id] + cls_segment_id = [2] + + if token_ids_1 is None: + return len(token_ids_0 + sep) * [0] + cls_segment_id + return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id + + # Copied from transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast.save_vocabulary + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not self.can_save_slow_tokenizer: + raise ValueError( + "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " + "tokenizer." + ) + + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + out_vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + + if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): + copyfile(self.vocab_file, out_vocab_file) + + return (out_vocab_file,) + + def _batch_encode_plus(self, batch_text_or_text_pairs, *args, **kwargs): + batch_text_or_text_pairs = [ + " ".join([x.translate(self.translator) for x in self.jieba.cut(text, cut_all=False)]) + for text in batch_text_or_text_pairs + ] + return super()._batch_encode_plus(batch_text_or_text_pairs, *args, **kwargs) + + def _decode(self, *args, **kwargs): + text = super()._decode(*args, **kwargs) + text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n") + return text diff --git a/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a560265f4bfcb8d43f88d2b3cd55f751409016ec --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__init__.py @@ -0,0 +1,75 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available + + +_import_structure = { + "configuration_deformable_detr": ["DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeformableDetrConfig"], +} + +try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["feature_extraction_deformable_detr"] = ["DeformableDetrFeatureExtractor"] + _import_structure["image_processing_deformable_detr"] = ["DeformableDetrImageProcessor"] + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_deformable_detr"] = [ + "DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", + "DeformableDetrForObjectDetection", + "DeformableDetrModel", + "DeformableDetrPreTrainedModel", + ] + + +if TYPE_CHECKING: + from .configuration_deformable_detr import DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, DeformableDetrConfig + + try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .feature_extraction_deformable_detr import DeformableDetrFeatureExtractor + from .image_processing_deformable_detr import DeformableDetrImageProcessor + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_deformable_detr import ( + DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, + DeformableDetrForObjectDetection, + DeformableDetrModel, + DeformableDetrPreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14f949230366032763669be36d2d0bcc66d2ff0c Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/configuration_deformable_detr.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/configuration_deformable_detr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05efef7f432fa537295146b3345f1bee226d4cf8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/configuration_deformable_detr.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/convert_deformable_detr_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/convert_deformable_detr_to_pytorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aac6767ed20e1f025df4f9dc978873abf0d86b59 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/convert_deformable_detr_to_pytorch.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/feature_extraction_deformable_detr.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/feature_extraction_deformable_detr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7cb6602af5f688bc16edca025edb7d19875cd1e1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/feature_extraction_deformable_detr.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/image_processing_deformable_detr.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/image_processing_deformable_detr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5acb0c385972c01c82024dd41c8b4aa6ebcc711c Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/image_processing_deformable_detr.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/load_custom.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/load_custom.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6a938685171bac6df020431d25dacd37a6147f1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/load_custom.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/modeling_deformable_detr.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/modeling_deformable_detr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..857b14d3c477ce44b048edfc86809d17257fcfe9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/__pycache__/modeling_deformable_detr.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/configuration_deformable_detr.py b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/configuration_deformable_detr.py new file mode 100644 index 0000000000000000000000000000000000000000..6d32f6220df5868a7d7aca620444306f5ad33ba5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/configuration_deformable_detr.py @@ -0,0 +1,277 @@ +# coding=utf-8 +# Copyright 2022 SenseTime and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
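Both package `__init__.py` files shown above (cpm and deformable_detr) follow the same deferred-import pattern: `_import_structure` only registers a submodule when its optional backend is importable, and at runtime the package module is swapped for a `_LazyModule` so the heavy imports happen on first attribute access. Below is a much-simplified sketch of that idea using a PEP 562 module-level `__getattr__` rather than transformers' actual `_LazyModule` class; the backend check and names are borrowed from the CPM example, but the implementation itself is only illustrative:

```python
# Simplified illustration (not transformers' _LazyModule): register names only when
# their optional backend is available, and import the submodule lazily on first use.
# Intended to live in a package __init__.py, where __package__ is set.
import importlib
import importlib.util

_import_structure = {}
if importlib.util.find_spec("sentencepiece") is not None:  # optional dependency check
    _import_structure["tokenization_cpm"] = ["CpmTokenizer"]

# Map each public name to the submodule that defines it.
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}


def __getattr__(name):
    # PEP 562: called only when `name` is not found as a regular module attribute.
    if name in _name_to_module:
        submodule = importlib.import_module(f".{_name_to_module[name]}", __package__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```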
+""" Deformable DETR model configuration""" + +from ...configuration_utils import PretrainedConfig +from ...utils import logging +from ..auto import CONFIG_MAPPING + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class DeformableDetrConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`DeformableDetrModel`]. It is used to instantiate + a Deformable DETR model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the Deformable DETR + [SenseTime/deformable-detr](https://huggingface.co/SenseTime/deformable-detr) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + use_timm_backbone (`bool`, *optional*, defaults to `True`): + Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`] + API. + backbone_config (`PretrainedConfig` or `dict`, *optional*): + The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which + case it will default to `ResNetConfig()`. + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + num_queries (`int`, *optional*, defaults to 300): + Number of object queries, i.e. detection slots. This is the maximal number of objects + [`DeformableDetrModel`] can detect in a single image. In case `two_stage` is set to `True`, we use + `two_stage_num_proposals` instead. + d_model (`int`, *optional*, defaults to 256): + Dimension of the layers. + encoder_layers (`int`, *optional*, defaults to 6): + Number of encoder layers. + decoder_layers (`int`, *optional*, defaults to 6): + Number of decoder layers. + encoder_attention_heads (`int`, *optional*, defaults to 8): + Number of attention heads for each attention layer in the Transformer encoder. + decoder_attention_heads (`int`, *optional*, defaults to 8): + Number of attention heads for each attention layer in the Transformer decoder. + decoder_ffn_dim (`int`, *optional*, defaults to 1024): + Dimension of the "intermediate" (often named feed-forward) layer in decoder. + encoder_ffn_dim (`int`, *optional*, defaults to 1024): + Dimension of the "intermediate" (often named feed-forward) layer in decoder. + activation_function (`str` or `function`, *optional*, defaults to `"relu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + dropout (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + activation_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for activations inside the fully connected layer. + init_std (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + init_xavier_std (`float`, *optional*, defaults to 1): + The scaling factor used for the Xavier initialization gain in the HM Attention map module. 
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0): + The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) + for more details. + auxiliary_loss (`bool`, *optional*, defaults to `False`): + Whether auxiliary decoding losses (loss at each decoder layer) are to be used. + position_embedding_type (`str`, *optional*, defaults to `"sine"`): + Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. + backbone (`str`, *optional*, defaults to `"resnet50"`): + Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this + will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` + is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. + use_pretrained_backbone (`bool`, *optional*, defaults to `True`): + Whether to use pretrained weights for the backbone. + backbone_kwargs (`dict`, *optional*): + Keyword arguments to be passed to AutoBackbone when loading from a checkpoint + e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. + dilation (`bool`, *optional*, defaults to `False`): + Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when + `use_timm_backbone` = `True`. + class_cost (`float`, *optional*, defaults to 1): + Relative weight of the classification error in the Hungarian matching cost. + bbox_cost (`float`, *optional*, defaults to 5): + Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. + giou_cost (`float`, *optional*, defaults to 2): + Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost. + mask_loss_coefficient (`float`, *optional*, defaults to 1): + Relative weight of the Focal loss in the panoptic segmentation loss. + dice_loss_coefficient (`float`, *optional*, defaults to 1): + Relative weight of the DICE/F-1 loss in the panoptic segmentation loss. + bbox_loss_coefficient (`float`, *optional*, defaults to 5): + Relative weight of the L1 bounding box loss in the object detection loss. + giou_loss_coefficient (`float`, *optional*, defaults to 2): + Relative weight of the generalized IoU loss in the object detection loss. + eos_coefficient (`float`, *optional*, defaults to 0.1): + Relative classification weight of the 'no-object' class in the object detection loss. + num_feature_levels (`int`, *optional*, defaults to 4): + The number of input feature levels. + encoder_n_points (`int`, *optional*, defaults to 4): + The number of sampled keys in each feature level for each attention head in the encoder. + decoder_n_points (`int`, *optional*, defaults to 4): + The number of sampled keys in each feature level for each attention head in the decoder. + two_stage (`bool`, *optional*, defaults to `False`): + Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant of + Deformable DETR, which are further fed into the decoder for iterative bounding box refinement. + two_stage_num_proposals (`int`, *optional*, defaults to 300): + The number of region proposals to be generated, in case `two_stage` is set to `True`. + with_box_refine (`bool`, *optional*, defaults to `False`): + Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes + based on the predictions from the previous layer. 
+ focal_alpha (`float`, *optional*, defaults to 0.25): + Alpha parameter in the focal loss. + disable_custom_kernels (`bool`, *optional*, defaults to `False`): + Disable the use of custom CUDA and CPU kernels. This option is necessary for the ONNX export, as custom + kernels are not supported by PyTorch ONNX export. + + Examples: + + ```python + >>> from transformers import DeformableDetrConfig, DeformableDetrModel + + >>> # Initializing a Deformable DETR SenseTime/deformable-detr style configuration + >>> configuration = DeformableDetrConfig() + + >>> # Initializing a model (with random weights) from the SenseTime/deformable-detr style configuration + >>> model = DeformableDetrModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "deformable_detr" + attribute_map = { + "hidden_size": "d_model", + "num_attention_heads": "encoder_attention_heads", + } + + def __init__( + self, + use_timm_backbone=True, + backbone_config=None, + num_channels=3, + num_queries=300, + max_position_embeddings=1024, + encoder_layers=6, + encoder_ffn_dim=1024, + encoder_attention_heads=8, + decoder_layers=6, + decoder_ffn_dim=1024, + decoder_attention_heads=8, + encoder_layerdrop=0.0, + is_encoder_decoder=True, + activation_function="relu", + d_model=256, + dropout=0.1, + attention_dropout=0.0, + activation_dropout=0.0, + init_std=0.02, + init_xavier_std=1.0, + return_intermediate=True, + auxiliary_loss=False, + position_embedding_type="sine", + backbone="resnet50", + use_pretrained_backbone=True, + backbone_kwargs=None, + dilation=False, + num_feature_levels=4, + encoder_n_points=4, + decoder_n_points=4, + two_stage=False, + two_stage_num_proposals=300, + with_box_refine=False, + class_cost=1, + bbox_cost=5, + giou_cost=2, + mask_loss_coefficient=1, + dice_loss_coefficient=1, + bbox_loss_coefficient=5, + giou_loss_coefficient=2, + eos_coefficient=0.1, + focal_alpha=0.25, + disable_custom_kernels=False, + **kwargs, + ): + if not use_timm_backbone and use_pretrained_backbone: + raise ValueError( + "Loading pretrained backbone weights from the transformers library is not supported yet. `use_timm_backbone` must be set to `True` when `use_pretrained_backbone=True`" + ) + + if backbone_config is not None and backbone is not None: + raise ValueError("You can't specify both `backbone` and `backbone_config`.") + + if backbone_config is not None and use_timm_backbone: + raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.") + + if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None: + raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.") + + if not use_timm_backbone: + if backbone_config is None: + logger.info("`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.") + backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"]) + elif isinstance(backbone_config, dict): + backbone_model_type = backbone_config.get("model_type") + config_class = CONFIG_MAPPING[backbone_model_type] + backbone_config = config_class.from_dict(backbone_config) + self.use_timm_backbone = use_timm_backbone + self.backbone_config = backbone_config + self.num_channels = num_channels + self.num_queries = num_queries + self.max_position_embeddings = max_position_embeddings + self.d_model = d_model + self.encoder_ffn_dim = encoder_ffn_dim + self.encoder_layers = encoder_layers + self.encoder_attention_heads = encoder_attention_heads + self.decoder_ffn_dim = decoder_ffn_dim + self.decoder_layers = decoder_layers + self.decoder_attention_heads = decoder_attention_heads + self.dropout = dropout + self.attention_dropout = attention_dropout + self.activation_dropout = activation_dropout + self.activation_function = activation_function + self.init_std = init_std + self.init_xavier_std = init_xavier_std + self.encoder_layerdrop = encoder_layerdrop + self.auxiliary_loss = auxiliary_loss + self.position_embedding_type = position_embedding_type + self.backbone = backbone + self.use_pretrained_backbone = use_pretrained_backbone + self.backbone_kwargs = backbone_kwargs + self.dilation = dilation + # deformable attributes + self.num_feature_levels = num_feature_levels + self.encoder_n_points = encoder_n_points + self.decoder_n_points = decoder_n_points + self.two_stage = two_stage + self.two_stage_num_proposals = two_stage_num_proposals + self.with_box_refine = with_box_refine + if two_stage is True and with_box_refine is False: + raise ValueError("If two_stage is True, with_box_refine must be True.") + # Hungarian matcher + self.class_cost = class_cost + self.bbox_cost = bbox_cost + self.giou_cost = giou_cost + # Loss coefficients + self.mask_loss_coefficient = mask_loss_coefficient + self.dice_loss_coefficient = dice_loss_coefficient + self.bbox_loss_coefficient = bbox_loss_coefficient + self.giou_loss_coefficient = giou_loss_coefficient + self.eos_coefficient = eos_coefficient + self.focal_alpha = focal_alpha + self.disable_custom_kernels = disable_custom_kernels + super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) + + @property + def num_attention_heads(self) -> int: + return self.encoder_attention_heads + + @property + def hidden_size(self) -> int: + return self.d_model diff --git a/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..928fa368ed34c2d3f59baa8038aded596df2d58e --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py @@ -0,0 +1,237 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert Deformable DETR checkpoints.""" + + +import argparse +import json +from pathlib import Path + +import requests +import torch +from huggingface_hub import cached_download, hf_hub_url +from PIL import Image + +from transformers import DeformableDetrConfig, DeformableDetrForObjectDetection, DeformableDetrImageProcessor +from transformers.utils import logging + + +logging.set_verbosity_info() +logger = logging.get_logger(__name__) + + +def rename_key(orig_key): + if "backbone.0.body" in orig_key: + orig_key = orig_key.replace("backbone.0.body", "backbone.conv_encoder.model") + if "transformer" in orig_key: + orig_key = orig_key.replace("transformer.", "") + if "norm1" in orig_key: + if "encoder" in orig_key: + orig_key = orig_key.replace("norm1", "self_attn_layer_norm") + else: + orig_key = orig_key.replace("norm1", "encoder_attn_layer_norm") + if "norm2" in orig_key: + if "encoder" in orig_key: + orig_key = orig_key.replace("norm2", "final_layer_norm") + else: + orig_key = orig_key.replace("norm2", "self_attn_layer_norm") + if "norm3" in orig_key: + orig_key = orig_key.replace("norm3", "final_layer_norm") + if "linear1" in orig_key: + orig_key = orig_key.replace("linear1", "fc1") + if "linear2" in orig_key: + orig_key = orig_key.replace("linear2", "fc2") + if "query_embed" in orig_key: + orig_key = orig_key.replace("query_embed", "query_position_embeddings") + if "cross_attn" in orig_key: + orig_key = orig_key.replace("cross_attn", "encoder_attn") + + return orig_key + + +def read_in_q_k_v(state_dict): + # transformer decoder self-attention layers + for i in range(6): + # read in weights + bias of input projection layer of self-attention + in_proj_weight = state_dict.pop(f"decoder.layers.{i}.self_attn.in_proj_weight") + in_proj_bias = state_dict.pop(f"decoder.layers.{i}.self_attn.in_proj_bias") + # next, add query, keys and values (in that order) to the state dict + state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :] + state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256] + state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] + state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512] + state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] + state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:] + + +# We will verify our results on an image of cute cats +def prepare_img(): + url = "http://images.cocodataset.org/val2017/000000039769.jpg" + im = Image.open(requests.get(url, stream=True).raw) + + return im + + +@torch.no_grad() +def convert_deformable_detr_checkpoint( + checkpoint_path, + single_scale, + dilation, + with_box_refine, + two_stage, + pytorch_dump_folder_path, + push_to_hub, +): + """ + Copy/paste/tweak model's weights to our Deformable DETR structure. 
+ """ + + # load default config + config = DeformableDetrConfig() + # set config attributes + if single_scale: + config.num_feature_levels = 1 + config.dilation = dilation + config.with_box_refine = with_box_refine + config.two_stage = two_stage + # set labels + config.num_labels = 91 + repo_id = "huggingface/label-files" + filename = "coco-detection-id2label.json" + id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r")) + id2label = {int(k): v for k, v in id2label.items()} + config.id2label = id2label + config.label2id = {v: k for k, v in id2label.items()} + + # load image processor + image_processor = DeformableDetrImageProcessor(format="coco_detection") + + # prepare image + img = prepare_img() + encoding = image_processor(images=img, return_tensors="pt") + pixel_values = encoding["pixel_values"] + + logger.info("Converting model...") + + # load original state dict + state_dict = torch.load(checkpoint_path, map_location="cpu")["model"] + # rename keys + for key in state_dict.copy().keys(): + val = state_dict.pop(key) + state_dict[rename_key(key)] = val + # query, key and value matrices need special treatment + read_in_q_k_v(state_dict) + # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them + prefix = "model." + for key in state_dict.copy().keys(): + if not key.startswith("class_embed") and not key.startswith("bbox_embed"): + val = state_dict.pop(key) + state_dict[prefix + key] = val + # finally, create HuggingFace model and load state dict + model = DeformableDetrForObjectDetection(config) + model.load_state_dict(state_dict) + model.eval() + + device = "cuda" if torch.cuda.is_available() else "cpu" + model.to(device) + # verify our conversion + outputs = model(pixel_values.to(device)) + + expected_logits = torch.tensor( + [[-9.6645, -4.3449, -5.8705], [-9.7035, -3.8504, -5.0724], [-10.5634, -5.3379, -7.5116]] + ) + expected_boxes = torch.tensor([[0.8693, 0.2289, 0.2492], [0.3150, 0.5489, 0.5845], [0.5563, 0.7580, 0.8518]]) + + if single_scale: + expected_logits = torch.tensor( + [[-9.9051, -4.2541, -6.4852], [-9.6947, -4.0854, -6.8033], [-10.0665, -5.8470, -7.7003]] + ) + expected_boxes = torch.tensor([[0.7292, 0.4991, 0.5532], [0.7959, 0.2426, 0.4236], [0.7582, 0.3518, 0.4451]]) + + if single_scale and dilation: + expected_logits = torch.tensor( + [[-8.9652, -4.1074, -5.6635], [-9.0596, -4.9447, -6.6075], [-10.1178, -4.5275, -6.2671]] + ) + expected_boxes = torch.tensor([[0.7665, 0.4130, 0.4769], [0.8364, 0.1841, 0.3391], [0.6261, 0.3895, 0.7978]]) + + if with_box_refine: + expected_logits = torch.tensor( + [[-8.8895, -5.4187, -6.8153], [-8.4706, -6.1668, -7.6184], [-9.0042, -5.5359, -6.9141]] + ) + expected_boxes = torch.tensor([[0.7828, 0.2208, 0.4323], [0.0892, 0.5996, 0.1319], [0.5524, 0.6389, 0.8914]]) + + if with_box_refine and two_stage: + expected_logits = torch.tensor( + [[-6.7108, -4.3213, -6.3777], [-8.9014, -6.1799, -6.7240], [-6.9315, -4.4735, -6.2298]] + ) + expected_boxes = torch.tensor([[0.2583, 0.5499, 0.4683], [0.7652, 0.9068, 0.4882], [0.5490, 0.2763, 0.0564]]) + + print("Logits:", outputs.logits[0, :3, :3]) + + assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4) + assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4) + + print("Everything ok!") + + # Save model and image processor + logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...") + 
Path(pytorch_dump_folder_path).mkdir(exist_ok=True) + model.save_pretrained(pytorch_dump_folder_path) + image_processor.save_pretrained(pytorch_dump_folder_path) + + # Push to hub + if push_to_hub: + model_name = "deformable-detr" + model_name += "-single-scale" if single_scale else "" + model_name += "-dc5" if dilation else "" + model_name += "-with-box-refine" if with_box_refine else "" + model_name += "-two-stage" if two_stage else "" + print("Pushing model to hub...") + model.push_to_hub(repo_path_or_name=model_name, organization="nielsr", commit_message="Add model") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + + parser.add_argument( + "--checkpoint_path", + type=str, + default="/home/niels/checkpoints/deformable_detr/r50_deformable_detr-checkpoint.pth", + help="Path to Pytorch checkpoint (.pth file) you'd like to convert.", + ) + parser.add_argument("--single_scale", action="store_true", help="Whether to set config.num_features_levels = 1.") + parser.add_argument("--dilation", action="store_true", help="Whether to set config.dilation=True.") + parser.add_argument("--with_box_refine", action="store_true", help="Whether to set config.with_box_refine=True.") + parser.add_argument("--two_stage", action="store_true", help="Whether to set config.two_stage=True.") + parser.add_argument( + "--pytorch_dump_folder_path", + default=None, + type=str, + required=True, + help="Path to the folder to output PyTorch model.", + ) + parser.add_argument( + "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." + ) + args = parser.parse_args() + convert_deformable_detr_checkpoint( + args.checkpoint_path, + args.single_scale, + args.dilation, + args.with_box_refine, + args.two_stage, + args.pytorch_dump_folder_path, + args.push_to_hub, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/feature_extraction_deformable_detr.py b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/feature_extraction_deformable_detr.py new file mode 100644 index 0000000000000000000000000000000000000000..f04743e91ceefe5fbad2485e9767f0a97dd6db49 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/feature_extraction_deformable_detr.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Feature extractor class for Deformable DETR.""" + +import warnings + +from ...image_transforms import rgb_to_id as _rgb_to_id +from ...utils import logging +from .image_processing_deformable_detr import DeformableDetrImageProcessor + + +logger = logging.get_logger(__name__) + + +def rgb_to_id(x): + warnings.warn( + "rgb_to_id has moved and will not be importable from this module from v5. 
" + "Please import from transformers.image_transforms instead.", + FutureWarning, + ) + return _rgb_to_id(x) + + +class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor): + def __init__(self, *args, **kwargs) -> None: + warnings.warn( + "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers." + " Please use DeformableDetrImageProcessor instead.", + FutureWarning, + ) + super().__init__(*args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/image_processing_deformable_detr.py b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/image_processing_deformable_detr.py new file mode 100644 index 0000000000000000000000000000000000000000..5525eeeb8c58d53f20af014e8c83071af1189ccd --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/image_processing_deformable_detr.py @@ -0,0 +1,1553 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Image processor class for Deformable DETR.""" + +import io +import pathlib +from collections import defaultdict +from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union + +import numpy as np + +from ...feature_extraction_utils import BatchFeature +from ...image_processing_utils import BaseImageProcessor, get_size_dict +from ...image_transforms import ( + PaddingMode, + center_to_corners_format, + corners_to_center_format, + id_to_rgb, + pad, + rescale, + resize, + rgb_to_id, + to_channel_dimension_format, +) +from ...image_utils import ( + IMAGENET_DEFAULT_MEAN, + IMAGENET_DEFAULT_STD, + AnnotationFormat, + AnnotationType, + ChannelDimension, + ImageInput, + PILImageResampling, + get_image_size, + infer_channel_dimension_format, + is_scaled_image, + make_list_of_images, + to_numpy_array, + valid_images, + validate_annotations, + validate_kwargs, + validate_preprocess_arguments, +) +from ...utils import ( + TensorType, + is_flax_available, + is_jax_tensor, + is_scipy_available, + is_tf_available, + is_tf_tensor, + is_torch_available, + is_torch_tensor, + is_vision_available, + logging, +) + + +if is_torch_available(): + import torch + from torch import nn + + +if is_vision_available(): + import PIL + +if is_scipy_available(): + import scipy.special + import scipy.stats + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC) + + +# Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio +def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]: + """ + Computes the output image size given the input image size and the desired output size. + + Args: + image_size (`Tuple[int, int]`): + The input image size. + size (`int`): + The desired output size. + max_size (`int`, *optional*): + The maximum allowed output size. 
+ """ + height, width = image_size + if max_size is not None: + min_original_size = float(min((height, width))) + max_original_size = float(max((height, width))) + if max_original_size / min_original_size * size > max_size: + size = int(round(max_size * min_original_size / max_original_size)) + + if (height <= width and height == size) or (width <= height and width == size): + return height, width + + if width < height: + ow = size + oh = int(size * height / width) + else: + oh = size + ow = int(size * width / height) + return (oh, ow) + + +# Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size +def get_resize_output_image_size( + input_image: np.ndarray, + size: Union[int, Tuple[int, int], List[int]], + max_size: Optional[int] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, +) -> Tuple[int, int]: + """ + Computes the output image size given the input image size and the desired output size. If the desired output size + is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output + image size is computed by keeping the aspect ratio of the input image size. + + Args: + input_image (`np.ndarray`): + The image to resize. + size (`int` or `Tuple[int, int]` or `List[int]`): + The desired output size. + max_size (`int`, *optional*): + The maximum allowed output size. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred from the input image. + """ + image_size = get_image_size(input_image, input_data_format) + if isinstance(size, (list, tuple)): + return size + + return get_size_with_aspect_ratio(image_size, size, max_size) + + +# Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn +def get_numpy_to_framework_fn(arr) -> Callable: + """ + Returns a function that converts a numpy array to the framework of the input array. + + Args: + arr (`np.ndarray`): The array to convert. + """ + if isinstance(arr, np.ndarray): + return np.array + if is_tf_available() and is_tf_tensor(arr): + import tensorflow as tf + + return tf.convert_to_tensor + if is_torch_available() and is_torch_tensor(arr): + import torch + + return torch.tensor + if is_flax_available() and is_jax_tensor(arr): + import jax.numpy as jnp + + return jnp.array + raise ValueError(f"Cannot convert arrays of type {type(arr)}") + + +# Copied from transformers.models.detr.image_processing_detr.safe_squeeze +def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray: + """ + Squeezes an array, but only if the axis specified has dim 1. 
+ """ + if axis is None: + return arr.squeeze() + + try: + return arr.squeeze(axis=axis) + except ValueError: + return arr + + +# Copied from transformers.models.detr.image_processing_detr.normalize_annotation +def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict: + image_height, image_width = image_size + norm_annotation = {} + for key, value in annotation.items(): + if key == "boxes": + boxes = value + boxes = corners_to_center_format(boxes) + boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32) + norm_annotation[key] = boxes + else: + norm_annotation[key] = value + return norm_annotation + + +# Copied from transformers.models.detr.image_processing_detr.max_across_indices +def max_across_indices(values: Iterable[Any]) -> List[Any]: + """ + Return the maximum value across all indices of an iterable of values. + """ + return [max(values_i) for values_i in zip(*values)] + + +# Copied from transformers.models.detr.image_processing_detr.get_max_height_width +def get_max_height_width( + images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None +) -> List[int]: + """ + Get the maximum height and width across all images in a batch. + """ + if input_data_format is None: + input_data_format = infer_channel_dimension_format(images[0]) + + if input_data_format == ChannelDimension.FIRST: + _, max_height, max_width = max_across_indices([img.shape for img in images]) + elif input_data_format == ChannelDimension.LAST: + max_height, max_width, _ = max_across_indices([img.shape for img in images]) + else: + raise ValueError(f"Invalid channel dimension format: {input_data_format}") + return (max_height, max_width) + + +# Copied from transformers.models.detr.image_processing_detr.make_pixel_mask +def make_pixel_mask( + image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None +) -> np.ndarray: + """ + Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. + + Args: + image (`np.ndarray`): + Image to make the pixel mask for. + output_size (`Tuple[int, int]`): + Output size of the mask. + """ + input_height, input_width = get_image_size(image, channel_dim=input_data_format) + mask = np.zeros(output_size, dtype=np.int64) + mask[:input_height, :input_width] = 1 + return mask + + +# Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask +def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray: + """ + Convert a COCO polygon annotation to a mask. + + Args: + segmentations (`List[List[float]]`): + List of polygons, each polygon represented by a list of x-y coordinates. + height (`int`): + Height of the mask. + width (`int`): + Width of the mask. 
+ """ + try: + from pycocotools import mask as coco_mask + except ImportError: + raise ImportError("Pycocotools is not installed in your environment.") + + masks = [] + for polygons in segmentations: + rles = coco_mask.frPyObjects(polygons, height, width) + mask = coco_mask.decode(rles) + if len(mask.shape) < 3: + mask = mask[..., None] + mask = np.asarray(mask, dtype=np.uint8) + mask = np.any(mask, axis=2) + masks.append(mask) + if masks: + masks = np.stack(masks, axis=0) + else: + masks = np.zeros((0, height, width), dtype=np.uint8) + + return masks + + +# Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->DeformableDetr +def prepare_coco_detection_annotation( + image, + target, + return_segmentation_masks: bool = False, + input_data_format: Optional[Union[ChannelDimension, str]] = None, +): + """ + Convert the target in COCO format into the format expected by DeformableDetr. + """ + image_height, image_width = get_image_size(image, channel_dim=input_data_format) + + image_id = target["image_id"] + image_id = np.asarray([image_id], dtype=np.int64) + + # Get all COCO annotations for the given image. + annotations = target["annotations"] + annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0] + + classes = [obj["category_id"] for obj in annotations] + classes = np.asarray(classes, dtype=np.int64) + + # for conversion to coco api + area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32) + iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64) + + boxes = [obj["bbox"] for obj in annotations] + # guard against no boxes via resizing + boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) + boxes[:, 2:] += boxes[:, :2] + boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width) + boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height) + + keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) + + new_target = {} + new_target["image_id"] = image_id + new_target["class_labels"] = classes[keep] + new_target["boxes"] = boxes[keep] + new_target["area"] = area[keep] + new_target["iscrowd"] = iscrowd[keep] + new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64) + + if annotations and "keypoints" in annotations[0]: + keypoints = [obj["keypoints"] for obj in annotations] + # Converting the filtered keypoints list to a numpy array + keypoints = np.asarray(keypoints, dtype=np.float32) + # Apply the keep mask here to filter the relevant annotations + keypoints = keypoints[keep] + num_keypoints = keypoints.shape[0] + keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints + new_target["keypoints"] = keypoints + + if return_segmentation_masks: + segmentation_masks = [obj["segmentation"] for obj in annotations] + masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width) + new_target["masks"] = masks[keep] + + return new_target + + +# Copied from transformers.models.detr.image_processing_detr.masks_to_boxes +def masks_to_boxes(masks: np.ndarray) -> np.ndarray: + """ + Compute the bounding boxes around the provided panoptic segmentation masks. 
+ + Args: + masks: masks in format `[number_masks, height, width]` where N is the number of masks + + Returns: + boxes: bounding boxes in format `[number_masks, 4]` in xyxy format + """ + if masks.size == 0: + return np.zeros((0, 4)) + + h, w = masks.shape[-2:] + y = np.arange(0, h, dtype=np.float32) + x = np.arange(0, w, dtype=np.float32) + # see https://github.com/pytorch/pytorch/issues/50276 + y, x = np.meshgrid(y, x, indexing="ij") + + x_mask = masks * np.expand_dims(x, axis=0) + x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) + x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) + x_min = x.filled(fill_value=1e8) + x_min = x_min.reshape(x_min.shape[0], -1).min(-1) + + y_mask = masks * np.expand_dims(y, axis=0) + y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) + y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) + y_min = y.filled(fill_value=1e8) + y_min = y_min.reshape(y_min.shape[0], -1).min(-1) + + return np.stack([x_min, y_min, x_max, y_max], 1) + + +# Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->DeformableDetr +def prepare_coco_panoptic_annotation( + image: np.ndarray, + target: Dict, + masks_path: Union[str, pathlib.Path], + return_masks: bool = True, + input_data_format: Union[ChannelDimension, str] = None, +) -> Dict: + """ + Prepare a coco panoptic annotation for DeformableDetr. + """ + image_height, image_width = get_image_size(image, channel_dim=input_data_format) + annotation_path = pathlib.Path(masks_path) / target["file_name"] + + new_target = {} + new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64) + new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64) + new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64) + + if "segments_info" in target: + masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32) + masks = rgb_to_id(masks) + + ids = np.array([segment_info["id"] for segment_info in target["segments_info"]]) + masks = masks == ids[:, None, None] + masks = masks.astype(np.uint8) + if return_masks: + new_target["masks"] = masks + new_target["boxes"] = masks_to_boxes(masks) + new_target["class_labels"] = np.array( + [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64 + ) + new_target["iscrowd"] = np.asarray( + [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64 + ) + new_target["area"] = np.asarray( + [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32 + ) + + return new_target + + +# Copied from transformers.models.detr.image_processing_detr.get_segmentation_image +def get_segmentation_image( + masks: np.ndarray, input_size: Tuple, target_size: Tuple, stuff_equiv_classes, deduplicate=False +): + h, w = input_size + final_h, final_w = target_size + + m_id = scipy.special.softmax(masks.transpose(0, 1), -1) + + if m_id.shape[-1] == 0: + # We didn't detect any mask :( + m_id = np.zeros((h, w), dtype=np.int64) + else: + m_id = m_id.argmax(-1).reshape(h, w) + + if deduplicate: + # Merge the masks corresponding to the same stuff class + for equiv in stuff_equiv_classes.values(): + for eq_id in equiv: + m_id[m_id == eq_id] = equiv[0] + + seg_img = id_to_rgb(m_id) + seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST) + return seg_img + + +# Copied from transformers.models.detr.image_processing_detr.get_mask_area +def get_mask_area(seg_img: 
np.ndarray, target_size: Tuple[int, int], n_classes: int) -> np.ndarray: + final_h, final_w = target_size + np_seg_img = seg_img.astype(np.uint8) + np_seg_img = np_seg_img.reshape(final_h, final_w, 3) + m_id = rgb_to_id(np_seg_img) + area = [(m_id == i).sum() for i in range(n_classes)] + return area + + +# Copied from transformers.models.detr.image_processing_detr.score_labels_from_class_probabilities +def score_labels_from_class_probabilities(logits: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + probs = scipy.special.softmax(logits, axis=-1) + labels = probs.argmax(-1, keepdims=True) + scores = np.take_along_axis(probs, labels, axis=-1) + scores, labels = scores.squeeze(-1), labels.squeeze(-1) + return scores, labels + + +# Copied from transformers.models.detr.image_processing_detr.post_process_panoptic_sample +def post_process_panoptic_sample( + out_logits: np.ndarray, + masks: np.ndarray, + boxes: np.ndarray, + processed_size: Tuple[int, int], + target_size: Tuple[int, int], + is_thing_map: Dict, + threshold=0.85, +) -> Dict: + """ + Converts the output of [`DetrForSegmentation`] into panoptic segmentation predictions for a single sample. + + Args: + out_logits (`torch.Tensor`): + The logits for this sample. + masks (`torch.Tensor`): + The predicted segmentation masks for this sample. + boxes (`torch.Tensor`): + The prediced bounding boxes for this sample. The boxes are in the normalized format `(center_x, center_y, + width, height)` and values between `[0, 1]`, relative to the size the image (disregarding padding). + processed_size (`Tuple[int, int]`): + The processed size of the image `(height, width)`, as returned by the preprocessing step i.e. the size + after data augmentation but before batching. + target_size (`Tuple[int, int]`): + The target size of the image, `(height, width)` corresponding to the requested final size of the + prediction. + is_thing_map (`Dict`): + A dictionary mapping class indices to a boolean value indicating whether the class is a thing or not. + threshold (`float`, *optional*, defaults to 0.85): + The threshold used to binarize the segmentation masks. + """ + # we filter empty queries and detection below threshold + scores, labels = score_labels_from_class_probabilities(out_logits) + keep = (labels != out_logits.shape[-1] - 1) & (scores > threshold) + + cur_scores = scores[keep] + cur_classes = labels[keep] + cur_boxes = center_to_corners_format(boxes[keep]) + + if len(cur_boxes) != len(cur_classes): + raise ValueError("Not as many boxes as there are classes") + + cur_masks = masks[keep] + cur_masks = resize(cur_masks[:, None], processed_size, resample=PILImageResampling.BILINEAR) + cur_masks = safe_squeeze(cur_masks, 1) + b, h, w = cur_masks.shape + + # It may be that we have several predicted masks for the same stuff class. 
+ # In the following, we track the list of masks ids for each stuff class (they are merged later on) + cur_masks = cur_masks.reshape(b, -1) + stuff_equiv_classes = defaultdict(list) + for k, label in enumerate(cur_classes): + if not is_thing_map[label]: + stuff_equiv_classes[label].append(k) + + seg_img = get_segmentation_image(cur_masks, processed_size, target_size, stuff_equiv_classes, deduplicate=True) + area = get_mask_area(cur_masks, processed_size, n_classes=len(cur_scores)) + + # We filter out any mask that is too small + if cur_classes.size() > 0: + # We know filter empty masks as long as we find some + filtered_small = np.array([a <= 4 for a in area], dtype=bool) + while filtered_small.any(): + cur_masks = cur_masks[~filtered_small] + cur_scores = cur_scores[~filtered_small] + cur_classes = cur_classes[~filtered_small] + seg_img = get_segmentation_image(cur_masks, (h, w), target_size, stuff_equiv_classes, deduplicate=True) + area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores)) + filtered_small = np.array([a <= 4 for a in area], dtype=bool) + else: + cur_classes = np.ones((1, 1), dtype=np.int64) + + segments_info = [ + {"id": i, "isthing": is_thing_map[cat], "category_id": int(cat), "area": a} + for i, (cat, a) in enumerate(zip(cur_classes, area)) + ] + del cur_classes + + with io.BytesIO() as out: + PIL.Image.fromarray(seg_img).save(out, format="PNG") + predictions = {"png_string": out.getvalue(), "segments_info": segments_info} + + return predictions + + +# Copied from transformers.models.detr.image_processing_detr.resize_annotation +def resize_annotation( + annotation: Dict[str, Any], + orig_size: Tuple[int, int], + target_size: Tuple[int, int], + threshold: float = 0.5, + resample: PILImageResampling = PILImageResampling.NEAREST, +): + """ + Resizes an annotation to a target size. + + Args: + annotation (`Dict[str, Any]`): + The annotation dictionary. + orig_size (`Tuple[int, int]`): + The original size of the input image. + target_size (`Tuple[int, int]`): + The target size of the image, as returned by the preprocessing `resize` step. + threshold (`float`, *optional*, defaults to 0.5): + The threshold used to binarize the segmentation masks. + resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`): + The resampling filter to use when resizing the masks. + """ + ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size)) + ratio_height, ratio_width = ratios + + new_annotation = {} + new_annotation["size"] = target_size + + for key, value in annotation.items(): + if key == "boxes": + boxes = value + scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) + new_annotation["boxes"] = scaled_boxes + elif key == "area": + area = value + scaled_area = area * (ratio_width * ratio_height) + new_annotation["area"] = scaled_area + elif key == "masks": + masks = value[:, None] + masks = np.array([resize(mask, target_size, resample=resample) for mask in masks]) + masks = masks.astype(np.float32) + masks = masks[:, 0] > threshold + new_annotation["masks"] = masks + elif key == "size": + new_annotation["size"] = target_size + else: + new_annotation[key] = value + + return new_annotation + + +# Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle +def binary_mask_to_rle(mask): + """ + Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format. 
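# A small worked example (not from the upstream file) of `resize_annotation` above, resizing
# annotations from an original (100, 200) image to a (200, 400) target, i.e. ratio_height = ratio_width = 2:
#
#   annotation = {"boxes": np.array([[10.0, 20.0, 30.0, 40.0]]), "area": np.array([400.0])}
#   resized = resize_annotation(annotation, orig_size=(100, 200), target_size=(200, 400))
#   # resized["boxes"] -> array([[20., 40., 60., 80.]])   (each coordinate scaled by its ratio)
#   # resized["area"]  -> array([1600.])                  (scaled by ratio_width * ratio_height)
#   # resized["size"]  -> (200, 400)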
+ + Args: + mask (`torch.Tensor` or `numpy.array`): + A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target + segment_id or class_id. + Returns: + `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE + format. + """ + if is_torch_tensor(mask): + mask = mask.numpy() + + pixels = mask.flatten() + pixels = np.concatenate([[0], pixels, [0]]) + runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 + runs[1::2] -= runs[::2] + return list(runs) + + +# Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle +def convert_segmentation_to_rle(segmentation): + """ + Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format. + + Args: + segmentation (`torch.Tensor` or `numpy.array`): + A segmentation map of shape `(height, width)` where each value denotes a segment or class id. + Returns: + `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. + """ + segment_ids = torch.unique(segmentation) + + run_length_encodings = [] + for idx in segment_ids: + mask = torch.where(segmentation == idx, 1, 0) + rle = binary_mask_to_rle(mask) + run_length_encodings.append(rle) + + return run_length_encodings + + +# Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects +def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): + """ + Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and + `labels`. + + Args: + masks (`torch.Tensor`): + A tensor of shape `(num_queries, height, width)`. + scores (`torch.Tensor`): + A tensor of shape `(num_queries)`. + labels (`torch.Tensor`): + A tensor of shape `(num_queries)`. + object_mask_threshold (`float`): + A number between 0 and 1 used to binarize the masks. + Raises: + `ValueError`: Raised when the first dimension doesn't match in all input tensors. + Returns: + `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region + < `object_mask_threshold`. 
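# A tiny worked example (not from the upstream file) of the run-length encoding above:
#
#   mask = np.array([[0, 1, 1],
#                    [0, 1, 0]])
#   binary_mask_to_rle(mask)  # -> [2, 2, 5, 1]
#
# i.e. a run of two foreground pixels starting at (1-indexed) flattened position 2, and a run of
# one foreground pixel starting at position 5.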
+ """ + if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): + raise ValueError("mask, scores and labels must have the same shape!") + + to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) + + return masks[to_keep], scores[to_keep], labels[to_keep] + + +# Copied from transformers.models.detr.image_processing_detr.check_segment_validity +def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): + # Get the mask associated with the k class + mask_k = mask_labels == k + mask_k_area = mask_k.sum() + + # Compute the area of all the stuff in query k + original_area = (mask_probs[k] >= mask_threshold).sum() + mask_exists = mask_k_area > 0 and original_area > 0 + + # Eliminate disconnected tiny segments + if mask_exists: + area_ratio = mask_k_area / original_area + if not area_ratio.item() > overlap_mask_area_threshold: + mask_exists = False + + return mask_exists, mask_k + + +# Copied from transformers.models.detr.image_processing_detr.compute_segments +def compute_segments( + mask_probs, + pred_scores, + pred_labels, + mask_threshold: float = 0.5, + overlap_mask_area_threshold: float = 0.8, + label_ids_to_fuse: Optional[Set[int]] = None, + target_size: Tuple[int, int] = None, +): + height = mask_probs.shape[1] if target_size is None else target_size[0] + width = mask_probs.shape[2] if target_size is None else target_size[1] + + segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) + segments: List[Dict] = [] + + if target_size is not None: + mask_probs = nn.functional.interpolate( + mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False + )[0] + + current_segment_id = 0 + + # Weigh each mask by its prediction score + mask_probs *= pred_scores.view(-1, 1, 1) + mask_labels = mask_probs.argmax(0) # [height, width] + + # Keep track of instances of each class + stuff_memory_list: Dict[str, int] = {} + for k in range(pred_labels.shape[0]): + pred_class = pred_labels[k].item() + should_fuse = pred_class in label_ids_to_fuse + + # Check if mask exists and large enough to be a segment + mask_exists, mask_k = check_segment_validity( + mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold + ) + + if mask_exists: + if pred_class in stuff_memory_list: + current_segment_id = stuff_memory_list[pred_class] + else: + current_segment_id += 1 + + # Add current object segment to final segmentation map + segmentation[mask_k] = current_segment_id + segment_score = round(pred_scores[k].item(), 6) + segments.append( + { + "id": current_segment_id, + "label_id": pred_class, + "was_fused": should_fuse, + "score": segment_score, + } + ) + if should_fuse: + stuff_memory_list[pred_class] = current_segment_id + + return segmentation, segments + + +class DeformableDetrImageProcessor(BaseImageProcessor): + r""" + Constructs a Deformable DETR image processor. + + Args: + format (`str`, *optional*, defaults to `"coco_detection"`): + Data format of the annotations. One of "coco_detection" or "coco_panoptic". + do_resize (`bool`, *optional*, defaults to `True`): + Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be + overridden by the `do_resize` parameter in the `preprocess` method. + size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`): + Size of the image's (height, width) dimensions after resizing. Can be overridden by the `size` parameter in + the `preprocess` method. 
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): + Resampling filter to use if resizing the image. + do_rescale (`bool`, *optional*, defaults to `True`): + Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the + `do_rescale` parameter in the `preprocess` method. + rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): + Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the + `preprocess` method. + do_normalize: + Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the + `preprocess` method. + image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): + Mean values to use when normalizing the image. Can be a single value or a list of values, one for each + channel. Can be overridden by the `image_mean` parameter in the `preprocess` method. + image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): + Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one + for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method. + do_convert_annotations (`bool`, *optional*, defaults to `True`): + Controls whether to convert the annotations to the format expected by the DETR model. Converts the + bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`. + Can be overridden by the `do_convert_annotations` parameter in the `preprocess` method. + do_pad (`bool`, *optional*, defaults to `True`): + Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess` + method. If `True` will pad the images in the batch to the largest height and width in the batch. + Padding will be applied to the bottom and right of the image with zeros. + """ + + model_input_names = ["pixel_values", "pixel_mask"] + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.__init__ + def __init__( + self, + format: Union[str, AnnotationFormat] = AnnotationFormat.COCO_DETECTION, + do_resize: bool = True, + size: Dict[str, int] = None, + resample: PILImageResampling = PILImageResampling.BILINEAR, + do_rescale: bool = True, + rescale_factor: Union[int, float] = 1 / 255, + do_normalize: bool = True, + image_mean: Union[float, List[float]] = None, + image_std: Union[float, List[float]] = None, + do_convert_annotations: Optional[bool] = None, + do_pad: bool = True, + **kwargs, + ) -> None: + if "pad_and_return_pixel_mask" in kwargs: + do_pad = kwargs.pop("pad_and_return_pixel_mask") + + if "max_size" in kwargs: + logger.warning_once( + "The `max_size` parameter is deprecated and will be removed in v4.26. 
" + "Please specify in `size['longest_edge'] instead`.", + ) + max_size = kwargs.pop("max_size") + else: + max_size = None if size is None else 1333 + + size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333} + size = get_size_dict(size, max_size=max_size, default_to_square=False) + + # Backwards compatibility + if do_convert_annotations is None: + do_convert_annotations = do_normalize + + super().__init__(**kwargs) + self.format = format + self.do_resize = do_resize + self.size = size + self.resample = resample + self.do_rescale = do_rescale + self.rescale_factor = rescale_factor + self.do_normalize = do_normalize + self.do_convert_annotations = do_convert_annotations + self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN + self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD + self.do_pad = do_pad + self._valid_processor_keys = [ + "images", + "annotations", + "return_segmentation_masks", + "masks_path", + "do_resize", + "size", + "resample", + "do_rescale", + "rescale_factor", + "do_normalize", + "do_convert_annotations", + "image_mean", + "image_std", + "do_pad", + "format", + "return_tensors", + "data_format", + "input_data_format", + ] + + @classmethod + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.from_dict with Detr->DeformableDetr + def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs): + """ + Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is + created using from_dict and kwargs e.g. `DeformableDetrImageProcessor.from_pretrained(checkpoint, size=600, + max_size=800)` + """ + image_processor_dict = image_processor_dict.copy() + if "max_size" in kwargs: + image_processor_dict["max_size"] = kwargs.pop("max_size") + if "pad_and_return_pixel_mask" in kwargs: + image_processor_dict["pad_and_return_pixel_mask"] = kwargs.pop("pad_and_return_pixel_mask") + return super().from_dict(image_processor_dict, **kwargs) + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->DeformableDetr + def prepare_annotation( + self, + image: np.ndarray, + target: Dict, + format: Optional[AnnotationFormat] = None, + return_segmentation_masks: bool = None, + masks_path: Optional[Union[str, pathlib.Path]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> Dict: + """ + Prepare an annotation for feeding into DeformableDetr model. + """ + format = format if format is not None else self.format + + if format == AnnotationFormat.COCO_DETECTION: + return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks + target = prepare_coco_detection_annotation( + image, target, return_segmentation_masks, input_data_format=input_data_format + ) + elif format == AnnotationFormat.COCO_PANOPTIC: + return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks + target = prepare_coco_panoptic_annotation( + image, + target, + masks_path=masks_path, + return_masks=return_segmentation_masks, + input_data_format=input_data_format, + ) + else: + raise ValueError(f"Format {format} is not supported.") + return target + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare + def prepare(self, image, target, return_segmentation_masks=None, masks_path=None): + logger.warning_once( + "The `prepare` method is deprecated and will be removed in a v4.33. 
" + "Please use `prepare_annotation` instead. Note: the `prepare_annotation` method " + "does not return the image anymore.", + ) + target = self.prepare_annotation(image, target, return_segmentation_masks, masks_path, self.format) + return image, target + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.convert_coco_poly_to_mask + def convert_coco_poly_to_mask(self, *args, **kwargs): + logger.warning_once("The `convert_coco_poly_to_mask` method is deprecated and will be removed in v4.33. ") + return convert_coco_poly_to_mask(*args, **kwargs) + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_detection + def prepare_coco_detection(self, *args, **kwargs): + logger.warning_once("The `prepare_coco_detection` method is deprecated and will be removed in v4.33. ") + return prepare_coco_detection_annotation(*args, **kwargs) + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_panoptic + def prepare_coco_panoptic(self, *args, **kwargs): + logger.warning_once("The `prepare_coco_panoptic` method is deprecated and will be removed in v4.33. ") + return prepare_coco_panoptic_annotation(*args, **kwargs) + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize + def resize( + self, + image: np.ndarray, + size: Dict[str, int], + resample: PILImageResampling = PILImageResampling.BILINEAR, + data_format: Optional[ChannelDimension] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an + int, smaller edge of the image will be matched to this number. + + Args: + image (`np.ndarray`): + Image to resize. + size (`Dict[str, int]`): + Dictionary containing the size to resize to. Can contain the keys `shortest_edge` and `longest_edge` or + `height` and `width`. + resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): + Resampling filter to use if resizing the image. + data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format for the output image. If unset, the channel dimension format of the input + image is used. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred. + """ + if "max_size" in kwargs: + logger.warning_once( + "The `max_size` parameter is deprecated and will be removed in v4.26. " + "Please specify in `size['longest_edge'] instead`.", + ) + max_size = kwargs.pop("max_size") + else: + max_size = None + size = get_size_dict(size, max_size=max_size, default_to_square=False) + if "shortest_edge" in size and "longest_edge" in size: + size = get_resize_output_image_size( + image, size["shortest_edge"], size["longest_edge"], input_data_format=input_data_format + ) + elif "height" in size and "width" in size: + size = (size["height"], size["width"]) + else: + raise ValueError( + "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" + f" {size.keys()}." 
+ ) + image = resize( + image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs + ) + return image + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation + def resize_annotation( + self, + annotation, + orig_size, + size, + resample: PILImageResampling = PILImageResampling.NEAREST, + ) -> Dict: + """ + Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched + to this number. + """ + return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample) + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale + def rescale( + self, + image: np.ndarray, + rescale_factor: float, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> np.ndarray: + """ + Rescale the image by the given factor. image = image * rescale_factor. + + Args: + image (`np.ndarray`): + Image to rescale. + rescale_factor (`float`): + The value to use for rescaling. + data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format for the output image. If unset, the channel dimension format of the input + image is used. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + input_data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format for the input image. If unset, is inferred from the input image. Can be + one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + """ + return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format) + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation + def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict: + """ + Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to + `[center_x, center_y, width, height]` format and from absolute to relative pixel values. + """ + return normalize_annotation(annotation, image_size=image_size) + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._update_annotation_for_padded_image + def _update_annotation_for_padded_image( + self, + annotation: Dict, + input_image_size: Tuple[int, int], + output_image_size: Tuple[int, int], + padding, + update_bboxes, + ) -> Dict: + """ + Update the annotation for a padded image. 
+ """ + new_annotation = {} + new_annotation["size"] = output_image_size + + for key, value in annotation.items(): + if key == "masks": + masks = value + masks = pad( + masks, + padding, + mode=PaddingMode.CONSTANT, + constant_values=0, + input_data_format=ChannelDimension.FIRST, + ) + masks = safe_squeeze(masks, 1) + new_annotation["masks"] = masks + elif key == "boxes" and update_bboxes: + boxes = value + boxes *= np.asarray( + [ + input_image_size[1] / output_image_size[1], + input_image_size[0] / output_image_size[0], + input_image_size[1] / output_image_size[1], + input_image_size[0] / output_image_size[0], + ] + ) + new_annotation["boxes"] = boxes + elif key == "size": + new_annotation["size"] = output_image_size + else: + new_annotation[key] = value + return new_annotation + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image + def _pad_image( + self, + image: np.ndarray, + output_size: Tuple[int, int], + annotation: Optional[Dict[str, Any]] = None, + constant_values: Union[float, Iterable[float]] = 0, + data_format: Optional[ChannelDimension] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + update_bboxes: bool = True, + ) -> np.ndarray: + """ + Pad an image with zeros to the given size. + """ + input_height, input_width = get_image_size(image, channel_dim=input_data_format) + output_height, output_width = output_size + + pad_bottom = output_height - input_height + pad_right = output_width - input_width + padding = ((0, pad_bottom), (0, pad_right)) + padded_image = pad( + image, + padding, + mode=PaddingMode.CONSTANT, + constant_values=constant_values, + data_format=data_format, + input_data_format=input_data_format, + ) + if annotation is not None: + annotation = self._update_annotation_for_padded_image( + annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes + ) + return padded_image, annotation + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad + def pad( + self, + images: List[np.ndarray], + annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None, + constant_values: Union[float, Iterable[float]] = 0, + return_pixel_mask: bool = True, + return_tensors: Optional[Union[str, TensorType]] = None, + data_format: Optional[ChannelDimension] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + update_bboxes: bool = True, + ) -> BatchFeature: + """ + Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width + in the batch and optionally returns their corresponding pixel mask. + + Args: + images (List[`np.ndarray`]): + Images to pad. + annotations (`AnnotationType` or `List[AnnotationType]`, *optional*): + Annotations to transform according to the padding that is applied to the images. + constant_values (`float` or `Iterable[float]`, *optional*): + The value to use for the padding if `mode` is `"constant"`. + return_pixel_mask (`bool`, *optional*, defaults to `True`): + Whether to return a pixel mask. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. 
+ data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format of the image. If not provided, it will be the same as the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred. + update_bboxes (`bool`, *optional*, defaults to `True`): + Whether to update the bounding boxes in the annotations to match the padded images. If the + bounding boxes have not been converted to relative coordinates and `(centre_x, centre_y, width, height)` + format, the bounding boxes will not be updated. + """ + pad_size = get_max_height_width(images, input_data_format=input_data_format) + + annotation_list = annotations if annotations is not None else [None] * len(images) + padded_images = [] + padded_annotations = [] + for image, annotation in zip(images, annotation_list): + padded_image, padded_annotation = self._pad_image( + image, + pad_size, + annotation, + constant_values=constant_values, + data_format=data_format, + input_data_format=input_data_format, + update_bboxes=update_bboxes, + ) + padded_images.append(padded_image) + padded_annotations.append(padded_annotation) + + data = {"pixel_values": padded_images} + + if return_pixel_mask: + masks = [ + make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format) + for image in images + ] + data["pixel_mask"] = masks + + encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) + + if annotations is not None: + encoded_inputs["labels"] = [ + BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations + ] + + return encoded_inputs + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.preprocess + def preprocess( + self, + images: ImageInput, + annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None, + return_segmentation_masks: bool = None, + masks_path: Optional[Union[str, pathlib.Path]] = None, + do_resize: Optional[bool] = None, + size: Optional[Dict[str, int]] = None, + resample=None, # PILImageResampling + do_rescale: Optional[bool] = None, + rescale_factor: Optional[Union[int, float]] = None, + do_normalize: Optional[bool] = None, + do_convert_annotations: Optional[bool] = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + do_pad: Optional[bool] = None, + format: Optional[Union[str, AnnotationFormat]] = None, + return_tensors: Optional[Union[TensorType, str]] = None, + data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> BatchFeature: + """ + Preprocess an image or a batch of images so that it can be used by the model. + + Args: + images (`ImageInput`): + Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging + from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. + annotations (`AnnotationType` or `List[AnnotationType]`, *optional*): + List of annotations associated with the image or batch of images. If annotation is for object + detection, the annotations should be a dictionary with the following keys: + - "image_id" (`int`): The image id. + - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a + dictionary. An image can have no annotations, in which case the list should be empty. 
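# A minimal usage sketch (array shapes are made up) of `pad` above: two differently sized images
# are padded on the bottom/right to the batch maximum, and a pixel mask marks the valid region.
#
#   import numpy as np
#   images = [np.zeros((3, 480, 640), dtype=np.float32), np.zeros((3, 512, 512), dtype=np.float32)]
#   batch = DeformableDetrImageProcessor().pad(images, return_tensors="np")
#   # batch["pixel_values"].shape -> (2, 3, 512, 640)
#   # batch["pixel_mask"][0] is 1 on the top-left 480x640 region of the first image and 0 elsewhere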
+ If annotation is for segmentation, the annotations should be a dictionary with the following keys: + - "image_id" (`int`): The image id. + - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary. + An image can have no segments, in which case the list should be empty. + - "file_name" (`str`): The file name of the image. + return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks): + Whether to return segmentation masks. + masks_path (`str` or `pathlib.Path`, *optional*): + Path to the directory containing the segmentation masks. + do_resize (`bool`, *optional*, defaults to self.do_resize): + Whether to resize the image. + size (`Dict[str, int]`, *optional*, defaults to self.size): + Size of the image after resizing. + resample (`PILImageResampling`, *optional*, defaults to self.resample): + Resampling filter to use when resizing the image. + do_rescale (`bool`, *optional*, defaults to self.do_rescale): + Whether to rescale the image. + rescale_factor (`float`, *optional*, defaults to self.rescale_factor): + Rescale factor to use when rescaling the image. + do_normalize (`bool`, *optional*, defaults to self.do_normalize): + Whether to normalize the image. + do_convert_annotations (`bool`, *optional*, defaults to self.do_convert_annotations): + Whether to convert the annotations to the format expected by the model. Converts the bounding + boxes from the format `(top_left_x, top_left_y, width, height)` to `(center_x, center_y, width, height)` + and in relative coordinates. + image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean): + Mean to use when normalizing the image. + image_std (`float` or `List[float]`, *optional*, defaults to self.image_std): + Standard deviation to use when normalizing the image. + do_pad (`bool`, *optional*, defaults to self.do_pad): + Whether to pad the image. If `True` will pad the images in the batch to the largest image in the batch + and create a pixel mask. Padding will be applied to the bottom and right of the image with zeros. + format (`str` or `AnnotationFormat`, *optional*, defaults to self.format): + Format of the annotations. + return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors): + Type of tensors to return. If `None`, will return the list of images. + data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: Use the channel dimension format of the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + """ + if "pad_and_return_pixel_mask" in kwargs: + logger.warning_once( + "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, " + "use `do_pad` instead." 
+ ) + do_pad = kwargs.pop("pad_and_return_pixel_mask") + + max_size = None + if "max_size" in kwargs: + logger.warning_once( + "The `max_size` argument is deprecated and will be removed in a future version, use" + " `size['longest_edge']` instead." + ) + size = kwargs.pop("max_size") + + do_resize = self.do_resize if do_resize is None else do_resize + size = self.size if size is None else size + size = get_size_dict(size=size, max_size=max_size, default_to_square=False) + resample = self.resample if resample is None else resample + do_rescale = self.do_rescale if do_rescale is None else do_rescale + rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor + do_normalize = self.do_normalize if do_normalize is None else do_normalize + image_mean = self.image_mean if image_mean is None else image_mean + image_std = self.image_std if image_std is None else image_std + do_convert_annotations = ( + self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations + ) + do_pad = self.do_pad if do_pad is None else do_pad + format = self.format if format is None else format + + images = make_list_of_images(images) + + if not valid_images(images): + raise ValueError( + "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " + "torch.Tensor, tf.Tensor or jax.ndarray." + ) + validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) + + # Here, the pad() method pads to the maximum of (width, height). It does not need to be validated. + validate_preprocess_arguments( + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + do_resize=do_resize, + size=size, + resample=resample, + ) + + if annotations is not None and isinstance(annotations, dict): + annotations = [annotations] + + if annotations is not None and len(images) != len(annotations): + raise ValueError( + f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match." + ) + + format = AnnotationFormat(format) + if annotations is not None: + validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations) + + if ( + masks_path is not None + and format == AnnotationFormat.COCO_PANOPTIC + and not isinstance(masks_path, (pathlib.Path, str)) + ): + raise ValueError( + "The path to the directory containing the mask PNG files should be provided as a" + f" `pathlib.Path` or string object, but is {type(masks_path)} instead." + ) + + # All transformations expect numpy arrays + images = [to_numpy_array(image) for image in images] + + if is_scaled_image(images[0]) and do_rescale: + logger.warning_once( + "It looks like you are trying to rescale already rescaled images. If the input" + " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." + ) + + if input_data_format is None: + # We assume that all images have the same channel dimension format. 
+ input_data_format = infer_channel_dimension_format(images[0]) + + # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image) + if annotations is not None: + prepared_images = [] + prepared_annotations = [] + for image, target in zip(images, annotations): + target = self.prepare_annotation( + image, + target, + format, + return_segmentation_masks=return_segmentation_masks, + masks_path=masks_path, + input_data_format=input_data_format, + ) + prepared_images.append(image) + prepared_annotations.append(target) + images = prepared_images + annotations = prepared_annotations + del prepared_images, prepared_annotations + + # transformations + if do_resize: + if annotations is not None: + resized_images, resized_annotations = [], [] + for image, target in zip(images, annotations): + orig_size = get_image_size(image, input_data_format) + resized_image = self.resize( + image, size=size, max_size=max_size, resample=resample, input_data_format=input_data_format + ) + resized_annotation = self.resize_annotation( + target, orig_size, get_image_size(resized_image, input_data_format) + ) + resized_images.append(resized_image) + resized_annotations.append(resized_annotation) + images = resized_images + annotations = resized_annotations + del resized_images, resized_annotations + else: + images = [ + self.resize(image, size=size, resample=resample, input_data_format=input_data_format) + for image in images + ] + + if do_rescale: + images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images] + + if do_normalize: + images = [ + self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images + ] + + if do_convert_annotations and annotations is not None: + annotations = [ + self.normalize_annotation(annotation, get_image_size(image, input_data_format)) + for annotation, image in zip(annotations, images) + ] + + if do_pad: + # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...} + encoded_inputs = self.pad( + images, + annotations=annotations, + return_pixel_mask=True, + data_format=data_format, + input_data_format=input_data_format, + update_bboxes=do_convert_annotations, + return_tensors=return_tensors, + ) + else: + images = [ + to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) + for image in images + ] + encoded_inputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors) + if annotations is not None: + encoded_inputs["labels"] = [ + BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations + ] + + return encoded_inputs + + # POSTPROCESSING METHODS - TODO: add support for other frameworks + def post_process(self, outputs, target_sizes): + """ + Converts the raw output of [`DeformableDetrForObjectDetection`] into final bounding boxes in (top_left_x, + top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch. + + Args: + outputs ([`DeformableDetrObjectDetectionOutput`]): + Raw outputs of the model. + target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): + Tensor containing the size (height, width) of each image of the batch. For evaluation, this must be the + original image size (before any data augmentation). For visualization, this should be the image size + after data augment, but before padding. + Returns: + `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image + in the batch as predicted by the model. 
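# A typical end-to-end preprocessing sketch (the file path and annotation contents are
# hypothetical); the processor is exported at the top level of `transformers`, and calling it
# dispatches to `preprocess` above.
#
#   from PIL import Image
#   from transformers import DeformableDetrImageProcessor
#
#   image = Image.open("street.jpg")
#   target = {"image_id": 0, "annotations": [...]}   # COCO-detection style, see prepare_coco_detection_annotation
#   processor = DeformableDetrImageProcessor()
#   encoding = processor(images=image, annotations=target, return_tensors="pt")
#   # encoding["pixel_values"], encoding["pixel_mask"], encoding["labels"]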
+ """ + logger.warning_once( + "`post_process` is deprecated and will be removed in v5 of Transformers, please use" + " `post_process_object_detection` instead, with `threshold=0.` for equivalent results.", + ) + + out_logits, out_bbox = outputs.logits, outputs.pred_boxes + + if len(out_logits) != len(target_sizes): + raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits") + if target_sizes.shape[1] != 2: + raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") + + prob = out_logits.sigmoid() + topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1) + scores = topk_values + topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor") + labels = topk_indexes % out_logits.shape[2] + boxes = center_to_corners_format(out_bbox) + boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) + + # and from relative [0, 1] to absolute [0, height] coordinates + img_h, img_w = target_sizes.unbind(1) + scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) + boxes = boxes * scale_fct[:, None, :] + + results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)] + + return results + + def post_process_object_detection( + self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None, top_k: int = 100 + ): + """ + Converts the raw output of [`DeformableDetrForObjectDetection`] into final bounding boxes in (top_left_x, + top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch. + + Args: + outputs ([`DetrObjectDetectionOutput`]): + Raw outputs of the model. + threshold (`float`, *optional*): + Score threshold to keep object detection predictions. + target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*): + Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size + (height, width) of each image in the batch. If left to None, predictions will not be resized. + top_k (`int`, *optional*, defaults to 100): + Keep only top k bounding boxes before filtering by thresholding. + + Returns: + `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image + in the batch as predicted by the model. 
+ """ + out_logits, out_bbox = outputs.logits, outputs.pred_boxes + + if target_sizes is not None: + if len(out_logits) != len(target_sizes): + raise ValueError( + "Make sure that you pass in as many target sizes as the batch dimension of the logits" + ) + + prob = out_logits.sigmoid() + prob = prob.view(out_logits.shape[0], -1) + k_value = min(top_k, prob.size(1)) + topk_values, topk_indexes = torch.topk(prob, k_value, dim=1) + scores = topk_values + topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor") + labels = topk_indexes % out_logits.shape[2] + boxes = center_to_corners_format(out_bbox) + boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) + + # and from relative [0, 1] to absolute [0, height] coordinates + if target_sizes is not None: + if isinstance(target_sizes, List): + img_h = torch.Tensor([i[0] for i in target_sizes]) + img_w = torch.Tensor([i[1] for i in target_sizes]) + else: + img_h, img_w = target_sizes.unbind(1) + scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) + boxes = boxes * scale_fct[:, None, :] + + results = [] + for s, l, b in zip(scores, labels, boxes): + score = s[s > threshold] + label = l[s > threshold] + box = b[s > threshold] + results.append({"scores": score, "labels": label, "boxes": box}) + + return results diff --git a/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/load_custom.py b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/load_custom.py new file mode 100644 index 0000000000000000000000000000000000000000..c3a822e2764170c24c7098956e81788856385451 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/load_custom.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" Loading of Deformable DETR's CUDA kernels""" +import os +from pathlib import Path + + +def load_cuda_kernels(): + from torch.utils.cpp_extension import load + + root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr" + src_files = [ + root / filename + for filename in [ + "vision.cpp", + os.path.join("cpu", "ms_deform_attn_cpu.cpp"), + os.path.join("cuda", "ms_deform_attn_cuda.cu"), + ] + ] + + load( + "MultiScaleDeformableAttention", + src_files, + with_cuda=True, + extra_include_paths=[str(root)], + extra_cflags=["-DWITH_CUDA=1"], + extra_cuda_cflags=[ + "-DCUDA_HAS_FP16=1", + "-D__CUDA_NO_HALF_OPERATORS__", + "-D__CUDA_NO_HALF_CONVERSIONS__", + "-D__CUDA_NO_HALF2_OPERATORS__", + ], + ) + + import MultiScaleDeformableAttention as MSDA + + return MSDA diff --git a/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/modeling_deformable_detr.py b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/modeling_deformable_detr.py new file mode 100644 index 0000000000000000000000000000000000000000..c0ac7cffc7ab44578ab33463071d409bf628264f --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/deformable_detr/modeling_deformable_detr.py @@ -0,0 +1,2536 @@ +# coding=utf-8 +# Copyright 2022 SenseTime and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" PyTorch Deformable DETR model.""" + + +import copy +import math +import os +import warnings +from dataclasses import dataclass +from pathlib import Path +from typing import Dict, List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +from torch import Tensor, nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable + +from ...activations import ACT2FN +from ...file_utils import ( + ModelOutput, + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_scipy_available, + is_timm_available, + is_torch_cuda_available, + is_vision_available, + replace_return_docstrings, + requires_backends, +) +from ...modeling_attn_mask_utils import _prepare_4d_attention_mask +from ...modeling_outputs import BaseModelOutput +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import meshgrid +from ...utils import is_accelerate_available, is_ninja_available, logging +from ...utils.backbone_utils import load_backbone +from .configuration_deformable_detr import DeformableDetrConfig + + +logger = logging.get_logger(__name__) + +MultiScaleDeformableAttention = None + + +def load_cuda_kernels(): + from torch.utils.cpp_extension import load + + global MultiScaleDeformableAttention + + root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr" + src_files = [ + root / filename + for filename in [ + "vision.cpp", + os.path.join("cpu", "ms_deform_attn_cpu.cpp"), + os.path.join("cuda", "ms_deform_attn_cuda.cu"), + ] + ] + + MultiScaleDeformableAttention = load( + "MultiScaleDeformableAttention", + src_files, + with_cuda=True, + extra_include_paths=[str(root)], + extra_cflags=["-DWITH_CUDA=1"], + extra_cuda_cflags=[ + "-DCUDA_HAS_FP16=1", + "-D__CUDA_NO_HALF_OPERATORS__", + "-D__CUDA_NO_HALF_CONVERSIONS__", + "-D__CUDA_NO_HALF2_OPERATORS__", + ], + ) + + +if is_vision_available(): + from transformers.image_transforms import center_to_corners_format + +if is_accelerate_available(): + from accelerate import PartialState + from accelerate.utils import reduce + + +class MultiScaleDeformableAttentionFunction(Function): + @staticmethod + def forward( + context, + value, + value_spatial_shapes, + value_level_start_index, + sampling_locations, + attention_weights, + im2col_step, + ): + context.im2col_step = im2col_step + output = MultiScaleDeformableAttention.ms_deform_attn_forward( + value, + value_spatial_shapes, + value_level_start_index, + sampling_locations, + attention_weights, + context.im2col_step, + ) + context.save_for_backward( + value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights + ) + return output + + @staticmethod + @once_differentiable + def backward(context, grad_output): + ( + value, + value_spatial_shapes, + value_level_start_index, + sampling_locations, + attention_weights, + ) = context.saved_tensors + grad_value, grad_sampling_loc, grad_attn_weight = MultiScaleDeformableAttention.ms_deform_attn_backward( + value, + value_spatial_shapes, + value_level_start_index, + sampling_locations, + attention_weights, + grad_output, + context.im2col_step, + ) + + return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None + + +if is_scipy_available(): + from scipy.optimize import linear_sum_assignment + +if is_timm_available(): + from timm import create_model + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "DeformableDetrConfig" +_CHECKPOINT_FOR_DOC = "sensetime/deformable-detr" + + +from ..deprecated._archive_maps import 
DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +@dataclass +class DeformableDetrDecoderOutput(ModelOutput): + """ + Base class for outputs of the DeformableDetrDecoder. This class adds two attributes to + BaseModelOutputWithCrossAttentions, namely: + - a stacked tensor of intermediate decoder hidden states (i.e. the output of each decoder layer) + - a stacked tensor of intermediate reference points. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): + Stacked intermediate hidden states (output of each layer of the decoder). + intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, sequence_length, hidden_size)`): + Stacked intermediate reference points (reference points of each layer of the decoder). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer + plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, + used to compute the weighted average in the cross-attention heads. + """ + + last_hidden_state: torch.FloatTensor = None + intermediate_hidden_states: torch.FloatTensor = None + intermediate_reference_points: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class DeformableDetrModelOutput(ModelOutput): + """ + Base class for outputs of the Deformable DETR encoder-decoder model. + + Args: + init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): + Initial reference points sent through the Transformer decoder. + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the decoder of the model. + intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): + Stacked intermediate hidden states (output of each layer of the decoder). + intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`): + Stacked intermediate reference points (reference points of each layer of the decoder). 
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, num_queries, hidden_size)`. Hidden-states of the decoder at the output of each layer + plus the initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_queries, + num_queries)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted + average in the self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`. + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each + layer plus the initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`. + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): + Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are + picked as region proposals in the first stage. Output of bounding box binary classification (i.e. + foreground and background). + enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): + Logits of predicted bounding boxes coordinates in the first stage. 
+ """ + + init_reference_points: torch.FloatTensor = None + last_hidden_state: torch.FloatTensor = None + intermediate_hidden_states: torch.FloatTensor = None + intermediate_reference_points: torch.FloatTensor = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + enc_outputs_class: Optional[torch.FloatTensor] = None + enc_outputs_coord_logits: Optional[torch.FloatTensor] = None + + +@dataclass +class DeformableDetrObjectDetectionOutput(ModelOutput): + """ + Output type of [`DeformableDetrForObjectDetection`]. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)): + Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a + bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized + scale-invariant IoU loss. + loss_dict (`Dict`, *optional*): + A dictionary containing the individual losses. Useful for logging. + logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): + Classification logits (including no-object) for all queries. + pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): + Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These + values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding + possible padding). You can use [`~DeformableDetrProcessor.post_process_object_detection`] to retrieve the + unnormalized bounding boxes. + auxiliary_outputs (`list[Dict]`, *optional*): + Optional, only returned when auxilary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) + and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and + `pred_boxes`) for each decoder layer. + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the decoder of the model. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, num_queries, hidden_size)`. Hidden-states of the decoder at the output of each layer + plus the initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_queries, + num_queries)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted + average in the self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`. 
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each + layer plus the initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_heads, 4, + 4)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average + in the self-attention heads. + intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): + Stacked intermediate hidden states (output of each layer of the decoder). + intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`): + Stacked intermediate reference points (reference points of each layer of the decoder). + init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): + Initial reference points sent through the Transformer decoder. + enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): + Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are + picked as region proposals in the first stage. Output of bounding box binary classification (i.e. + foreground and background). + enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): + Logits of predicted bounding boxes coordinates in the first stage. 
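    Illustrative usage (editorial addition, not from the original docstring; assumes the
    `SenseTime/deformable-detr` checkpoint referenced in this file is available locally or on the Hub):

        import torch
        from PIL import Image
        from transformers import AutoImageProcessor, DeformableDetrForObjectDetection

        image = Image.open("example.jpg")  # any local image; the path is hypothetical
        image_processor = AutoImageProcessor.from_pretrained("SenseTime/deformable-detr")
        model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr")

        inputs = image_processor(images=image, return_tensors="pt")
        with torch.no_grad():
            outputs = model(**inputs)  # a DeformableDetrObjectDetectionOutput

        target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
        detections = image_processor.post_process_object_detection(
            outputs, threshold=0.5, target_sizes=target_sizes
        )[0]
        # detections["scores"], detections["labels"], detections["boxes"] hold the kept predictions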
+ """ + + loss: Optional[torch.FloatTensor] = None + loss_dict: Optional[Dict] = None + logits: torch.FloatTensor = None + pred_boxes: torch.FloatTensor = None + auxiliary_outputs: Optional[List[Dict]] = None + init_reference_points: Optional[torch.FloatTensor] = None + last_hidden_state: Optional[torch.FloatTensor] = None + intermediate_hidden_states: Optional[torch.FloatTensor] = None + intermediate_reference_points: Optional[torch.FloatTensor] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + enc_outputs_class: Optional = None + enc_outputs_coord_logits: Optional = None + + +def _get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + + +def inverse_sigmoid(x, eps=1e-5): + x = x.clamp(min=0, max=1) + x1 = x.clamp(min=eps) + x2 = (1 - x).clamp(min=eps) + return torch.log(x1 / x2) + + +# Copied from transformers.models.detr.modeling_detr.DetrFrozenBatchNorm2d with Detr->DeformableDetr +class DeformableDetrFrozenBatchNorm2d(nn.Module): + """ + BatchNorm2d where the batch statistics and the affine parameters are fixed. + + Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than + torchvision.models.resnet[18,34,50,101] produce nans. + """ + + def __init__(self, n): + super().__init__() + self.register_buffer("weight", torch.ones(n)) + self.register_buffer("bias", torch.zeros(n)) + self.register_buffer("running_mean", torch.zeros(n)) + self.register_buffer("running_var", torch.ones(n)) + + def _load_from_state_dict( + self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs + ): + num_batches_tracked_key = prefix + "num_batches_tracked" + if num_batches_tracked_key in state_dict: + del state_dict[num_batches_tracked_key] + + super()._load_from_state_dict( + state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs + ) + + def forward(self, x): + # move reshapes to the beginning + # to make it user-friendly + weight = self.weight.reshape(1, -1, 1, 1) + bias = self.bias.reshape(1, -1, 1, 1) + running_var = self.running_var.reshape(1, -1, 1, 1) + running_mean = self.running_mean.reshape(1, -1, 1, 1) + epsilon = 1e-5 + scale = weight * (running_var + epsilon).rsqrt() + bias = bias - running_mean * scale + return x * scale + bias + + +# Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->DeformableDetr +def replace_batch_norm(model): + r""" + Recursively replace all `torch.nn.BatchNorm2d` with `DeformableDetrFrozenBatchNorm2d`. 
+ + Args: + model (torch.nn.Module): + input model + """ + for name, module in model.named_children(): + if isinstance(module, nn.BatchNorm2d): + new_module = DeformableDetrFrozenBatchNorm2d(module.num_features) + + if not module.weight.device == torch.device("meta"): + new_module.weight.data.copy_(module.weight) + new_module.bias.data.copy_(module.bias) + new_module.running_mean.data.copy_(module.running_mean) + new_module.running_var.data.copy_(module.running_var) + + model._modules[name] = new_module + + if len(list(module.children())) > 0: + replace_batch_norm(module) + + +class DeformableDetrConvEncoder(nn.Module): + """ + Convolutional backbone, using either the AutoBackbone API or one from the timm library. + + nn.BatchNorm2d layers are replaced by DeformableDetrFrozenBatchNorm2d as defined above. + + """ + + def __init__(self, config): + super().__init__() + + self.config = config + + if config.use_timm_backbone: + requires_backends(self, ["timm"]) + kwargs = {} + if config.dilation: + kwargs["output_stride"] = 16 + backbone = create_model( + config.backbone, + pretrained=config.use_pretrained_backbone, + features_only=True, + out_indices=(2, 3, 4) if config.num_feature_levels > 1 else (4,), + in_chans=config.num_channels, + **kwargs, + ) + else: + backbone = load_backbone(config) + + # replace batch norm by frozen batch norm + with torch.no_grad(): + replace_batch_norm(backbone) + self.model = backbone + self.intermediate_channel_sizes = ( + self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels + ) + + backbone_model_type = config.backbone if config.use_timm_backbone else config.backbone_config.model_type + if "resnet" in backbone_model_type: + for name, parameter in self.model.named_parameters(): + if config.use_timm_backbone: + if "layer2" not in name and "layer3" not in name and "layer4" not in name: + parameter.requires_grad_(False) + else: + if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name: + parameter.requires_grad_(False) + + # Copied from transformers.models.detr.modeling_detr.DetrConvEncoder.forward with Detr->DeformableDetr + def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): + # send pixel_values through the model to get list of feature maps + features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps + + out = [] + for feature_map in features: + # downsample pixel_mask to match shape of corresponding feature_map + mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0] + out.append((feature_map, mask)) + return out + + +# Copied from transformers.models.detr.modeling_detr.DetrConvModel with Detr->DeformableDetr +class DeformableDetrConvModel(nn.Module): + """ + This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder. 
+ """ + + def __init__(self, conv_encoder, position_embedding): + super().__init__() + self.conv_encoder = conv_encoder + self.position_embedding = position_embedding + + def forward(self, pixel_values, pixel_mask): + # send pixel_values and pixel_mask through backbone to get list of (feature_map, pixel_mask) tuples + out = self.conv_encoder(pixel_values, pixel_mask) + pos = [] + for feature_map, mask in out: + # position encoding + pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype)) + + return out, pos + + +class DeformableDetrSinePositionEmbedding(nn.Module): + """ + This is a more standard version of the position embedding, very similar to the one used by the Attention is all you + need paper, generalized to work on images. + """ + + def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None): + super().__init__() + self.embedding_dim = embedding_dim + self.temperature = temperature + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + if scale is None: + scale = 2 * math.pi + self.scale = scale + + def forward(self, pixel_values, pixel_mask): + if pixel_mask is None: + raise ValueError("No pixel mask provided") + y_embed = pixel_mask.cumsum(1, dtype=torch.float32) + x_embed = pixel_mask.cumsum(2, dtype=torch.float32) + if self.normalize: + eps = 1e-6 + y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale + + dim_t = torch.arange(self.embedding_dim, dtype=torch.int64, device=pixel_values.device).float() + dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.embedding_dim) + + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + return pos + + +# Copied from transformers.models.detr.modeling_detr.DetrLearnedPositionEmbedding +class DeformableDetrLearnedPositionEmbedding(nn.Module): + """ + This module learns positional embeddings up to a fixed maximum size. 
+ """ + + def __init__(self, embedding_dim=256): + super().__init__() + self.row_embeddings = nn.Embedding(50, embedding_dim) + self.column_embeddings = nn.Embedding(50, embedding_dim) + + def forward(self, pixel_values, pixel_mask=None): + height, width = pixel_values.shape[-2:] + width_values = torch.arange(width, device=pixel_values.device) + height_values = torch.arange(height, device=pixel_values.device) + x_emb = self.column_embeddings(width_values) + y_emb = self.row_embeddings(height_values) + pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1) + pos = pos.permute(2, 0, 1) + pos = pos.unsqueeze(0) + pos = pos.repeat(pixel_values.shape[0], 1, 1, 1) + return pos + + +# Copied from transformers.models.detr.modeling_detr.build_position_encoding with Detr->DeformableDetr +def build_position_encoding(config): + n_steps = config.d_model // 2 + if config.position_embedding_type == "sine": + # TODO find a better way of exposing other arguments + position_embedding = DeformableDetrSinePositionEmbedding(n_steps, normalize=True) + elif config.position_embedding_type == "learned": + position_embedding = DeformableDetrLearnedPositionEmbedding(n_steps) + else: + raise ValueError(f"Not supported {config.position_embedding_type}") + + return position_embedding + + +def multi_scale_deformable_attention( + value: Tensor, value_spatial_shapes: Tensor, sampling_locations: Tensor, attention_weights: Tensor +) -> Tensor: + batch_size, _, num_heads, hidden_dim = value.shape + _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape + value_list = value.split([height.item() * width.item() for height, width in value_spatial_shapes], dim=1) + sampling_grids = 2 * sampling_locations - 1 + sampling_value_list = [] + for level_id, (height, width) in enumerate(value_spatial_shapes): + # batch_size, height*width, num_heads, hidden_dim + # -> batch_size, height*width, num_heads*hidden_dim + # -> batch_size, num_heads*hidden_dim, height*width + # -> batch_size*num_heads, hidden_dim, height, width + value_l_ = ( + value_list[level_id].flatten(2).transpose(1, 2).reshape(batch_size * num_heads, hidden_dim, height, width) + ) + # batch_size, num_queries, num_heads, num_points, 2 + # -> batch_size, num_heads, num_queries, num_points, 2 + # -> batch_size*num_heads, num_queries, num_points, 2 + sampling_grid_l_ = sampling_grids[:, :, :, level_id].transpose(1, 2).flatten(0, 1) + # batch_size*num_heads, hidden_dim, num_queries, num_points + sampling_value_l_ = nn.functional.grid_sample( + value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False + ) + sampling_value_list.append(sampling_value_l_) + # (batch_size, num_queries, num_heads, num_levels, num_points) + # -> (batch_size, num_heads, num_queries, num_levels, num_points) + # -> (batch_size, num_heads, 1, num_queries, num_levels*num_points) + attention_weights = attention_weights.transpose(1, 2).reshape( + batch_size * num_heads, 1, num_queries, num_levels * num_points + ) + output = ( + (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights) + .sum(-1) + .view(batch_size, num_heads * hidden_dim, num_queries) + ) + return output.transpose(1, 2).contiguous() + + +class DeformableDetrMultiscaleDeformableAttention(nn.Module): + """ + Multiscale deformable attention as proposed in Deformable DETR. 
+ """ + + def __init__(self, config: DeformableDetrConfig, num_heads: int, n_points: int): + super().__init__() + + kernel_loaded = MultiScaleDeformableAttention is not None + if is_torch_cuda_available() and is_ninja_available() and not kernel_loaded: + try: + load_cuda_kernels() + except Exception as e: + logger.warning(f"Could not load the custom kernel for multi-scale deformable attention: {e}") + + if config.d_model % num_heads != 0: + raise ValueError( + f"embed_dim (d_model) must be divisible by num_heads, but got {config.d_model} and {num_heads}" + ) + dim_per_head = config.d_model // num_heads + # check if dim_per_head is power of 2 + if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0): + warnings.warn( + "You'd better set embed_dim (d_model) in DeformableDetrMultiscaleDeformableAttention to make the" + " dimension of each attention head a power of 2 which is more efficient in the authors' CUDA" + " implementation." + ) + + self.im2col_step = 64 + + self.d_model = config.d_model + self.n_levels = config.num_feature_levels + self.n_heads = num_heads + self.n_points = n_points + + self.sampling_offsets = nn.Linear(config.d_model, num_heads * self.n_levels * n_points * 2) + self.attention_weights = nn.Linear(config.d_model, num_heads * self.n_levels * n_points) + self.value_proj = nn.Linear(config.d_model, config.d_model) + self.output_proj = nn.Linear(config.d_model, config.d_model) + + self.disable_custom_kernels = config.disable_custom_kernels + + self._reset_parameters() + + def _reset_parameters(self): + nn.init.constant_(self.sampling_offsets.weight.data, 0.0) + default_dtype = torch.get_default_dtype() + thetas = torch.arange(self.n_heads, dtype=torch.int64).to(default_dtype) * (2.0 * math.pi / self.n_heads) + grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) + grid_init = ( + (grid_init / grid_init.abs().max(-1, keepdim=True)[0]) + .view(self.n_heads, 1, 1, 2) + .repeat(1, self.n_levels, self.n_points, 1) + ) + for i in range(self.n_points): + grid_init[:, :, i, :] *= i + 1 + with torch.no_grad(): + self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1)) + nn.init.constant_(self.attention_weights.weight.data, 0.0) + nn.init.constant_(self.attention_weights.bias.data, 0.0) + nn.init.xavier_uniform_(self.value_proj.weight.data) + nn.init.constant_(self.value_proj.bias.data, 0.0) + nn.init.xavier_uniform_(self.output_proj.weight.data) + nn.init.constant_(self.output_proj.bias.data, 0.0) + + def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): + return tensor if position_embeddings is None else tensor + position_embeddings + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states=None, + encoder_attention_mask=None, + position_embeddings: Optional[torch.Tensor] = None, + reference_points=None, + spatial_shapes=None, + level_start_index=None, + output_attentions: bool = False, + ): + # add position embeddings to the hidden states before projecting to queries and keys + if position_embeddings is not None: + hidden_states = self.with_pos_embed(hidden_states, position_embeddings) + + batch_size, num_queries, _ = hidden_states.shape + batch_size, sequence_length, _ = encoder_hidden_states.shape + if (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length: + raise ValueError( + "Make sure to align the spatial shapes with the sequence length of the encoder hidden states" + ) + + value = self.value_proj(encoder_hidden_states) + if 
attention_mask is not None: + # we invert the attention_mask + value = value.masked_fill(~attention_mask[..., None], float(0)) + value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads) + sampling_offsets = self.sampling_offsets(hidden_states).view( + batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2 + ) + attention_weights = self.attention_weights(hidden_states).view( + batch_size, num_queries, self.n_heads, self.n_levels * self.n_points + ) + attention_weights = F.softmax(attention_weights, -1).view( + batch_size, num_queries, self.n_heads, self.n_levels, self.n_points + ) + # batch_size, num_queries, n_heads, n_levels, n_points, 2 + num_coordinates = reference_points.shape[-1] + if num_coordinates == 2: + offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) + sampling_locations = ( + reference_points[:, :, None, :, None, :] + + sampling_offsets / offset_normalizer[None, None, None, :, None, :] + ) + elif num_coordinates == 4: + sampling_locations = ( + reference_points[:, :, None, :, None, :2] + + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5 + ) + else: + raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}") + + if self.disable_custom_kernels: + # PyTorch implementation + output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights) + else: + try: + # custom kernel + output = MultiScaleDeformableAttentionFunction.apply( + value, + spatial_shapes, + level_start_index, + sampling_locations, + attention_weights, + self.im2col_step, + ) + except Exception: + # PyTorch implementation + output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights) + output = self.output_proj(output) + + return output, attention_weights + + +class DeformableDetrMultiheadAttention(nn.Module): + """ + Multi-headed attention from 'Attention Is All You Need' paper. + + Here, we add position embeddings to the queries and keys (as explained in the Deformable DETR paper). + """ + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + bias: bool = True, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + if self.head_dim * num_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" + f" {num_heads})." 
+ ) + self.scaling = self.head_dim**-0.5 + + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): + return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): + return tensor if position_embeddings is None else tensor + position_embeddings + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_embeddings: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + batch_size, target_len, embed_dim = hidden_states.size() + # add position embeddings to the hidden states before projecting to queries and keys + if position_embeddings is not None: + hidden_states_original = hidden_states + hidden_states = self.with_pos_embed(hidden_states, position_embeddings) + + # get queries, keys and values + query_states = self.q_proj(hidden_states) * self.scaling + key_states = self._shape(self.k_proj(hidden_states), -1, batch_size) + value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size) + + proj_shape = (batch_size * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_states = value_states.view(*proj_shape) + + source_len = key_states.size(1) + + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): + raise ValueError( + f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" + f" {attn_weights.size()}" + ) + + # expand attention_mask + if attention_mask is not None: + # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] + attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype) + + if attention_mask is not None: + if attention_mask.size() != (batch_size, 1, target_len, source_len): + raise ValueError( + f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is" + f" {attention_mask.size()}" + ) + attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask + attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. 
+ # In order to do so, attn_weights have to reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + attn_output = attn_output.reshape(batch_size, target_len, embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped + + +class DeformableDetrEncoderLayer(nn.Module): + def __init__(self, config: DeformableDetrConfig): + super().__init__() + self.embed_dim = config.d_model + self.self_attn = DeformableDetrMultiscaleDeformableAttention( + config, num_heads=config.encoder_attention_heads, n_points=config.encoder_n_points + ) + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) + self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + position_embeddings: torch.Tensor = None, + reference_points=None, + spatial_shapes=None, + level_start_index=None, + output_attentions: bool = False, + ): + """ + Args: + hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Input to the layer. + attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): + Attention mask. + position_embeddings (`torch.FloatTensor`, *optional*): + Position embeddings, to be added to `hidden_states`. + reference_points (`torch.FloatTensor`, *optional*): + Reference points. + spatial_shapes (`torch.LongTensor`, *optional*): + Spatial shapes of the backbone feature maps. + level_start_index (`torch.LongTensor`, *optional*): + Level start index. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + + # Apply Multi-scale Deformable Attention Module on the multi-scale feature maps. 
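        # Editorial note: in the encoder this is "self-attention" in the deformable sense: the
        # flattened multi-scale features serve as both the queries (`hidden_states`) and the sampled
        # values (`encoder_hidden_states=hidden_states`), with one reference point per feature-map
        # location; `attn_weights` below are the per-point sampling weights.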
+ hidden_states, attn_weights = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=hidden_states, + encoder_attention_mask=attention_mask, + position_embeddings=position_embeddings, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + output_attentions=output_attentions, + ) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + if self.training: + if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): + clamp_value = torch.finfo(hidden_states.dtype).max - 1000 + hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +class DeformableDetrDecoderLayer(nn.Module): + def __init__(self, config: DeformableDetrConfig): + super().__init__() + self.embed_dim = config.d_model + + # self-attention + self.self_attn = DeformableDetrMultiheadAttention( + embed_dim=self.embed_dim, + num_heads=config.decoder_attention_heads, + dropout=config.attention_dropout, + ) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + # cross-attention + self.encoder_attn = DeformableDetrMultiscaleDeformableAttention( + config, + num_heads=config.decoder_attention_heads, + n_points=config.decoder_n_points, + ) + self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) + # feedforward neural networks + self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) + self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.Tensor, + position_embeddings: Optional[torch.Tensor] = None, + reference_points=None, + spatial_shapes=None, + level_start_index=None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = False, + ): + """ + Args: + hidden_states (`torch.FloatTensor`): + Input to the layer of shape `(seq_len, batch, embed_dim)`. + position_embeddings (`torch.FloatTensor`, *optional*): + Position embeddings that are added to the queries and keys in the self-attention layer. + reference_points (`torch.FloatTensor`, *optional*): + Reference points. + spatial_shapes (`torch.LongTensor`, *optional*): + Spatial shapes. + level_start_index (`torch.LongTensor`, *optional*): + Level start index. + encoder_hidden_states (`torch.FloatTensor`): + cross attention input to the layer of shape `(seq_len, batch, embed_dim)` + encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size + `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative + values. 
+ output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + + # Self Attention + hidden_states, self_attn_weights = self.self_attn( + hidden_states=hidden_states, + position_embeddings=position_embeddings, + output_attentions=output_attentions, + ) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + second_residual = hidden_states + + # Cross-Attention + cross_attn_weights = None + hidden_states, cross_attn_weights = self.encoder_attn( + hidden_states=hidden_states, + attention_mask=encoder_attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + position_embeddings=position_embeddings, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + output_attentions=output_attentions, + ) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = second_residual + hidden_states + + hidden_states = self.encoder_attn_layer_norm(hidden_states) + + # Fully Connected + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights, cross_attn_weights) + + return outputs + + +# Copied from transformers.models.detr.modeling_detr.DetrClassificationHead +class DeformableDetrClassificationHead(nn.Module): + """Head for sentence-level classification tasks.""" + + def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float): + super().__init__() + self.dense = nn.Linear(input_dim, inner_dim) + self.dropout = nn.Dropout(p=pooler_dropout) + self.out_proj = nn.Linear(inner_dim, num_classes) + + def forward(self, hidden_states: torch.Tensor): + hidden_states = self.dropout(hidden_states) + hidden_states = self.dense(hidden_states) + hidden_states = torch.tanh(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.out_proj(hidden_states) + return hidden_states + + +class DeformableDetrPreTrainedModel(PreTrainedModel): + config_class = DeformableDetrConfig + base_model_prefix = "model" + main_input_name = "pixel_values" + supports_gradient_checkpointing = True + _no_split_modules = [r"DeformableDetrConvEncoder", r"DeformableDetrEncoderLayer", r"DeformableDetrDecoderLayer"] + supports_gradient_checkpointing = True + + def _init_weights(self, module): + std = self.config.init_std + + if isinstance(module, DeformableDetrLearnedPositionEmbedding): + nn.init.uniform_(module.row_embeddings.weight) + nn.init.uniform_(module.column_embeddings.weight) + elif isinstance(module, DeformableDetrMultiscaleDeformableAttention): + module._reset_parameters() + elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + 
module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + if hasattr(module, "reference_points") and not self.config.two_stage: + nn.init.xavier_uniform_(module.reference_points.weight.data, gain=1.0) + nn.init.constant_(module.reference_points.bias.data, 0.0) + if hasattr(module, "level_embed"): + nn.init.normal_(module.level_embed) + + +DEFORMABLE_DETR_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`DeformableDetrConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +DEFORMABLE_DETR_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. + + Pixel values can be obtained using [`AutoImageProcessor`]. See [`DeformableDetrImageProcessor.__call__`] + for details. + + pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): + Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: + + - 1 for pixels that are real (i.e. **not masked**), + - 0 for pixels that are padding (i.e. **masked**). + + [What are attention masks?](../glossary#attention-mask) + + decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*): + Not used by default. Can be used to mask object queries. + encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): + Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) + `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of + hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you + can choose to directly pass a flattened representation of an image. + decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): + Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an + embedded representation. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. 
+ return_dict (`bool`, *optional*): + Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. +""" + + +class DeformableDetrEncoder(DeformableDetrPreTrainedModel): + """ + Transformer encoder consisting of *config.encoder_layers* deformable attention layers. Each layer is a + [`DeformableDetrEncoderLayer`]. + + The encoder updates the flattened multi-scale feature maps through multiple deformable attention layers. + + Args: + config: DeformableDetrConfig + """ + + def __init__(self, config: DeformableDetrConfig): + super().__init__(config) + self.gradient_checkpointing = False + + self.dropout = config.dropout + self.layers = nn.ModuleList([DeformableDetrEncoderLayer(config) for _ in range(config.encoder_layers)]) + + # Initialize weights and apply final processing + self.post_init() + + @staticmethod + def get_reference_points(spatial_shapes, valid_ratios, device): + """ + Get reference points for each feature map. Used in decoder. + + Args: + spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): + Spatial shapes of each feature map. + valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): + Valid ratios of each feature map. + device (`torch.device`): + Device on which to create the tensors. + Returns: + `torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)` + """ + reference_points_list = [] + for level, (height, width) in enumerate(spatial_shapes): + ref_y, ref_x = meshgrid( + torch.linspace(0.5, height - 0.5, height, dtype=valid_ratios.dtype, device=device), + torch.linspace(0.5, width - 0.5, width, dtype=valid_ratios.dtype, device=device), + indexing="ij", + ) + # TODO: valid_ratios could be useless here. check https://github.com/fundamentalvision/Deformable-DETR/issues/36 + ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * height) + ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * width) + ref = torch.stack((ref_x, ref_y), -1) + reference_points_list.append(ref) + reference_points = torch.cat(reference_points_list, 1) + reference_points = reference_points[:, :, None] * valid_ratios[:, None] + return reference_points + + def forward( + self, + inputs_embeds=None, + attention_mask=None, + position_embeddings=None, + spatial_shapes=None, + level_start_index=None, + valid_ratios=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Args: + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: + - 1 for pixel features that are real (i.e. **not masked**), + - 0 for pixel features that are padding (i.e. **masked**). + [What are attention masks?](../glossary#attention-mask) + position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Position embeddings that are added to the queries and keys in each self-attention layer. + spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): + Spatial shapes of each feature map. + level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`): + Starting index of each feature map. 
+ valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): + Ratio of valid area in each feature level. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + hidden_states = inputs_embeds + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=inputs_embeds.device) + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + for i, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + encoder_layer.__call__, + hidden_states, + attention_mask, + position_embeddings, + reference_points, + spatial_shapes, + level_start_index, + output_attentions, + ) + else: + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + position_embeddings=position_embeddings, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +class DeformableDetrDecoder(DeformableDetrPreTrainedModel): + """ + Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DeformableDetrDecoderLayer`]. + + The decoder updates the query embeddings through multiple self-attention and cross-attention layers. + + Some tweaks for Deformable DETR: + + - `position_embeddings`, `reference_points`, `spatial_shapes` and `valid_ratios` are added to the forward pass. + - it also returns a stack of intermediate outputs and reference points from all decoding layers. 
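    When `bbox_embed` is set (iterative bounding box refinement), each decoder layer predicts a
    delta that updates the reference points in logit space, and the result is detached before the
    next layer. An editorial, illustrative example for 4-dimensional reference points (equivalent
    to the module's `inverse_sigmoid` followed by `sigmoid`):

        import torch

        reference = torch.tensor([0.25, 0.40, 0.10, 0.20])  # normalized (center_x, center_y, width, height)
        delta = torch.tensor([0.30, -0.10, 0.05, 0.00])     # as predicted by bbox_embed[idx]
        refined = (delta + torch.logit(reference, eps=1e-5)).sigmoid()  # refined points stay in (0, 1)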
+ + Args: + config: DeformableDetrConfig + """ + + def __init__(self, config: DeformableDetrConfig): + super().__init__(config) + + self.dropout = config.dropout + self.layers = nn.ModuleList([DeformableDetrDecoderLayer(config) for _ in range(config.decoder_layers)]) + self.gradient_checkpointing = False + + # hack implementation for iterative bounding box refinement and two-stage Deformable DETR + self.bbox_embed = None + self.class_embed = None + + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + inputs_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + position_embeddings=None, + reference_points=None, + spatial_shapes=None, + level_start_index=None, + valid_ratios=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Args: + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): + The query embeddings that are passed into the decoder. + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + of the decoder. + encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected + in `[0, 1]`: + - 1 for pixels that are real (i.e. **not masked**), + - 0 for pixels that are padding (i.e. **masked**). + position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): + Position embeddings that are added to the queries and keys in each self-attention layer. + reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)` is `as_two_stage` else `(batch_size, num_queries, 2)` or , *optional*): + Reference point in range `[0, 1]`, top-left (0,0), bottom-right (1, 1), including padding area. + spatial_shapes (`torch.FloatTensor` of shape `(num_feature_levels, 2)`): + Spatial shapes of the feature maps. + level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`, *optional*): + Indexes for the start of each feature level. In range `[0, sequence_length]`. + valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`, *optional*): + Ratio of valid area in each feature level. + + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. 
+ """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if inputs_embeds is not None: + hidden_states = inputs_embeds + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None + intermediate = () + intermediate_reference_points = () + + for idx, decoder_layer in enumerate(self.layers): + num_coordinates = reference_points.shape[-1] + if num_coordinates == 4: + reference_points_input = ( + reference_points[:, :, None] * torch.cat([valid_ratios, valid_ratios], -1)[:, None] + ) + elif reference_points.shape[-1] == 2: + reference_points_input = reference_points[:, :, None] * valid_ratios[:, None] + else: + raise ValueError("Reference points' last dimension must be of size 2") + + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + position_embeddings, + reference_points_input, + spatial_shapes, + level_start_index, + encoder_hidden_states, + encoder_attention_mask, + output_attentions, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + position_embeddings=position_embeddings, + encoder_hidden_states=encoder_hidden_states, + reference_points=reference_points_input, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + # hack implementation for iterative bounding box refinement + if self.bbox_embed is not None: + tmp = self.bbox_embed[idx](hidden_states) + num_coordinates = reference_points.shape[-1] + if num_coordinates == 4: + new_reference_points = tmp + inverse_sigmoid(reference_points) + new_reference_points = new_reference_points.sigmoid() + elif num_coordinates == 2: + new_reference_points = tmp + new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(reference_points) + new_reference_points = new_reference_points.sigmoid() + else: + raise ValueError( + f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}" + ) + reference_points = new_reference_points.detach() + + intermediate += (hidden_states,) + intermediate_reference_points += (reference_points,) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + if encoder_hidden_states is not None: + all_cross_attentions += (layer_outputs[2],) + + # Keep batch_size as first dimension + intermediate = torch.stack(intermediate, dim=1) + intermediate_reference_points = torch.stack(intermediate_reference_points, dim=1) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + intermediate, + intermediate_reference_points, + all_hidden_states, + all_self_attns, + all_cross_attentions, + ] + if v is not None + ) + return DeformableDetrDecoderOutput( + last_hidden_state=hidden_states, + intermediate_hidden_states=intermediate, + intermediate_reference_points=intermediate_reference_points, + 
hidden_states=all_hidden_states, + attentions=all_self_attns, + cross_attentions=all_cross_attentions, + ) + + +@add_start_docstrings( + """ + The bare Deformable DETR Model (consisting of a backbone and encoder-decoder Transformer) outputting raw + hidden-states without any specific head on top. + """, + DEFORMABLE_DETR_START_DOCSTRING, +) +class DeformableDetrModel(DeformableDetrPreTrainedModel): + def __init__(self, config: DeformableDetrConfig): + super().__init__(config) + + # Create backbone + positional encoding + backbone = DeformableDetrConvEncoder(config) + position_embeddings = build_position_encoding(config) + self.backbone = DeformableDetrConvModel(backbone, position_embeddings) + + # Create input projection layers + if config.num_feature_levels > 1: + num_backbone_outs = len(backbone.intermediate_channel_sizes) + input_proj_list = [] + for _ in range(num_backbone_outs): + in_channels = backbone.intermediate_channel_sizes[_] + input_proj_list.append( + nn.Sequential( + nn.Conv2d(in_channels, config.d_model, kernel_size=1), + nn.GroupNorm(32, config.d_model), + ) + ) + for _ in range(config.num_feature_levels - num_backbone_outs): + input_proj_list.append( + nn.Sequential( + nn.Conv2d(in_channels, config.d_model, kernel_size=3, stride=2, padding=1), + nn.GroupNorm(32, config.d_model), + ) + ) + in_channels = config.d_model + self.input_proj = nn.ModuleList(input_proj_list) + else: + self.input_proj = nn.ModuleList( + [ + nn.Sequential( + nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1), + nn.GroupNorm(32, config.d_model), + ) + ] + ) + + if not config.two_stage: + self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model * 2) + + self.encoder = DeformableDetrEncoder(config) + self.decoder = DeformableDetrDecoder(config) + + self.level_embed = nn.Parameter(torch.Tensor(config.num_feature_levels, config.d_model)) + + if config.two_stage: + self.enc_output = nn.Linear(config.d_model, config.d_model) + self.enc_output_norm = nn.LayerNorm(config.d_model) + self.pos_trans = nn.Linear(config.d_model * 2, config.d_model * 2) + self.pos_trans_norm = nn.LayerNorm(config.d_model * 2) + else: + self.reference_points = nn.Linear(config.d_model, 2) + + self.post_init() + + def get_encoder(self): + return self.encoder + + def get_decoder(self): + return self.decoder + + def freeze_backbone(self): + for name, param in self.backbone.conv_encoder.model.named_parameters(): + param.requires_grad_(False) + + def unfreeze_backbone(self): + for name, param in self.backbone.conv_encoder.model.named_parameters(): + param.requires_grad_(True) + + def get_valid_ratio(self, mask, dtype=torch.float32): + """Get the valid ratio of all feature maps.""" + + _, height, width = mask.shape + valid_height = torch.sum(mask[:, :, 0], 1) + valid_width = torch.sum(mask[:, 0, :], 1) + valid_ratio_height = valid_height.to(dtype) / height + valid_ratio_width = valid_width.to(dtype) / width + valid_ratio = torch.stack([valid_ratio_width, valid_ratio_height], -1) + return valid_ratio + + def get_proposal_pos_embed(self, proposals): + """Get the position embedding of the proposals.""" + + num_pos_feats = self.config.d_model // 2 + temperature = 10000 + scale = 2 * math.pi + + dim_t = torch.arange(num_pos_feats, dtype=torch.int64, device=proposals.device).float() + dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats) + # batch_size, num_queries, 4 + proposals = proposals.sigmoid() * scale + # batch_size, num_queries, 4, 128 + pos = 
proposals[:, :, :, None] / dim_t + # batch_size, num_queries, 4, 64, 2 -> batch_size, num_queries, 512 + pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2) + return pos + + def gen_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes): + """Generate the encoder output proposals from encoded enc_output. + + Args: + enc_output (Tensor[batch_size, sequence_length, hidden_size]): Output of the encoder. + padding_mask (Tensor[batch_size, sequence_length]): Padding mask for `enc_output`. + spatial_shapes (Tensor[num_feature_levels, 2]): Spatial shapes of the feature maps. + + Returns: + `tuple(torch.FloatTensor)`: A tuple of feature map and bbox prediction. + - object_query (Tensor[batch_size, sequence_length, hidden_size]): Object query features. Later used to + directly predict a bounding box. (without the need of a decoder) + - output_proposals (Tensor[batch_size, sequence_length, 4]): Normalized proposals, after an inverse + sigmoid. + """ + batch_size = enc_output.shape[0] + proposals = [] + _cur = 0 + for level, (height, width) in enumerate(spatial_shapes): + mask_flatten_ = padding_mask[:, _cur : (_cur + height * width)].view(batch_size, height, width, 1) + valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1) + valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1) + + grid_y, grid_x = meshgrid( + torch.linspace(0, height - 1, height, dtype=torch.float32, device=enc_output.device), + torch.linspace(0, width - 1, width, dtype=torch.float32, device=enc_output.device), + indexing="ij", + ) + grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) + + scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2) + grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale + width_heigth = torch.ones_like(grid) * 0.05 * (2.0**level) + proposal = torch.cat((grid, width_heigth), -1).view(batch_size, -1, 4) + proposals.append(proposal) + _cur += height * width + output_proposals = torch.cat(proposals, 1) + output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True) + output_proposals = torch.log(output_proposals / (1 - output_proposals)) # inverse sigmoid + output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float("inf")) + output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf")) + + # assign each pixel as an object query + object_query = enc_output + object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0)) + object_query = object_query.masked_fill(~output_proposals_valid, float(0)) + object_query = self.enc_output_norm(self.enc_output(object_query)) + return object_query, output_proposals + + @add_start_docstrings_to_model_forward(DEFORMABLE_DETR_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=DeformableDetrModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: torch.FloatTensor, + pixel_mask: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.FloatTensor] = None, + encoder_outputs: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.FloatTensor], DeformableDetrModelOutput]: + r""" + Returns: + + Examples: + + ```python + >>> from transformers 
import AutoImageProcessor, DeformableDetrModel + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> image_processor = AutoImageProcessor.from_pretrained("SenseTime/deformable-detr") + >>> model = DeformableDetrModel.from_pretrained("SenseTime/deformable-detr") + + >>> inputs = image_processor(images=image, return_tensors="pt") + + >>> outputs = model(**inputs) + + >>> last_hidden_states = outputs.last_hidden_state + >>> list(last_hidden_states.shape) + [1, 300, 256] + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + batch_size, num_channels, height, width = pixel_values.shape + device = pixel_values.device + + if pixel_mask is None: + pixel_mask = torch.ones(((batch_size, height, width)), dtype=torch.long, device=device) + + # Extract multi-scale feature maps of same resolution `config.d_model` (cf Figure 4 in paper) + # First, sent pixel_values + pixel_mask through Backbone to obtain the features + # which is a list of tuples + features, position_embeddings_list = self.backbone(pixel_values, pixel_mask) + + # Then, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default) + sources = [] + masks = [] + for level, (source, mask) in enumerate(features): + sources.append(self.input_proj[level](source)) + masks.append(mask) + if mask is None: + raise ValueError("No attention mask was provided") + + # Lowest resolution feature maps are obtained via 3x3 stride 2 convolutions on the final stage + if self.config.num_feature_levels > len(sources): + _len_sources = len(sources) + for level in range(_len_sources, self.config.num_feature_levels): + if level == _len_sources: + source = self.input_proj[level](features[-1][0]) + else: + source = self.input_proj[level](sources[-1]) + mask = nn.functional.interpolate(pixel_mask[None].float(), size=source.shape[-2:]).to(torch.bool)[0] + pos_l = self.backbone.position_embedding(source, mask).to(source.dtype) + sources.append(source) + masks.append(mask) + position_embeddings_list.append(pos_l) + + # Create queries + query_embeds = None + if not self.config.two_stage: + query_embeds = self.query_position_embeddings.weight + + # Prepare encoder inputs (by flattening) + source_flatten = [] + mask_flatten = [] + lvl_pos_embed_flatten = [] + spatial_shapes = [] + for level, (source, mask, pos_embed) in enumerate(zip(sources, masks, position_embeddings_list)): + batch_size, num_channels, height, width = source.shape + spatial_shape = (height, width) + spatial_shapes.append(spatial_shape) + source = source.flatten(2).transpose(1, 2) + mask = mask.flatten(1) + pos_embed = pos_embed.flatten(2).transpose(1, 2) + lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1) + lvl_pos_embed_flatten.append(lvl_pos_embed) + source_flatten.append(source) + mask_flatten.append(mask) + source_flatten = torch.cat(source_flatten, 1) + mask_flatten = torch.cat(mask_flatten, 1) + lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) + spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=source_flatten.device) + level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), 
spatial_shapes.prod(1).cumsum(0)[:-1])) + valid_ratios = torch.stack([self.get_valid_ratio(m, dtype=source_flatten.dtype) for m in masks], 1) + + # Fourth, sent source_flatten + mask_flatten + lvl_pos_embed_flatten (backbone + proj layer output) through encoder + # Also provide spatial_shapes, level_start_index and valid_ratios + if encoder_outputs is None: + encoder_outputs = self.encoder( + inputs_embeds=source_flatten, + attention_mask=mask_flatten, + position_embeddings=lvl_pos_embed_flatten, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True + elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): + encoder_outputs = BaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + + # Fifth, prepare decoder inputs + batch_size, _, num_channels = encoder_outputs[0].shape + enc_outputs_class = None + enc_outputs_coord_logits = None + if self.config.two_stage: + object_query_embedding, output_proposals = self.gen_encoder_output_proposals( + encoder_outputs[0], ~mask_flatten, spatial_shapes + ) + + # hack implementation for two-stage Deformable DETR + # apply a detection head to each pixel (A.4 in paper) + # linear projection for bounding box binary classification (i.e. foreground and background) + enc_outputs_class = self.decoder.class_embed[-1](object_query_embedding) + # 3-layer FFN to predict bounding boxes coordinates (bbox regression branch) + delta_bbox = self.decoder.bbox_embed[-1](object_query_embedding) + enc_outputs_coord_logits = delta_bbox + output_proposals + + # only keep top scoring `config.two_stage_num_proposals` proposals + topk = self.config.two_stage_num_proposals + topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1] + topk_coords_logits = torch.gather( + enc_outputs_coord_logits, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4) + ) + + topk_coords_logits = topk_coords_logits.detach() + reference_points = topk_coords_logits.sigmoid() + init_reference_points = reference_points + pos_trans_out = self.pos_trans_norm(self.pos_trans(self.get_proposal_pos_embed(topk_coords_logits))) + query_embed, target = torch.split(pos_trans_out, num_channels, dim=2) + else: + query_embed, target = torch.split(query_embeds, num_channels, dim=1) + query_embed = query_embed.unsqueeze(0).expand(batch_size, -1, -1) + target = target.unsqueeze(0).expand(batch_size, -1, -1) + reference_points = self.reference_points(query_embed).sigmoid() + init_reference_points = reference_points + + decoder_outputs = self.decoder( + inputs_embeds=target, + position_embeddings=query_embed, + encoder_hidden_states=encoder_outputs[0], + encoder_attention_mask=mask_flatten, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + enc_outputs = tuple(value for value in [enc_outputs_class, enc_outputs_coord_logits] if value is not None) + tuple_outputs = (init_reference_points,) + decoder_outputs + encoder_outputs + enc_outputs + + return tuple_outputs + + 
return DeformableDetrModelOutput( + init_reference_points=init_reference_points, + last_hidden_state=decoder_outputs.last_hidden_state, + intermediate_hidden_states=decoder_outputs.intermediate_hidden_states, + intermediate_reference_points=decoder_outputs.intermediate_reference_points, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + enc_outputs_class=enc_outputs_class, + enc_outputs_coord_logits=enc_outputs_coord_logits, + ) + + +@add_start_docstrings( + """ + Deformable DETR Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on + top, for tasks such as COCO detection. + """, + DEFORMABLE_DETR_START_DOCSTRING, +) +class DeformableDetrForObjectDetection(DeformableDetrPreTrainedModel): + # When using clones, all layers > 0 will be clones, but layer 0 *is* required + _tied_weights_keys = [r"bbox_embed\.[1-9]\d*", r"class_embed\.[1-9]\d*"] + # We can't initialize the model on meta device as some weights are modified during the initialization + _no_split_modules = None + + def __init__(self, config: DeformableDetrConfig): + super().__init__(config) + + # Deformable DETR encoder-decoder model + self.model = DeformableDetrModel(config) + + # Detection heads on top + self.class_embed = nn.Linear(config.d_model, config.num_labels) + self.bbox_embed = DeformableDetrMLPPredictionHead( + input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3 + ) + + prior_prob = 0.01 + bias_value = -math.log((1 - prior_prob) / prior_prob) + self.class_embed.bias.data = torch.ones(config.num_labels) * bias_value + nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0) + nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0) + + # if two-stage, the last class_embed and bbox_embed is for region proposal generation + num_pred = (config.decoder_layers + 1) if config.two_stage else config.decoder_layers + if config.with_box_refine: + self.class_embed = _get_clones(self.class_embed, num_pred) + self.bbox_embed = _get_clones(self.bbox_embed, num_pred) + nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0) + # hack implementation for iterative bounding box refinement + self.model.decoder.bbox_embed = self.bbox_embed + else: + nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0) + self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)]) + self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)]) + self.model.decoder.bbox_embed = None + if config.two_stage: + # hack implementation for two-stage + self.model.decoder.class_embed = self.class_embed + for box_embed in self.bbox_embed: + nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0) + + # Initialize weights and apply final processing + self.post_init() + + # taken from https://github.com/facebookresearch/detr/blob/master/models/detr.py + @torch.jit.unused + def _set_aux_loss(self, outputs_class, outputs_coord): + # this is a workaround to make torchscript happy, as torchscript + # doesn't support dictionary with non-homogeneous values, such + # as a dict having both a Tensor and a list. 
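+        # Each auxiliary entry pairs the class logits and box predictions of one intermediate decoder layer;
+        # the last layer is excluded because it is already returned as the main `logits` / `pred_boxes`.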
+ return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] + + @add_start_docstrings_to_model_forward(DEFORMABLE_DETR_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=DeformableDetrObjectDetectionOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: torch.FloatTensor, + pixel_mask: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.FloatTensor] = None, + encoder_outputs: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[List[dict]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.FloatTensor], DeformableDetrObjectDetectionOutput]: + r""" + labels (`List[Dict]` of len `(batch_size,)`, *optional*): + Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the + following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch + respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes + in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`. + + Returns: + + Examples: + + ```python + >>> from transformers import AutoImageProcessor, DeformableDetrForObjectDetection + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> image_processor = AutoImageProcessor.from_pretrained("SenseTime/deformable-detr") + >>> model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr") + + >>> inputs = image_processor(images=image, return_tensors="pt") + >>> outputs = model(**inputs) + + >>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax) + >>> target_sizes = torch.tensor([image.size[::-1]]) + >>> results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[ + ... 0 + ... ] + >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): + ... box = [round(i, 2) for i in box.tolist()] + ... print( + ... f"Detected {model.config.id2label[label.item()]} with confidence " + ... f"{round(score.item(), 3)} at location {box}" + ... 
) + Detected cat with confidence 0.8 at location [16.5, 52.84, 318.25, 470.78] + Detected cat with confidence 0.789 at location [342.19, 24.3, 640.02, 372.25] + Detected remote with confidence 0.633 at location [40.79, 72.78, 176.76, 117.25] + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # First, sent images through DETR base model to obtain encoder + decoder outputs + outputs = self.model( + pixel_values, + pixel_mask=pixel_mask, + decoder_attention_mask=decoder_attention_mask, + encoder_outputs=encoder_outputs, + inputs_embeds=inputs_embeds, + decoder_inputs_embeds=decoder_inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs.intermediate_hidden_states if return_dict else outputs[2] + init_reference = outputs.init_reference_points if return_dict else outputs[0] + inter_references = outputs.intermediate_reference_points if return_dict else outputs[3] + + # class logits + predicted bounding boxes + outputs_classes = [] + outputs_coords = [] + + for level in range(hidden_states.shape[1]): + if level == 0: + reference = init_reference + else: + reference = inter_references[:, level - 1] + reference = inverse_sigmoid(reference) + outputs_class = self.class_embed[level](hidden_states[:, level]) + delta_bbox = self.bbox_embed[level](hidden_states[:, level]) + if reference.shape[-1] == 4: + outputs_coord_logits = delta_bbox + reference + elif reference.shape[-1] == 2: + delta_bbox[..., :2] += reference + outputs_coord_logits = delta_bbox + else: + raise ValueError(f"reference.shape[-1] should be 4 or 2, but got {reference.shape[-1]}") + outputs_coord = outputs_coord_logits.sigmoid() + outputs_classes.append(outputs_class) + outputs_coords.append(outputs_coord) + outputs_class = torch.stack(outputs_classes) + outputs_coord = torch.stack(outputs_coords) + + logits = outputs_class[-1] + pred_boxes = outputs_coord[-1] + + loss, loss_dict, auxiliary_outputs = None, None, None + if labels is not None: + # First: create the matcher + matcher = DeformableDetrHungarianMatcher( + class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost + ) + # Second: create the criterion + losses = ["labels", "boxes", "cardinality"] + criterion = DeformableDetrLoss( + matcher=matcher, + num_classes=self.config.num_labels, + focal_alpha=self.config.focal_alpha, + losses=losses, + ) + criterion.to(self.device) + # Third: compute the losses, based on outputs and labels + outputs_loss = {} + outputs_loss["logits"] = logits + outputs_loss["pred_boxes"] = pred_boxes + if self.config.auxiliary_loss: + auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord) + outputs_loss["auxiliary_outputs"] = auxiliary_outputs + if self.config.two_stage: + enc_outputs_coord = outputs.enc_outputs_coord_logits.sigmoid() + outputs_loss["enc_outputs"] = {"logits": outputs.enc_outputs_class, "pred_boxes": enc_outputs_coord} + + loss_dict = criterion(outputs_loss, labels) + # Fourth: compute total loss, as a weighted sum of the various losses + weight_dict = {"loss_ce": 1, "loss_bbox": self.config.bbox_loss_coefficient} + weight_dict["loss_giou"] = self.config.giou_loss_coefficient + if self.config.auxiliary_loss: + aux_weight_dict = {} + for i in range(self.config.decoder_layers - 1): + aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) + weight_dict.update(aux_weight_dict) + loss = sum(loss_dict[k] * weight_dict[k] 
for k in loss_dict.keys() if k in weight_dict) + + if not return_dict: + if auxiliary_outputs is not None: + output = (logits, pred_boxes) + auxiliary_outputs + outputs + else: + output = (logits, pred_boxes) + outputs + tuple_outputs = ((loss, loss_dict) + output) if loss is not None else output + + return tuple_outputs + + dict_outputs = DeformableDetrObjectDetectionOutput( + loss=loss, + loss_dict=loss_dict, + logits=logits, + pred_boxes=pred_boxes, + auxiliary_outputs=auxiliary_outputs, + last_hidden_state=outputs.last_hidden_state, + decoder_hidden_states=outputs.decoder_hidden_states, + decoder_attentions=outputs.decoder_attentions, + cross_attentions=outputs.cross_attentions, + encoder_last_hidden_state=outputs.encoder_last_hidden_state, + encoder_hidden_states=outputs.encoder_hidden_states, + encoder_attentions=outputs.encoder_attentions, + intermediate_hidden_states=outputs.intermediate_hidden_states, + intermediate_reference_points=outputs.intermediate_reference_points, + init_reference_points=outputs.init_reference_points, + enc_outputs_class=outputs.enc_outputs_class, + enc_outputs_coord_logits=outputs.enc_outputs_coord_logits, + ) + + return dict_outputs + + +# Copied from transformers.models.detr.modeling_detr.dice_loss +def dice_loss(inputs, targets, num_boxes): + """ + Compute the DICE loss, similar to generalized IOU for masks + + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs (0 for the negative class and 1 for the positive + class). + """ + inputs = inputs.sigmoid() + inputs = inputs.flatten(1) + numerator = 2 * (inputs * targets).sum(1) + denominator = inputs.sum(-1) + targets.sum(-1) + loss = 1 - (numerator + 1) / (denominator + 1) + return loss.sum() / num_boxes + + +# Copied from transformers.models.detr.modeling_detr.sigmoid_focal_loss +def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): + """ + Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. + + Args: + inputs (`torch.FloatTensor` of arbitrary shape): + The predictions for each example. + targets (`torch.FloatTensor` with the same shape as `inputs`) + A tensor storing the binary classification label for each element in the `inputs` (0 for the negative class + and 1 for the positive class). + alpha (`float`, *optional*, defaults to `0.25`): + Optional weighting factor in the range (0,1) to balance positive vs. negative examples. + gamma (`int`, *optional*, defaults to `2`): + Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. + + Returns: + Loss tensor + """ + prob = inputs.sigmoid() + ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none") + # add modulating factor + p_t = prob * targets + (1 - prob) * (1 - targets) + loss = ce_loss * ((1 - p_t) ** gamma) + + if alpha >= 0: + alpha_t = alpha * targets + (1 - alpha) * (1 - targets) + loss = alpha_t * loss + + return loss.mean(1).sum() / num_boxes + + +class DeformableDetrLoss(nn.Module): + """ + This class computes the losses for `DeformableDetrForObjectDetection`. The process happens in two steps: 1) we + compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of + matched ground-truth / prediction (supervise class and box). 
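+
+    A minimal usage sketch, mirroring the call in `DeformableDetrForObjectDetection.forward` above (the cost values,
+    class count and focal alpha below are placeholders, and `logits`, `pred_boxes`, `labels` stand for the tensors
+    and target dicts described there):
+
+    ```python
+    matcher = DeformableDetrHungarianMatcher(class_cost=1, bbox_cost=5, giou_cost=2)
+    criterion = DeformableDetrLoss(
+        matcher=matcher, num_classes=91, focal_alpha=0.25, losses=["labels", "boxes", "cardinality"]
+    )
+    loss_dict = criterion({"logits": logits, "pred_boxes": pred_boxes}, labels)
+    ```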
+ + Args: + matcher (`DeformableDetrHungarianMatcher`): + Module able to compute a matching between targets and proposals. + num_classes (`int`): + Number of object categories, omitting the special no-object category. + focal_alpha (`float`): + Alpha parameter in focal loss. + losses (`List[str]`): + List of all the losses to be applied. See `get_loss` for a list of all available losses. + """ + + def __init__(self, matcher, num_classes, focal_alpha, losses): + super().__init__() + self.matcher = matcher + self.num_classes = num_classes + self.focal_alpha = focal_alpha + self.losses = losses + + # removed logging parameter, which was part of the original implementation + def loss_labels(self, outputs, targets, indices, num_boxes): + """ + Classification loss (Binary focal loss) targets dicts must contain the key "class_labels" containing a tensor + of dim [nb_target_boxes] + """ + if "logits" not in outputs: + raise KeyError("No logits were found in the outputs") + source_logits = outputs["logits"] + + idx = self._get_source_permutation_idx(indices) + target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)]) + target_classes = torch.full( + source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device + ) + target_classes[idx] = target_classes_o + + target_classes_onehot = torch.zeros( + [source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1], + dtype=source_logits.dtype, + layout=source_logits.layout, + device=source_logits.device, + ) + target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) + + target_classes_onehot = target_classes_onehot[:, :, :-1] + loss_ce = ( + sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) + * source_logits.shape[1] + ) + losses = {"loss_ce": loss_ce} + + return losses + + @torch.no_grad() + # Copied from transformers.models.detr.modeling_detr.DetrLoss.loss_cardinality + def loss_cardinality(self, outputs, targets, indices, num_boxes): + """ + Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes. + + This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients. + """ + logits = outputs["logits"] + device = logits.device + target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) + # Count the number of predictions that are NOT "no-object" (which is the last class) + card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) + card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) + losses = {"cardinality_error": card_err} + return losses + + # Copied from transformers.models.detr.modeling_detr.DetrLoss.loss_boxes + def loss_boxes(self, outputs, targets, indices, num_boxes): + """ + Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss. + + Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes + are expected in format (center_x, center_y, w, h), normalized by the image size. 
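+
+        For example, a normalized box `(center_x, center_y, w, h) = (0.5, 0.5, 0.2, 0.4)` corresponds to the corner
+        format `(x0, y0, x1, y1) = (0.4, 0.3, 0.6, 0.7)` used when computing the GIoU term.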
+ """ + if "pred_boxes" not in outputs: + raise KeyError("No predicted boxes found in outputs") + idx = self._get_source_permutation_idx(indices) + source_boxes = outputs["pred_boxes"][idx] + target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0) + + loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none") + + losses = {} + losses["loss_bbox"] = loss_bbox.sum() / num_boxes + + loss_giou = 1 - torch.diag( + generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes)) + ) + losses["loss_giou"] = loss_giou.sum() / num_boxes + return losses + + # Copied from transformers.models.detr.modeling_detr.DetrLoss._get_source_permutation_idx + def _get_source_permutation_idx(self, indices): + # permute predictions following indices + batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)]) + source_idx = torch.cat([source for (source, _) in indices]) + return batch_idx, source_idx + + # Copied from transformers.models.detr.modeling_detr.DetrLoss._get_target_permutation_idx + def _get_target_permutation_idx(self, indices): + # permute targets following indices + batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)]) + target_idx = torch.cat([target for (_, target) in indices]) + return batch_idx, target_idx + + def get_loss(self, loss, outputs, targets, indices, num_boxes): + loss_map = { + "labels": self.loss_labels, + "cardinality": self.loss_cardinality, + "boxes": self.loss_boxes, + } + if loss not in loss_map: + raise ValueError(f"Loss {loss} not supported") + return loss_map[loss](outputs, targets, indices, num_boxes) + + def forward(self, outputs, targets): + """ + This performs the loss computation. + + Args: + outputs (`dict`, *optional*): + Dictionary of tensors, see the output specification of the model for the format. + targets (`List[dict]`, *optional*): + List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the + losses applied, see each loss' doc. + """ + outputs_without_aux = {k: v for k, v in outputs.items() if k != "auxiliary_outputs" and k != "enc_outputs"} + + # Retrieve the matching between the outputs of the last layer and the targets + indices = self.matcher(outputs_without_aux, targets) + + # Compute the average number of target boxes accross all nodes, for normalization purposes + num_boxes = sum(len(t["class_labels"]) for t in targets) + num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) + world_size = 1 + if is_accelerate_available(): + if PartialState._shared_state != {}: + num_boxes = reduce(num_boxes) + world_size = PartialState().num_processes + num_boxes = torch.clamp(num_boxes / world_size, min=1).item() + + # Compute all the requested losses + losses = {} + for loss in self.losses: + losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) + + # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. 
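+        # The auxiliary losses below reuse the same matcher and loss functions, with keys suffixed by the
+        # intermediate layer index (e.g. "loss_ce_0", "loss_bbox_0", ...) so they can be weighted separately.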
+ if "auxiliary_outputs" in outputs: + for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]): + indices = self.matcher(auxiliary_outputs, targets) + for loss in self.losses: + l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes) + l_dict = {k + f"_{i}": v for k, v in l_dict.items()} + losses.update(l_dict) + + if "enc_outputs" in outputs: + enc_outputs = outputs["enc_outputs"] + bin_targets = copy.deepcopy(targets) + for bt in bin_targets: + bt["class_labels"] = torch.zeros_like(bt["class_labels"]) + indices = self.matcher(enc_outputs, bin_targets) + for loss in self.losses: + l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes) + l_dict = {k + "_enc": v for k, v in l_dict.items()} + losses.update(l_dict) + + return losses + + +# Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead +class DeformableDetrMLPPredictionHead(nn.Module): + """ + Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, + height and width of a bounding box w.r.t. an image. + + Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py + + """ + + def __init__(self, input_dim, hidden_dim, output_dim, num_layers): + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) + + def forward(self, x): + for i, layer in enumerate(self.layers): + x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) + return x + + +class DeformableDetrHungarianMatcher(nn.Module): + """ + This class computes an assignment between the targets and the predictions of the network. + + For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more + predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are + un-matched (and thus treated as non-objects). + + Args: + class_cost: + The relative weight of the classification error in the matching cost. + bbox_cost: + The relative weight of the L1 error of the bounding box coordinates in the matching cost. + giou_cost: + The relative weight of the giou loss of the bounding box in the matching cost. + """ + + def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1): + super().__init__() + requires_backends(self, ["scipy"]) + + self.class_cost = class_cost + self.bbox_cost = bbox_cost + self.giou_cost = giou_cost + if class_cost == 0 and bbox_cost == 0 and giou_cost == 0: + raise ValueError("All costs of the Matcher can't be 0") + + @torch.no_grad() + def forward(self, outputs, targets): + """ + Args: + outputs (`dict`): + A dictionary that contains at least these entries: + * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits + * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates. + targets (`List[dict]`): + A list of targets (len(targets) = batch_size), where each target is a dict containing: + * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of + ground-truth + objects in the target) containing the class labels + * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates. 
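+
+        For illustration, assuming `matcher` is a `DeformableDetrHungarianMatcher` instance and using made-up
+        values (91 classes, 300 queries, 2 ground-truth boxes):
+
+        ```python
+        outputs = {"logits": torch.randn(1, 300, 91), "pred_boxes": torch.rand(1, 300, 4)}
+        targets = [{"class_labels": torch.tensor([17, 65]), "boxes": torch.rand(2, 4)}]
+        indices = matcher(outputs, targets)
+        # -> one (pred_idx, target_idx) pair per image; here each index tensor has length 2
+        ```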
+ + Returns: + `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where: + - index_i is the indices of the selected predictions (in order) + - index_j is the indices of the corresponding selected targets (in order) + For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) + """ + batch_size, num_queries = outputs["logits"].shape[:2] + + # We flatten to compute the cost matrices in a batch + out_prob = outputs["logits"].flatten(0, 1).sigmoid() # [batch_size * num_queries, num_classes] + out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] + + # Also concat the target labels and boxes + target_ids = torch.cat([v["class_labels"] for v in targets]) + target_bbox = torch.cat([v["boxes"] for v in targets]) + + # Compute the classification cost. + alpha = 0.25 + gamma = 2.0 + neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log()) + pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log()) + class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids] + + # Compute the L1 cost between boxes + bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) + + # Compute the giou cost between boxes + giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) + + # Final cost matrix + cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost + cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() + + sizes = [len(v["boxes"]) for v in targets] + indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] + return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] + + +# Copied from transformers.models.detr.modeling_detr._upcast +def _upcast(t: Tensor) -> Tensor: + # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type + if t.is_floating_point(): + return t if t.dtype in (torch.float32, torch.float64) else t.float() + else: + return t if t.dtype in (torch.int32, torch.int64) else t.int() + + +# Copied from transformers.models.detr.modeling_detr.box_area +def box_area(boxes: Tensor) -> Tensor: + """ + Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates. + + Args: + boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`): + Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1 + < x2` and `0 <= y1 < y2`. + + Returns: + `torch.FloatTensor`: a tensor containing the area for each box. + """ + boxes = _upcast(boxes) + return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) + + +# Copied from transformers.models.detr.modeling_detr.box_iou +def box_iou(boxes1, boxes2): + area1 = box_area(boxes1) + area2 = box_area(boxes2) + + left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] + right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] + + width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2] + inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M] + + union = area1[:, None] + area2 - inter + + iou = inter / union + return iou, union + + +# Copied from transformers.models.detr.modeling_detr.generalized_box_iou +def generalized_box_iou(boxes1, boxes2): + """ + Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format. 
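+
+    For intuition: identical boxes give a GIoU of 1.0; two unit boxes that only touch, e.g. `[0, 0, 1, 1]` and
+    `[1, 0, 2, 1]`, give an IoU of 0 and a GIoU of 0 (their union already fills the enclosing box); fully separated
+    boxes give a negative GIoU, approaching -1 as they move apart.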
+ + Returns: + `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2) + """ + # degenerate boxes gives inf / nan results + # so do an early check + if not (boxes1[:, 2:] >= boxes1[:, :2]).all(): + raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}") + if not (boxes2[:, 2:] >= boxes2[:, :2]).all(): + raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}") + iou, union = box_iou(boxes1, boxes2) + + top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2]) + bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) + + width_height = (bottom_right - top_left).clamp(min=0) # [N,M,2] + area = width_height[:, :, 0] * width_height[:, :, 1] + + return iou - (area - union) / area + + +# Copied from transformers.models.detr.modeling_detr._max_by_axis +def _max_by_axis(the_list): + # type: (List[List[int]]) -> List[int] + maxes = the_list[0] + for sublist in the_list[1:]: + for index, item in enumerate(sublist): + maxes[index] = max(maxes[index], item) + return maxes + + +# Copied from transformers.models.detr.modeling_detr.NestedTensor +class NestedTensor(object): + def __init__(self, tensors, mask: Optional[Tensor]): + self.tensors = tensors + self.mask = mask + + def to(self, device): + cast_tensor = self.tensors.to(device) + mask = self.mask + if mask is not None: + cast_mask = mask.to(device) + else: + cast_mask = None + return NestedTensor(cast_tensor, cast_mask) + + def decompose(self): + return self.tensors, self.mask + + def __repr__(self): + return str(self.tensors) + + +# Copied from transformers.models.detr.modeling_detr.nested_tensor_from_tensor_list +def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): + if tensor_list[0].ndim == 3: + max_size = _max_by_axis([list(img.shape) for img in tensor_list]) + batch_shape = [len(tensor_list)] + max_size + batch_size, num_channels, height, width = batch_shape + dtype = tensor_list[0].dtype + device = tensor_list[0].device + tensor = torch.zeros(batch_shape, dtype=dtype, device=device) + mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device) + for img, pad_img, m in zip(tensor_list, tensor, mask): + pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + m[: img.shape[1], : img.shape[2]] = False + else: + raise ValueError("Only 3-dimensional tensors are supported") + return NestedTensor(tensor, mask) diff --git a/venv/lib/python3.10/site-packages/transformers/models/jamba/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/jamba/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f6b7c2137b209cbf31c3c1870aa45e6a94b4dbfb --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/jamba/__init__.py @@ -0,0 +1,58 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
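+
+# This __init__ follows the library's lazy-import pattern: `_import_structure` maps submodules to their public
+# symbols, and `_LazyModule` defers the actual imports until an attribute is first accessed, so importing
+# `transformers.models.jamba` stays cheap when torch (and therefore `modeling_jamba`) is not needed.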
+from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available + + +_import_structure = { + "configuration_jamba": ["JambaConfig"], +} + + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_jamba"] = [ + "JambaForCausalLM", + "JambaForSequenceClassification", + "JambaModel", + "JambaPreTrainedModel", + ] + + +if TYPE_CHECKING: + from .configuration_jamba import JambaConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_jamba import ( + JambaForCausalLM, + JambaForSequenceClassification, + JambaModel, + JambaPreTrainedModel, + ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/jamba/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/jamba/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93e255f1ff5664a4a8b34fc09f903e5aced00c2a Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/jamba/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/jamba/__pycache__/configuration_jamba.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/jamba/__pycache__/configuration_jamba.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3a8a1398967171957fcd7621dd0b249176a5362 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/jamba/__pycache__/configuration_jamba.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/jamba/__pycache__/modeling_jamba.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/jamba/__pycache__/modeling_jamba.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9addb326613d448b6c5b2d8df18141d7fb1fa9d2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/jamba/__pycache__/modeling_jamba.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/jamba/configuration_jamba.py b/venv/lib/python3.10/site-packages/transformers/models/jamba/configuration_jamba.py new file mode 100644 index 0000000000000000000000000000000000000000..de9cd378bdc1a5903d6ed7077146ac3f4e8ff731 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/jamba/configuration_jamba.py @@ -0,0 +1,223 @@ +# coding=utf-8 +# Copyright 2024 AI21 Labs Ltd. and the HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" Jamba model configuration""" +import math + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +class JambaConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`JambaModel`]. It is used to instantiate a + Jamba model according to the specified arguments, defining the model architecture. Instantiating a configuration + with the defaults will yield a similar configuration to that of the Jamba-v0.1 model. + + [ai21labs/Jamba-v0.1](https://huggingface.co/ai21labs/Jamba-v0.1) + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 65536): + Vocabulary size of the Jamba model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`JambaModel`] + tie_word_embeddings (`bool`, *optional*, defaults to `False`): + Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the + model has a output word embedding layer. + hidden_size (`int`, *optional*, defaults to 4096): + Dimension of the hidden representations. + intermediate_size (`int`, *optional*, defaults to 14336): + Dimension of the MLP representations. + num_hidden_layers (`int`, *optional*, defaults to 32): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 32): + Number of attention heads for each attention layer in the Transformer encoder. + num_key_value_heads (`int`, *optional*, defaults to 8): + This is the number of key_value heads that should be used to implement Grouped Query Attention. If + `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if + `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When + converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed + by meanpooling all the original heads within that group. For more details checkout [this + paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`. + hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): + The non-linear activation function (function or string) in the decoder. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + rms_norm_eps (`float`, *optional*, defaults to 1e-06): + The epsilon used by the rms normalization layers. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + num_logits_to_keep (`int` or `None`, *optional*, defaults to 1): + Number of prompt logits to calculate during generation. If `None`, all logits will be calculated. If an + integer value, only last `num_logits_to_keep` logits will be calculated. Default is 1 because only the + logits of the last prompt token are needed for generation. For long sequences, the logits for the entire + sequence may use a lot of memory so, setting `num_logits_to_keep=1` will reduce memory footprint + significantly. 
+ output_router_logits (`bool`, *optional*, defaults to `False`): + Whether or not the router logits should be returned by the model. Enabling this will also + allow the model to output the auxiliary loss. See [here]() for more details + router_aux_loss_coef (`float`, *optional*, defaults to 0.001): + The aux loss factor for the total loss. + pad_token_id (`int`, *optional*, defaults to 0): + The id of the padding token. + bos_token_id (`int`, *optional*, defaults to 1): + The id of the "beginning-of-sequence" token. + eos_token_id (`int`, *optional*, defaults to 2): + The id of the "end-of-sequence" token. + sliding_window (`int`, *optional*): + Sliding window attention window size. If not specified, will default to `None`. + max_position_embeddings (`int`, *optional*, defaults to 262144): + This value doesn't have any real effect. The maximum sequence length that this model is intended to be + used with. It can be used with longer sequences, but performance may degrade. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + num_experts_per_tok (`int`, *optional*, defaults to 2): + The number of experts to root per-token, can be also interpreted as the `top-p` routing + parameter + num_experts (`int`, *optional*, defaults to 16): + Number of experts per Sparse MLP layer. + expert_layer_period (`int`, *optional*, defaults to 2): + Once in this many layers, we will have an expert layer + expert_layer_offset (`int`, *optional*, defaults to 1): + The first layer index that contains an expert mlp layer + attn_layer_period (`int`, *optional*, defaults to 8): + Once in this many layers, we will have a vanilla attention layer + attn_layer_offset (`int`, *optional*, defaults to 4): + The first layer index that contains a vanilla attention mlp layer + use_mamba_kernels (`bool`, *optional*, defaults to `True`): + Flag indicating whether or not to use the fast mamba kernels. These are available only if `mamba-ssm` and + `causal-conv1d` are installed, and the mamba modules are running on a CUDA device. Raises ValueError if + `True` and kernels are not available + mamba_d_state (`int`, *optional*, defaults to 16): + The dimension the mamba state space latents + mamba_d_conv (`int`, *optional*, defaults to 4): + The size of the mamba convolution kernel + mamba_expand (`int`, *optional*, defaults to 2): + Expanding factor (relative to hidden_size) used to determine the mamba intermediate size + mamba_dt_rank (`Union[int,str]`, *optional*, defaults to `"auto"`): + Rank of the the mamba discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)` + mamba_conv_bias (`bool`, *optional*, defaults to `True`): + Flag indicating whether or not to use bias in the convolution layer of the mamba mixer block. 
+ mamba_proj_bias (`bool`, *optional*, defaults to `False`): + Flag indicating whether or not to use bias in the input and output projections (["in_proj", "out_proj"]) of the mamba mixer block + + """ + + model_type = "jamba" + keys_to_ignore_at_inference = ["past_key_values"] + + def __init__( + self, + vocab_size=65536, + tie_word_embeddings=False, + hidden_size=4096, + intermediate_size=14336, + num_hidden_layers=32, + num_attention_heads=32, + num_key_value_heads=8, + hidden_act="silu", + initializer_range=0.02, + rms_norm_eps=1e-6, + use_cache=True, + num_logits_to_keep=1, + output_router_logits=False, + router_aux_loss_coef=0.001, + pad_token_id=0, + bos_token_id=1, + eos_token_id=2, + sliding_window=None, + max_position_embeddings=262144, + attention_dropout=0.0, + num_experts_per_tok=2, + num_experts=16, + expert_layer_period=2, + expert_layer_offset=1, + attn_layer_period=8, + attn_layer_offset=4, + use_mamba_kernels=True, + mamba_d_state=16, + mamba_d_conv=4, + mamba_expand=2, + mamba_dt_rank="auto", + mamba_conv_bias=True, + mamba_proj_bias=False, + **kwargs, + ): + self.vocab_size = vocab_size + self.tie_word_embeddings = tie_word_embeddings + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.sliding_window = sliding_window + self.max_position_embeddings = max_position_embeddings + self.attention_dropout = attention_dropout + + # for backward compatibility + if num_key_value_heads is None: + num_key_value_heads = num_attention_heads + + self.num_key_value_heads = num_key_value_heads + self.hidden_act = hidden_act + self.initializer_range = initializer_range + self.rms_norm_eps = rms_norm_eps + + self.use_cache = use_cache + self.num_logits_to_keep = num_logits_to_keep + self.output_router_logits = output_router_logits + self.router_aux_loss_coef = router_aux_loss_coef + + self.num_experts_per_tok = num_experts_per_tok + self.num_experts = num_experts + self.expert_layer_period = expert_layer_period + self.expert_layer_offset = expert_layer_offset + self.attn_layer_period = attn_layer_period + self.attn_layer_offset = attn_layer_offset + + self.use_mamba_kernels = use_mamba_kernels + self.mamba_d_state = mamba_d_state + self.mamba_d_conv = mamba_d_conv + self.mamba_expand = mamba_expand + self.mamba_dt_rank = math.ceil(self.hidden_size / 16) if mamba_dt_rank == "auto" else mamba_dt_rank + self.mamba_conv_bias = mamba_conv_bias + self.mamba_proj_bias = mamba_proj_bias + + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + tie_word_embeddings=tie_word_embeddings, + **kwargs, + ) + + @property + def layers_block_type(self): + return [ + "attention" if i % self.attn_layer_period == self.attn_layer_offset else "mamba" + for i in range(self.num_hidden_layers) + ] + + @property + def layers_num_experts(self): + return [ + self.num_experts if i % self.expert_layer_period == self.expert_layer_offset else 1 + for i in range(self.num_hidden_layers) + ] diff --git a/venv/lib/python3.10/site-packages/transformers/models/jamba/modeling_jamba.py b/venv/lib/python3.10/site-packages/transformers/models/jamba/modeling_jamba.py new file mode 100644 index 0000000000000000000000000000000000000000..9780d95d4ee37604335163b0d6c865c8da542e53 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/jamba/modeling_jamba.py @@ -0,0 +1,1882 @@ +# coding=utf-8 +# Copyright 2024 AI21 Labs Ltd. 
and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch Jamba model.""" +import inspect +import math +from typing import Any, Dict, List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...cache_utils import DynamicCache # we need __iter__ and __len__ of pkv +from ...modeling_attn_mask_utils import ( + AttentionMaskConverter, +) +from ...modeling_outputs import ( + MoeCausalLMOutputWithPast, + MoeModelOutputWithPast, + SequenceClassifierOutputWithPast, +) +from ...modeling_utils import PreTrainedModel +from ...utils import ( + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_flash_attn_greater_or_equal_2_10, + logging, + replace_return_docstrings, +) +from ...utils.import_utils import ( + is_causal_conv1d_available, + is_flash_attn_2_available, + is_mamba_ssm_available, +) +from .configuration_jamba import JambaConfig + + +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters) + + +if is_mamba_ssm_available(): + from mamba_ssm.ops.selective_scan_interface import mamba_inner_fn, selective_scan_fn + from mamba_ssm.ops.triton.selective_state_update import selective_state_update +else: + selective_state_update, selective_scan_fn, mamba_inner_fn = None, None, None + +if is_causal_conv1d_available(): + from causal_conv1d import causal_conv1d_fn, causal_conv1d_update +else: + causal_conv1d_update, causal_conv1d_fn = None, None + +is_fast_path_available = all( + (selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn) +) + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "JambaConfig" + + +# Copied from transformers.models.mixtral.modeling_mixtral.load_balancing_loss_func with gate->router +def load_balancing_loss_func( + router_logits: torch.Tensor, + num_experts: torch.Tensor = None, + top_k=2, + attention_mask: Optional[torch.Tensor] = None, +) -> float: + r""" + Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch. + + See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss + function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between + experts is too unbalanced. 
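+ In this implementation, for each expert `i` the function computes `f_i`, the fraction of tokens for which
+ expert `i` is among the selected top-`k` experts, and `P_i`, the mean router probability assigned to expert
+ `i`, and returns `num_experts * sum_i(f_i * P_i)` (excluding padding tokens when `attention_mask` is provided).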
+ + Args: + router_logits (Union[`torch.Tensor`, Tuple[torch.Tensor]): + Logits from the `router`, should be a tuple of model.config.num_hidden_layers tensors of + shape [batch_size X sequence_length, num_experts]. + attention_mask (`torch.Tensor`, None): + The attention_mask used in forward function + shape [batch_size X sequence_length] if not None. + num_experts (`int`, *optional*): + Number of experts + + Returns: + The auxiliary loss. + """ + if router_logits is None or not isinstance(router_logits, tuple): + return 0 + + if isinstance(router_logits, tuple): + compute_device = router_logits[0].device + concatenated_router_logits = torch.cat( + [layer_router.to(compute_device) for layer_router in router_logits], dim=0 + ) + + routing_weights = torch.nn.functional.softmax(concatenated_router_logits, dim=-1) + + _, selected_experts = torch.topk(routing_weights, top_k, dim=-1) + + expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts) + + if attention_mask is None: + # Compute the percentage of tokens routed to each experts + tokens_per_expert = torch.mean(expert_mask.float(), dim=0) + + # Compute the average probability of routing to these experts + router_prob_per_expert = torch.mean(routing_weights, dim=0) + else: + batch_size, sequence_length = attention_mask.shape + num_hidden_layers = concatenated_router_logits.shape[0] // (batch_size * sequence_length) + + # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask + expert_attention_mask = ( + attention_mask[None, :, :, None, None] + .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts)) + .reshape(-1, top_k, num_experts) + .to(compute_device) + ) + + # Compute the percentage of tokens routed to each experts + tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum( + expert_attention_mask, dim=0 + ) + + # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert + router_per_expert_attention_mask = ( + attention_mask[None, :, :, None] + .expand((num_hidden_layers, batch_size, sequence_length, num_experts)) + .reshape(-1, num_experts) + .to(compute_device) + ) + + # Compute the average probability of routing to these experts + router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum( + router_per_expert_attention_mask, dim=0 + ) + + overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0)) + return overall_loss * num_experts + + +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + +# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Jamba +class JambaRMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + JambaRMSNorm is equivalent to T5LayerNorm + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * 
torch.rsqrt(variance + self.variance_epsilon) + return self.weight * hidden_states.to(input_dtype) + + +# Copied from transformers.models.llama.modeling_llama.repeat_kv +def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + """ + This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, + num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) + """ + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep == 1: + return hidden_states + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + +class HybridMambaAttentionDynamicCache(DynamicCache): + """ + A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache + (which has a constant shape regardless of seq_len). + + This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states` + and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. The expected shape for each tensor + For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`, + while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors). + For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors), + while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`, + and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`. + """ + + def __init__(self, config, batch_size, dtype=torch.float16, device=None): + self.dtype = dtype + self.layers_block_type = config.layers_block_type + self.has_previous_state = False # only used by mamba + intermediate_size = config.mamba_expand * config.hidden_size + ssm_state_size = config.mamba_d_state + conv_kernel_size = config.mamba_d_conv + self.conv_states = [] + self.ssm_states = [] + for i in range(config.num_hidden_layers): + if self.layers_block_type[i] == "mamba": + self.conv_states += [ + torch.zeros(batch_size, intermediate_size, conv_kernel_size, device=device, dtype=dtype) + ] + self.ssm_states += [ + torch.zeros(batch_size, intermediate_size, ssm_state_size, device=device, dtype=dtype) + ] + else: + self.conv_states += [torch.tensor([[]] * batch_size, device=device)] + self.ssm_states += [torch.tensor([[]] * batch_size, device=device)] + + self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)] + self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)] + + def update( + self, + key_states: torch.Tensor, + value_states: torch.Tensor, + layer_idx: int, + cache_kwargs: Optional[Dict[str, Any]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + # Update the cache + if self.key_cache[layer_idx].shape[-1] == 0: + self.key_cache[layer_idx] = key_states + self.value_cache[layer_idx] = value_states + else: + self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=2) + self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=2) + + return self.key_cache[layer_idx], self.value_cache[layer_idx] + + def reorder_cache(self, beam_idx: torch.LongTensor): + """Reorders the cache for beam search, given the selected beam 
indices.""" + for layer_idx in range(len(self.key_cache)): + device = self.key_cache[layer_idx].device + self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device)) + device = self.value_cache[layer_idx].device + self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device)) + + device = self.conv_states[layer_idx].device + self.conv_states[layer_idx] = self.conv_states[layer_idx].index_select(0, beam_idx.to(device)) + device = self.ssm_states[layer_idx].device + self.ssm_states[layer_idx] = self.ssm_states[layer_idx].index_select(0, beam_idx.to(device)) + + def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]: + raise NotImplementedError("HybridMambaAttentionDynamicCache does not have a legacy cache equivalent.") + + @classmethod + def from_legacy_cache(cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None) -> "DynamicCache": + raise NotImplementedError("HybridMambaAttentionDynamicCache does not have a legacy cache equivalent.") + + +# Adapted from transformers.models.mistral.modeling_mistral.MistralAttention with Mistral->Jamba +class JambaAttention(nn.Module): + """ + Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer + and "Generating Long Sequences with Sparse Transformers". + """ + + def __init__(self, config: JambaConfig, layer_idx: Optional[int] = None): + super().__init__() + self.config = config + self.layer_idx = layer_idx + if layer_idx is None: + logger.warning_once( + f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " + "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " + "when creating this class." + ) + + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.num_key_value_heads = config.num_key_value_heads + self.num_key_value_groups = self.num_heads // self.num_key_value_heads + self.is_causal = True + self.attention_dropout = config.attention_dropout + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" + f" and `num_heads`: {self.num_heads})." 
+ ) + self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) + self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) + self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) + self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[HybridMambaAttentionDynamicCache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + if past_key_value is not None: + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx) + + # repeat k/v heads if n_kv_heads < n_heads + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attention_mask is not None: # no matter the length, we just slice it + causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] + attn_weights = attn_weights + causal_mask + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +# Adapted from transformers.models.mistral.modeling_mistral.MistralFlashAttention2 with Mistral->Jamba +class JambaFlashAttention2(JambaAttention): + """ + Jamba flash attention module. This module inherits from `JambaAttention` as the weights of the module stays + untouched. The only required change would be on the forward pass where it needs to correctly call the public API of + flash attention and deal with padding tokens in case the input contains any of them. + """ + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. + # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. 
This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. + # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). + self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[HybridMambaAttentionDynamicCache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, + ): + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + # Flash attention requires the input to have the shape + # batch_size x seq_length x head_dim x hidden_dim + # therefore we just need to keep the original shape + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = cache_position[-1] + + use_sliding_windows = ( + _flash_supports_window_size + and getattr(self.config, "sliding_window", None) is not None + and kv_seq_len > self.config.sliding_window + ) + + if not _flash_supports_window_size: + logger.warning_once( + "The current flash attention version does not support sliding window attention, for a more memory efficient implementation" + " make sure to upgrade flash-attn library." + ) + + if past_key_value is not None: + # Activate slicing cache only if the config has a value `sliding_windows` attribute + cache_has_contents = cache_position[0] > 0 + if ( + getattr(self.config, "sliding_window", None) is not None + and kv_seq_len > self.config.sliding_window + and cache_has_contents + ): + slicing_tokens = 1 - self.config.sliding_window + + past_key = past_key_value[self.layer_idx][0] + past_value = past_key_value[self.layer_idx][1] + + past_key = past_key[:, :, slicing_tokens:, :].contiguous() + past_value = past_value[:, :, slicing_tokens:, :].contiguous() + + if past_key.shape[-2] != self.config.sliding_window - 1: + raise ValueError( + f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got" + f" {past_key.shape}" + ) + + if attention_mask is not None: + attention_mask = attention_mask[:, slicing_tokens:] + attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1) + + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx) + + # repeat k/v heads if n_kv_heads < n_heads + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + dropout_rate = 0.0 if not self.training else self.attention_dropout + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in float16 just to be sure everything works as expected. 
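+ # The target dtype below is resolved in order of preference: the autocast dtype when autocast is
+ # enabled, then the pre-quantization dtype stored on the config for quantized models, and finally
+ # the dtype of the `q_proj` weights.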
+ input_dtype = query_states.dtype + if input_dtype == torch.float32: + if torch.is_autocast_enabled(): + target_dtype = torch.get_autocast_gpu_dtype() + # Handle the case where the model is quantized + elif hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.q_proj.weight.dtype + + logger.warning_once( + f"The input hidden states seems to be silently casted in float32, this might be related to" + f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + f" {target_dtype}." + ) + + query_states = query_states.to(target_dtype) + key_states = key_states.to(target_dtype) + value_states = value_states.to(target_dtype) + + # Reashape to the expected shape for Flash Attention + query_states = query_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + value_states = value_states.transpose(1, 2) + + attn_output = self._flash_attention_forward( + query_states, + key_states, + value_states, + attention_mask, + q_len, + dropout=dropout_rate, + use_sliding_windows=use_sliding_windows, + ) + + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous() + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + def _flash_attention_forward( + self, + query_states, + key_states, + value_states, + attention_mask, + query_length, + dropout=0.0, + softmax_scale=None, + use_sliding_windows=False, + ): + """ + Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token + first unpad the input, then computes the attention scores and pad the final attention scores. + + Args: + query_states (`torch.Tensor`): + Input query states to be passed to Flash Attention API + key_states (`torch.Tensor`): + Input key states to be passed to Flash Attention API + value_states (`torch.Tensor`): + Input value states to be passed to Flash Attention API + attention_mask (`torch.Tensor`): + The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the + position of padding tokens and 1 for the position of non-padding tokens. + dropout (`float`, *optional*): + Attention dropout + softmax_scale (`float`, *optional*): + The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) + use_sliding_windows (`bool`, *optional*): + Whether to activate sliding window attention. + """ + if not self._flash_attn_uses_top_left_mask: + causal = self.is_causal + else: + # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__. 
+ causal = self.is_causal and query_length != 1 + + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + if not use_sliding_windows: + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=causal, + ) + else: + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=causal, + window_size=(self.config.sliding_window, self.config.sliding_window), + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + if not use_sliding_windows: + attn_output = flash_attn_func( + query_states, + key_states, + value_states, + dropout, + softmax_scale=softmax_scale, + causal=causal, + ) + else: + attn_output = flash_attn_func( + query_states, + key_states, + value_states, + dropout, + softmax_scale=softmax_scale, + causal=causal, + window_size=(self.config.sliding_window, self.config.sliding_window), + ) + + return attn_output + + # Copied from transformers.models.mixtral.modeling_mixtral.MixtralFlashAttention2._upad_input + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape + + # On the first iteration we need to properly re-create the padding mask + # by slicing it on the proper place + if kv_seq_len != attention_mask.shape[-1]: + attention_mask_num_tokens = attention_mask.shape[-1] + attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :] + + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + + key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k) + value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k) + + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. 
+ attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + +# Adapted from transformers.models.mistral.modeling_mistral.MistralSdpaAttention with Mistral->Jamba +class JambaSdpaAttention(JambaAttention): + """ + Jamba attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from + `JambaAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to + SDPA API. + """ + + # Adapted from JambaAttention.forward + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[HybridMambaAttentionDynamicCache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if output_attentions: + # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. + logger.warning_once( + "JambaModel is using JambaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " + 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' + ) + return super().forward( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + if past_key_value is not None: + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx) + + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + causal_mask = attention_mask + if attention_mask is not None: + causal_mask = causal_mask[:, :, :, : key_states.shape[-2]] + + # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, + # Reference: https://github.com/pytorch/pytorch/issues/112577. + if query_states.device.type == "cuda" and attention_mask is not None: + query_states = query_states.contiguous() + key_states = key_states.contiguous() + value_states = value_states.contiguous() + + attn_output = torch.nn.functional.scaled_dot_product_attention( + query_states, + key_states, + value_states, + attn_mask=causal_mask, + dropout_p=self.attention_dropout if self.training else 0.0, + # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1. 
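+ # `is_causal` is only enabled when no explicit mask is passed; when `causal_mask` is provided it
+ # already encodes both causality and padding.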
+ is_causal=self.is_causal and attention_mask is None and q_len > 1, + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.view(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + return attn_output, None, past_key_value + + +JAMBA_ATTENTION_CLASSES = { + "eager": JambaAttention, + "flash_attention_2": JambaFlashAttention2, + "sdpa": JambaSdpaAttention, +} + + +# Adapted from transformers.models.mamba.modeling_mamba.MambaMixer +class JambaMambaMixer(nn.Module): + """ + Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`. + A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective) + ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4, + and is why Mamba is called **selective** state spaces) + """ + + def __init__(self, config: JambaConfig, layer_idx): + super().__init__() + self.config = config + self.layer_idx = layer_idx + self.hidden_size = config.hidden_size + self.ssm_state_size = config.mamba_d_state + self.conv_kernel_size = config.mamba_d_conv + self.intermediate_size = config.mamba_expand * config.hidden_size + self.time_step_rank = config.mamba_dt_rank + self.use_conv_bias = config.mamba_conv_bias + self.use_bias = config.mamba_proj_bias + self.conv1d = nn.Conv1d( + in_channels=self.intermediate_size, + out_channels=self.intermediate_size, + bias=self.use_conv_bias, + kernel_size=self.conv_kernel_size, + groups=self.intermediate_size, + padding=self.conv_kernel_size - 1, + ) + + self.activation = config.hidden_act + self.act = ACT2FN[config.hidden_act] + + self.use_fast_kernels = config.use_mamba_kernels + + # projection of the input hidden states + self.in_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=self.use_bias) + # selective projection used to make dt, B and C input dependant + self.x_proj = nn.Linear(self.intermediate_size, self.time_step_rank + self.ssm_state_size * 2, bias=False) + # time step projection (discretization) + self.dt_proj = nn.Linear(self.time_step_rank, self.intermediate_size, bias=True) + + # S4D real initialization. These are not discretized! + # The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded + A = torch.arange(1, self.ssm_state_size + 1, dtype=torch.float32)[None, :] + A = A.expand(self.intermediate_size, -1).contiguous() + + self.A_log = nn.Parameter(torch.log(A)) + self.D = nn.Parameter(torch.ones(self.intermediate_size)) + self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=self.use_bias) + + self.dt_layernorm = JambaRMSNorm(self.time_step_rank, eps=config.rms_norm_eps) + self.b_layernorm = JambaRMSNorm(self.ssm_state_size, eps=config.rms_norm_eps) + self.c_layernorm = JambaRMSNorm(self.ssm_state_size, eps=config.rms_norm_eps) + + if not is_fast_path_available: + logger.warning_once( + "The fast path is not available because on of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)`" + " is None. To install follow https://github.com/state-spaces/mamba/#installation and" + " https://github.com/Dao-AILab/causal-conv1d. 
If you want to use the naive implementation, set `use_mamba_kernels=False` in the model config" + ) + + def cuda_kernels_forward(self, hidden_states: torch.Tensor, cache_params: HybridMambaAttentionDynamicCache = None): + batch_size, seq_len, _ = hidden_states.shape + use_precomputed_states = ( + cache_params is not None + and cache_params.has_previous_state + and seq_len == 1 + and cache_params.conv_states[self.layer_idx].shape[0] + == cache_params.ssm_states[self.layer_idx].shape[0] + == batch_size + ) + # 1. Gated MLP's linear projection + projected_states = self.in_proj(hidden_states).transpose(1, 2) + + # We can't use `mamba_inner_fn` even if in training and without cache params because we have the + # inner layernorms which isn't supported by this fused kernel + hidden_states, gate = projected_states.chunk(2, dim=1) + + # 2. Convolution sequence transformation + conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2)) + if use_precomputed_states: + hidden_states = causal_conv1d_update( + hidden_states.squeeze(-1), + cache_params.conv_states[self.layer_idx], + conv_weights, + self.conv1d.bias, + self.activation, + ) + hidden_states = hidden_states.unsqueeze(-1) + else: + if cache_params is not None: + conv_states = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0)) + cache_params.conv_states[self.layer_idx].copy_(conv_states) + hidden_states = causal_conv1d_fn(hidden_states, conv_weights, self.conv1d.bias, activation=self.activation) + + # 3. State Space Model sequence transformation + # 3.a. input varying initialization of time_step, B and C + ssm_parameters = self.x_proj(hidden_states.transpose(1, 2)) + time_step, B, C = torch.split( + ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1 + ) + + time_step = self.dt_layernorm(time_step) + B = self.b_layernorm(B) + C = self.c_layernorm(C) + + # Here we need to apply dt_proj without the bias, as the bias is added in the selective scan kernel. + # This is a hack to apply dt_proj while still using the forward pass of `torch.nn.Linear`, which is needed + # in order to make quantization work. Quantization code replaces `torch.nn.Linear` layers with quantized + # linear layers, and requires to call the forward pass directly. + # The original code here was: ```discrete_time_step = self.dt_proj.weight @ time_step.transpose(1, 2)``` + time_proj_bias = self.dt_proj.bias + self.dt_proj.bias = None + discrete_time_step = self.dt_proj(time_step).transpose(1, 2) + self.dt_proj.bias = time_proj_bias + + A = -torch.exp(self.A_log.float()) + # 3.c perform the recurrence y ← SSM(A, B, C)(x) + time_proj_bias = time_proj_bias.float() if time_proj_bias is not None else None + if use_precomputed_states: + scan_outputs = selective_state_update( + cache_params.ssm_states[self.layer_idx], + hidden_states[..., 0], + discrete_time_step[..., 0], + A, + B[:, 0], + C[:, 0], + self.D, + gate[..., 0], + time_proj_bias, + dt_softplus=True, + ).unsqueeze(-1) + else: + scan_outputs, ssm_state = selective_scan_fn( + hidden_states, + discrete_time_step, + A, + B.transpose(1, 2), + C.transpose(1, 2), + self.D.float(), + gate, + time_proj_bias, + delta_softplus=True, + return_last_state=True, + ) + if ssm_state is not None and cache_params is not None: + cache_params.ssm_states[self.layer_idx].copy_(ssm_state) + + # 4. 
Final linear projection + contextualized_states = self.out_proj(scan_outputs.transpose(1, 2)) + + return contextualized_states + + # fmt: off + def slow_forward(self, input_states, cache_params: HybridMambaAttentionDynamicCache = None): + batch_size, seq_len, _ = input_states.shape + dtype = input_states.dtype + # 1. Gated MLP's linear projection + projected_states = self.in_proj(input_states).transpose(1, 2) # [batch, 2 * intermediate_size, seq_len] + hidden_states, gate = projected_states.chunk(2, dim=1) + + use_cache = isinstance(cache_params,HybridMambaAttentionDynamicCache) + # 2. Convolution sequence transformation + if use_cache and cache_params.ssm_states[self.layer_idx].shape[0] == batch_size: + if self.training: + # In training mode, we don't want to perform in-place operations on ssm_state so we can compute the backwards pass + ssm_state = cache_params.ssm_states[self.layer_idx].clone() + else: + ssm_state = cache_params.ssm_states[self.layer_idx] + + if cache_params.has_previous_state and seq_len == 1 and \ + cache_params.conv_states[self.layer_idx].shape[0] == batch_size: + conv_state = cache_params.conv_states[self.layer_idx] # [batch, intermediate_size, conv_kernel_size] + conv_state = torch.roll(conv_state, shifts=-1, dims=-1) + conv_state[:, :, -1] = hidden_states[:, :, 0] + cache_params.conv_states[self.layer_idx] = conv_state + hidden_states = torch.sum(conv_state * self.conv1d.weight[:, 0, :], dim=-1) + if self.use_conv_bias: + hidden_states += self.conv1d.bias + hidden_states = self.act(hidden_states).to(dtype).unsqueeze(-1) # [batch, intermediate_size, 1] : decoding + else: + conv_state = nn.functional.pad( + hidden_states, + (self.conv_kernel_size - hidden_states.shape[-1], 0) + ) + cache_params.conv_states[self.layer_idx] = conv_state + hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len]) # [batch, intermediate_size, seq_len] + else: + ssm_state = torch.zeros( + (batch_size, self.intermediate_size, self.ssm_state_size), + device=hidden_states.device, dtype=dtype + ) + hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len]) # [batch, intermediate_size, seq_len] + + # 3. State Space Model sequence transformation + # 3.a. Selection: [batch, seq_len, self.time_step_rank + self.ssm_state_size * 2] + ssm_parameters = self.x_proj(hidden_states.transpose(1, 2)) + time_step, B, C = torch.split( + ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1 + ) + + time_step = self.dt_layernorm(time_step) + B = self.b_layernorm(B) + C = self.c_layernorm(C) + + discrete_time_step = self.dt_proj(time_step) # [batch, seq_len, intermediate_size] + discrete_time_step = nn.functional.softplus(discrete_time_step).transpose(1, 2) # [batch, intermediate_size, seq_len] + + # 3.b. 
Discretization: B and C to [batch, seq_len, intermediate_size, ssm_state_size] (SRAM) + A = -torch.exp(self.A_log.float()) # [intermediate_size, ssm_state_size] + discrete_A = torch.exp(A[None, :, None, :] * discrete_time_step[:, :, :, None]) # [batch, intermediate_size, seq_len, ssm_state_size] + discrete_B = discrete_time_step[:, :, :, None] * B[:, None, :, :].float() # [batch, intermediade_size, seq_len, ssm_state_size] + deltaB_u = discrete_B * hidden_states[:, :, :, None].float() + + # 3.c perform the recurrence y ← SSM(A, B, C)(x) + scan_outputs = [] + for i in range(seq_len): + ssm_state = discrete_A[:, :, i, :] * ssm_state + deltaB_u[:, :, i, :] # [batch, intermediade_size, ssm_state] + scan_output = torch.matmul(ssm_state.to(dtype), C[:, i, :].unsqueeze(-1)) # [batch, intermediade_size, 1] + scan_outputs.append(scan_output[:, :, 0]) + scan_output = torch.stack(scan_outputs, dim=-1) # [batch, intermediade_size, seq_len] + scan_output = scan_output + (hidden_states * self.D[None, :, None]) + scan_output = (scan_output * self.act(gate)) + + if use_cache: + cache_params.ssm_states[self.layer_idx] = ssm_state + + # 4. Final linear projection + contextualized_states = self.out_proj(scan_output.transpose(1, 2)) # [batch, seq_len, hidden_size] + return contextualized_states + # fmt: on + + def forward(self, hidden_states, cache_params: HybridMambaAttentionDynamicCache = None): + if self.use_fast_kernels: + if not is_fast_path_available or "cuda" not in self.x_proj.weight.device.type: + raise ValueError( + "Fast Mamba kernels are not available. Make sure to they are installed and that the mamba module is on a CUDA device" + ) + return self.cuda_kernels_forward(hidden_states, cache_params) + return self.slow_forward(hidden_states, cache_params) + + +# Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Jamba +class JambaMLP(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.intermediate_size = config.intermediate_size + self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) + self.act_fn = ACT2FN[config.hidden_act] + + def forward(self, x): + return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) + + +# Adapted from transformers.models.mixtral.modeling_mixtral.MixtralSparseMoeBlock with Mistral->Jamba +class JambaSparseMoeBlock(nn.Module): + """ + This implementation is + strictly equivalent to standard MoE with full capacity (no + dropped tokens). It's faster since it formulates MoE operations + in terms of block-sparse operations to accomodate imbalanced + assignments of tokens to experts, whereas standard MoE either + (1) drop tokens at the cost of reduced performance or (2) set + capacity factor to number of experts and thus waste computation + and memory on padding. 
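+ In this block, each token is routed to the `num_experts_per_tok` experts with the highest router
+ scores, and the expert outputs are summed weighted by the corresponding softmax routing
+ probabilities (the selected top-k weights are not re-normalized after selection).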
+ """ + + def __init__(self, config: JambaConfig): + super().__init__() + self.hidden_dim = config.hidden_size + self.ffn_dim = config.intermediate_size + self.num_experts = config.num_experts + self.top_k = config.num_experts_per_tok + + self.router = nn.Linear(self.hidden_dim, self.num_experts, bias=False) + self.experts = nn.ModuleList([JambaMLP(config) for _ in range(self.num_experts)]) + + def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ """ + batch_size, sequence_length, hidden_dim = hidden_states.shape + + hidden_states = hidden_states.view(-1, hidden_dim) + # router_logits: (batch * sequence_length, n_experts) + router_logits = self.router(hidden_states) + routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float) + routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1) + # we cast back to the input dtype + routing_weights = routing_weights.to(hidden_states.dtype) + + final_hidden_states = torch.zeros( + (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device + ) + + # One hot encode the selected experts to create an expert mask + # this will be used to easily index which expert is going to be sollicitated + expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0) + + # Loop over all available experts in the model and perform the computation on each expert + for expert_idx in range(self.num_experts): + expert_layer = self.experts[expert_idx] + idx, top_x = torch.where(expert_mask[expert_idx]) + + if top_x.shape[0] == 0: + continue + + # Index the correct hidden states and compute the expert hidden state for + # the current expert. We need to make sure to multiply the output hidden + # states by `routing_weights` on the corresponding tokens (top-1 and top-2) + current_state = hidden_states[None, top_x].reshape(-1, hidden_dim) + current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None] + + # However `index_add_` only support torch tensors for indexing so we'll use + # the `top_x` tensor here. 
+ final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype)) + final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim) + return final_hidden_states, router_logits + + +class JambaAttentionDecoderLayer(nn.Module): + def __init__(self, config: JambaConfig, layer_idx: int): + super().__init__() + num_experts = config.layers_num_experts[layer_idx] + self.self_attn = JAMBA_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx) + + ffn_layer_class = JambaSparseMoeBlock if num_experts > 1 else JambaMLP + self.feed_forward = ffn_layer_class(config) + self.input_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.pre_ff_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[HybridMambaAttentionDynamicCache] = None, + output_attentions: Optional[bool] = False, + output_router_logits: Optional[bool] = False, + use_cache: Optional[bool] = False, + cache_position: Optional[torch.LongTensor] = None, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): attention mask of size + `(batch, sequence_length)` where padding elements are indicated by 0. + past_key_value (`HybridMambaAttentionDynamicCache`, *optional*): cached past key and value projection states + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_router_logits (`bool`, *optional*): + Whether or not to return the logits of all the routers. They are useful for computing the router loss, and + should not be returned during inference. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): + Indices depicting the position of the input sequence tokens in the sequence. 
+ """ + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + ) + + # residual connection after attention + hidden_states = residual + hidden_states + + # feed-forward (experts/MLP) + residual = hidden_states + hidden_states = self.pre_ff_layernorm(hidden_states) + ff_outputs = self.feed_forward(hidden_states) + if isinstance(ff_outputs, tuple): + hidden_states, router_logits = ff_outputs + else: + hidden_states, router_logits = ff_outputs, None + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + if output_router_logits: + outputs += (router_logits,) + + return outputs + + +class JambaMambaDecoderLayer(nn.Module): + def __init__(self, config: JambaConfig, layer_idx: int): + super().__init__() + num_experts = config.layers_num_experts[layer_idx] + self.mamba = JambaMambaMixer(config=config, layer_idx=layer_idx) + + ffn_layer_class = JambaSparseMoeBlock if num_experts > 1 else JambaMLP + self.feed_forward = ffn_layer_class(config) + self.input_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.pre_ff_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[HybridMambaAttentionDynamicCache] = None, + output_attentions: Optional[bool] = False, + output_router_logits: Optional[bool] = False, + use_cache: Optional[bool] = False, + cache_position: Optional[torch.LongTensor] = None, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): attention mask of size + `(batch, sequence_length)` where padding elements are indicated by 0. + past_key_value (`HybridMambaAttentionDynamicCache`, *optional*): cached past key and value projection states + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_router_logits (`bool`, *optional*): + Whether or not to return the logits of all the routers. They are useful for computing the router loss, and + should not be returned during inference. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): + Indices depicting the position of the input sequence tokens in the sequence. 
+ """ + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + hidden_states = self.mamba( + hidden_states=hidden_states, + cache_params=past_key_value, + ) + self_attn_weights = None + + # residual connection after mamba + hidden_states = residual + hidden_states + + # feed-forward (experts/MLP) + residual = hidden_states + hidden_states = self.pre_ff_layernorm(hidden_states) + ff_outputs = self.feed_forward(hidden_states) + if isinstance(ff_outputs, tuple): + hidden_states, router_logits = ff_outputs + else: + hidden_states, router_logits = ff_outputs, None + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (past_key_value,) + + if output_router_logits: + outputs += (router_logits,) + + return outputs + + +JAMBA_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`JambaConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +@add_start_docstrings( + "The bare Jamba Model outputting raw hidden-states without any specific head on top.", + JAMBA_START_DOCSTRING, +) +class JambaPreTrainedModel(PreTrainedModel): + config_class = JambaConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _no_split_modules = ["JambaAttentionDecoderLayer", "JambaMambaDecoderLayer"] + _skip_keys_device_placement = "past_key_values" + _supports_flash_attn_2 = True + _supports_sdpa = True + _supports_cache_class = True + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, (nn.Linear, nn.Conv1d)): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + +JAMBA_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. 
+ + If `past_key_values` is used, optionally only the last `input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`HybridMambaAttentionDynamicCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + A HybridMambaAttentionDynamicCache object containing pre-computed hidden-states (keys and values in the + self-attention blocks and convolution and ssm states in the mamba blocks) that can be used (see + `past_key_values` input) to speed up sequential decoding. + Key and value cache tensors have shape `(batch_size, num_heads, seq_len, head_dim)`. + Convolution and ssm states tensors have shape `(batch_size, d_inner, d_conv)` and + `(batch_size, d_inner, d_state)` respectively. + See the `HybridMambaAttentionDynamicCache` class for more details. + + If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `input_ids` of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + output_router_logits (`bool`, *optional*): + Whether or not to return the logits of all the routers. They are useful for computing the router loss, and + should not be returned during inference. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): + Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`, + this tensor is not affected by padding. It is used to update the cache in the correct position and to infer + the complete sequence length. 
+""" + +ALL_DECODER_LAYER_TYPES = {"attention": JambaAttentionDecoderLayer, "mamba": JambaMambaDecoderLayer} + + +@add_start_docstrings( + "The bare Jamba Model outputting raw hidden-states without any specific head on top.", + JAMBA_START_DOCSTRING, +) +# Adapted from transformers.models.mistral.modeling_mistral.MistralModel with MISTRAL->JAMBA, Mistral->Jamba +class JambaModel(JambaPreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`JambaDecoderLayer`] + + Args: + config: JambaConfig + """ + + def __init__(self, config: JambaConfig): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + decoder_layers = [] + for i in range(config.num_hidden_layers): + layer_class = ALL_DECODER_LAYER_TYPES[config.layers_block_type[i]] + decoder_layers.append(layer_class(config, layer_idx=i)) + self.layers = nn.ModuleList(decoder_layers) + + self._attn_implementation = config._attn_implementation + self.final_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + @add_start_docstrings_to_model_forward(JAMBA_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[HybridMambaAttentionDynamicCache] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_router_logits: Optional[bool] = None, + return_dict: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + ) -> Union[Tuple, MoeModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_router_logits = ( + output_router_logits if output_router_logits is not None else self.config.output_router_logits + ) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if (input_ids is None) ^ (inputs_embeds is not None): + raise ValueError( + "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one" + ) + + if self.gradient_checkpointing and self.training and use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." + ) + use_cache = False + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + hidden_states = inputs_embeds + + if use_cache and past_key_values is None: + logger.warning_once( + "Jamba requires an initialized `HybridMambaAttentionDynamicCache` to return a cache. None was " + "provided, so no cache will be returned." 
+ ) + + if cache_position is None: + cache_position = torch.arange(hidden_states.shape[1], device=hidden_states.device) + if position_ids is None: + position_ids = cache_position.unsqueeze(0) + + causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position) + + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_router_logits = () if output_router_logits else None + + for decoder_layer in self.layers: + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + causal_mask, + position_ids, + past_key_values, + output_attentions, + output_router_logits, + use_cache, + cache_position, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=causal_mask, + position_ids=position_ids, + past_key_value=past_key_values, + output_attentions=output_attentions, + output_router_logits=output_router_logits, + use_cache=use_cache, + cache_position=cache_position, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + if layer_outputs[1] is not None: + # append attentions only of attention layers. Mamba layers return `None` as the attention weights + all_self_attns += (layer_outputs[1],) + + if output_router_logits: + if layer_outputs[-1] is not None: + # append router logits only of expert layers. Regular MLP layers return `None` as the router logits + all_router_logits += (layer_outputs[-1],) + + hidden_states = self.final_layernorm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if past_key_values and not past_key_values.has_previous_state: + past_key_values.has_previous_state = True + + next_cache = None if not use_cache else past_key_values + + if not return_dict: + return tuple( + v + for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_router_logits] + if v is not None + ) + return MoeModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + router_logits=all_router_logits, + ) + + def _update_causal_mask(self, attention_mask, input_tensor, cache_position): + if self.config._attn_implementation == "flash_attention_2": + if attention_mask is not None and 0.0 in attention_mask: + return attention_mask + return None + + dtype, device = input_tensor.dtype, input_tensor.device + min_dtype = torch.finfo(dtype).min + sequence_length = input_tensor.shape[1] + target_length = cache_position[-1] + 1 + + causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device) + if sequence_length != 1: + causal_mask = torch.triu(causal_mask, diagonal=1) + causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) + causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1) + if attention_mask is not None: + causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit + if attention_mask.dim() == 2: + mask_length = attention_mask.shape[-1] + padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0) + causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype) + + if ( + self.config._attn_implementation == "sdpa" + and attention_mask is not 
None + and attention_mask.device.type == "cuda" + ): + # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when + # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. + # Details: https://github.com/pytorch/pytorch/issues/110213 + causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) + + return causal_mask + + +# Adapted from transformers.models.mixtral.modeling_mixtral.MixtralForCausalLM with MIXTRAL->JAMBA, Mixtral->Jamba +class JambaForCausalLM(JambaPreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config: JambaConfig): + super().__init__(config) + self.model = JambaModel(config) + self.vocab_size = config.vocab_size + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + self.router_aux_loss_coef = config.router_aux_loss_coef + self.num_experts = config.num_experts + self.num_experts_per_tok = config.num_experts_per_tok + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model = decoder + + def get_decoder(self): + return self.model + + @add_start_docstrings_to_model_forward(JAMBA_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + # Ignore copy + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[HybridMambaAttentionDynamicCache] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_router_logits: Optional[bool] = None, + return_dict: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + num_logits_to_keep: Optional[Union[int, None]] = None, + ) -> Union[Tuple, MoeCausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + num_logits_to_keep (`int` or `None`, *optional*): + Calculate logits for the last `num_logits_to_keep` tokens. If `None`, calculate logits for all + `input_ids`. Only last token logits are needed for generation, and calculating them only for that token + can save memory, which becomes pretty significant for long sequences. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, JambaForCausalLM + + >>> model = JambaForCausalLM.from_pretrained("ai21labs/Jamba-v0.1") + >>> tokenizer = AutoTokenizer.from_pretrained("ai21labs/Jamba-v0.1") + + >>> prompt = "Hey, are you conscious? Can you talk to me?" 
+ >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_router_logits = ( + output_router_logits if output_router_logits is not None else self.config.output_router_logits + ) + + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + output_router_logits=output_router_logits, + cache_position=cache_position, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + if num_logits_to_keep is None: + logits = self.lm_head(hidden_states) + else: + logits = self.lm_head(hidden_states[..., -num_logits_to_keep:, :]) + logits = logits.float() + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + aux_loss = None + if output_router_logits: + aux_loss = load_balancing_loss_func( + outputs.router_logits if return_dict else outputs[-1], + self.num_experts, + self.num_experts_per_tok, + attention_mask, + ) + if labels is not None: + loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device + + if not return_dict: + output = (logits,) + outputs[1:] + if output_router_logits: + output = (aux_loss,) + output + return (loss,) + output if loss is not None else output + + return MoeCausalLMOutputWithPast( + loss=loss, + aux_loss=aux_loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + router_logits=outputs.router_logits, + ) + + def prepare_inputs_for_generation( + self, + input_ids, + past_key_values=None, + attention_mask=None, + inputs_embeds=None, + output_router_logits=False, + cache_position=None, + **kwargs, + ): + empty_past_kv = past_key_values is None + + # Omit tokens covered by past_key_values + if not empty_past_kv: + past_length = cache_position[0] if cache_position is not None else attention_mask.shape[1] + max_cache_length = self.config.sliding_window + # Keep only the unprocessed tokens: + # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where + # some of the inputs are exclusively passed as part of the cache (e.g. 
when passing input_embeds as + # input) + if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]: + input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :] + # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard + # input_ids based on the past_length. + elif past_length < input_ids.shape[1]: + input_ids = input_ids[:, past_length:] + # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens. + + # If we are about to go beyond the maximum cache length, we need to crop the input attention mask. + if ( + max_cache_length is not None + and attention_mask is not None + and past_length + input_ids.shape[1] > max_cache_length + ): + attention_mask = attention_mask[:, -max_cache_length:] + else: + past_key_values = HybridMambaAttentionDynamicCache( + self.config, input_ids.shape[0], self.dtype, device=self.device + ) + + position_ids = kwargs.get("position_ids", None) + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if not empty_past_kv: + position_ids = position_ids[:, -input_ids.shape[1] :] + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and empty_past_kv: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs.update( + { + "position_ids": position_ids, + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "attention_mask": attention_mask, + "output_router_logits": output_router_logits, + "num_logits_to_keep": self.config.num_logits_to_keep, + "cache_position": cache_position, + } + ) + return model_inputs + + +@add_start_docstrings( + """ + The Jamba Model with a sequence classification head on top (linear layer). + + [`JambaForSequenceClassification`] uses the last token in order to do the classification, as other causal models + (e.g. GPT-2) do. + + Since it does classification on the last token, it requires to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). 
+ """, + JAMBA_START_DOCSTRING, +) +# Copied from transformers.models.mixtral.modeling_mixtral.MixtralForSequenceClassification with Mixtral->Jamba, MIXTRAL->JAMBA +class JambaForSequenceClassification(JambaPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = JambaModel(config) + self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + @add_start_docstrings_to_model_forward(JAMBA_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size = input_ids.shape[0] + else: + batch_size = inputs_embeds.shape[0] + + if self.config.pad_token_id is None and batch_size != 1: + raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility + sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 + sequence_lengths = sequence_lengths % input_ids.shape[-1] + sequence_lengths = sequence_lengths.to(logits.device) + else: + sequence_lengths = -1 + + pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] + + loss = None + if labels is not None: + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(pooled_logits, labels) + elif self.config.problem_type == "single_label_classification": + 
loss_fct = CrossEntropyLoss() + loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(pooled_logits, labels) + if not return_dict: + output = (pooled_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/maskformer/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/maskformer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..efb2290f2c9ceb826e3902a9af7dd22a85f884ec --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/maskformer/__init__.py @@ -0,0 +1,86 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available + + +_import_structure = { + "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"], + "configuration_maskformer_swin": ["MaskFormerSwinConfig"], +} + +try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"] + _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"] + + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_maskformer"] = [ + "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "MaskFormerForInstanceSegmentation", + "MaskFormerModel", + "MaskFormerPreTrainedModel", + ] + _import_structure["modeling_maskformer_swin"] = [ + "MaskFormerSwinBackbone", + "MaskFormerSwinModel", + "MaskFormerSwinPreTrainedModel", + ] + +if TYPE_CHECKING: + from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig + from .configuration_maskformer_swin import MaskFormerSwinConfig + + try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .feature_extraction_maskformer import MaskFormerFeatureExtractor + from .image_processing_maskformer import MaskFormerImageProcessor + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_maskformer import ( + MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + MaskFormerForInstanceSegmentation, + MaskFormerModel, + MaskFormerPreTrainedModel, + ) + from .modeling_maskformer_swin import ( + 
MaskFormerSwinBackbone, + MaskFormerSwinModel, + MaskFormerSwinPreTrainedModel, + ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) diff --git a/venv/lib/python3.10/site-packages/transformers/models/maskformer/convert_maskformer_original_pytorch_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/maskformer/convert_maskformer_original_pytorch_checkpoint_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..999eee136afbe15a66e1793721334e733bc85fde --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/maskformer/convert_maskformer_original_pytorch_checkpoint_to_pytorch.py @@ -0,0 +1,730 @@ +# coding=utf-8 +# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +from argparse import ArgumentParser +from dataclasses import dataclass +from pathlib import Path +from pprint import pformat +from typing import Any, Dict, Iterator, List, Set, Tuple + +import requests +import torch +import torchvision.transforms as T +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.data import MetadataCatalog +from detectron2.projects.deeplab import add_deeplab_config +from PIL import Image +from torch import Tensor, nn + +from transformers.models.maskformer.feature_extraction_maskformer import MaskFormerImageProcessor +from transformers.models.maskformer.modeling_maskformer import ( + MaskFormerConfig, + MaskFormerForInstanceSegmentation, + MaskFormerForInstanceSegmentationOutput, + MaskFormerModel, + MaskFormerModelOutput, +) +from transformers.utils import logging + + +StateDict = Dict[str, Tensor] + +logging.set_verbosity_info() +logger = logging.get_logger() + +torch.manual_seed(0) + + +class TrackedStateDict: + def __init__(self, to_track: Dict): + """This class "tracks" a python dictionary by keeping track of which item is accessed. + + Args: + to_track (Dict): The dictionary we wish to track + """ + self.to_track = to_track + self._seen: Set[str] = set() + + def __getitem__(self, key: str) -> Any: + return self.to_track[key] + + def __setitem__(self, key: str, item: Any): + self._seen.add(key) + self.to_track[key] = item + + def diff(self) -> List[str]: + """This method returns a set difference between the keys in the tracked state dict and the one we have access so far. 
+ This is an effective method to check if we have update all the keys + + Returns: + List[str]: List of keys not yet updated + """ + return set(self.to_track.keys()) - self._seen + + def copy(self) -> Dict: + # proxy the call to the internal dictionary + return self.to_track.copy() + + +# We will verify our results on an image of cute cats +def prepare_img(): + url = "http://images.cocodataset.org/val2017/000000039769.jpg" + img_data = requests.get(url, stream=True).raw + im = Image.open(img_data) + return im + + +@dataclass +class Args: + """Fake command line arguments needed by maskformer/detectron implementation""" + + config_file: str + + +def setup_cfg(args: Args): + # load config from file and command-line arguments + cfg = get_cfg() + add_deeplab_config(cfg) + add_mask_former_config(cfg) + cfg.merge_from_file(args.config_file) + cfg.freeze() + return cfg + + +class OriginalMaskFormerConfigToOursConverter: + def __call__(self, original_config: object) -> MaskFormerConfig: + model = original_config.MODEL + mask_former = model.MASK_FORMER + swin = model.SWIN + + dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST[0]) + id2label = dict(enumerate(dataset_catalog.stuff_classes)) + label2id = {label: idx for idx, label in id2label.items()} + + config: MaskFormerConfig = MaskFormerConfig( + fpn_feature_size=model.SEM_SEG_HEAD.CONVS_DIM, + mask_feature_size=model.SEM_SEG_HEAD.MASK_DIM, + num_labels=model.SEM_SEG_HEAD.NUM_CLASSES, + no_object_weight=mask_former.NO_OBJECT_WEIGHT, + num_queries=mask_former.NUM_OBJECT_QUERIES, + backbone_config={ + "pretrain_img_size": swin.PRETRAIN_IMG_SIZE, + "image_size": swin.PRETRAIN_IMG_SIZE, + "in_channels": 3, + "patch_size": swin.PATCH_SIZE, + "embed_dim": swin.EMBED_DIM, + "depths": swin.DEPTHS, + "num_heads": swin.NUM_HEADS, + "window_size": swin.WINDOW_SIZE, + "drop_path_rate": swin.DROP_PATH_RATE, + "model_type": "swin", + }, + dice_weight=mask_former.DICE_WEIGHT, + ce_weight=1.0, + mask_weight=mask_former.MASK_WEIGHT, + decoder_config={ + "model_type": "detr", + "max_position_embeddings": 1024, + "encoder_layers": 6, + "encoder_ffn_dim": 2048, + "encoder_attention_heads": 8, + "decoder_layers": mask_former.DEC_LAYERS, + "decoder_ffn_dim": mask_former.DIM_FEEDFORWARD, + "decoder_attention_heads": mask_former.NHEADS, + "encoder_layerdrop": 0.0, + "decoder_layerdrop": 0.0, + "d_model": mask_former.HIDDEN_DIM, + "dropout": mask_former.DROPOUT, + "attention_dropout": 0.0, + "activation_dropout": 0.0, + "init_std": 0.02, + "init_xavier_std": 1.0, + "scale_embedding": False, + "auxiliary_loss": False, + "dilation": False, + # default pretrained config values + }, + id2label=id2label, + label2id=label2id, + ) + + return config + + +class OriginalMaskFormerConfigToImageProcessorConverter: + def __call__(self, original_config: object) -> MaskFormerImageProcessor: + model = original_config.MODEL + model_input = original_config.INPUT + dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST[0]) + + return MaskFormerImageProcessor( + image_mean=(torch.tensor(model.PIXEL_MEAN) / 255).tolist(), + image_std=(torch.tensor(model.PIXEL_STD) / 255).tolist(), + size=model_input.MIN_SIZE_TEST, + max_size=model_input.MAX_SIZE_TEST, + num_labels=model.SEM_SEG_HEAD.NUM_CLASSES, + ignore_index=dataset_catalog.ignore_label, + size_divisibility=32, # 32 is required by swin + ) + + +class OriginalMaskFormerCheckpointToOursConverter: + def __init__(self, original_model: nn.Module, config: MaskFormerConfig): + self.original_model = original_model + 
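+        # the converted MaskFormerConfig is only consulted for structural information
+        # (e.g. backbone depths, number of decoder layers) when building the key renames in the replace_* methods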
self.config = config + + def pop_all(self, renamed_keys: List[Tuple[str, str]], dst_state_dict: StateDict, src_state_dict: StateDict): + for src_key, dst_key in renamed_keys: + dst_state_dict[dst_key] = src_state_dict.pop(src_key) + + def replace_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: MaskFormerConfig): + dst_prefix: str = "pixel_level_module.encoder" + src_prefix: str = "backbone" + + renamed_keys = [ + ( + f"{src_prefix}.patch_embed.proj.weight", + f"{dst_prefix}.model.embeddings.patch_embeddings.projection.weight", + ), + (f"{src_prefix}.patch_embed.proj.bias", f"{dst_prefix}.model.embeddings.patch_embeddings.projection.bias"), + (f"{src_prefix}.patch_embed.norm.weight", f"{dst_prefix}.model.embeddings.norm.weight"), + (f"{src_prefix}.patch_embed.norm.bias", f"{dst_prefix}.model.embeddings.norm.bias"), + ] + num_layers = len(config.backbone_config.depths) + for layer_idx in range(num_layers): + for block_idx in range(config.backbone_config.depths[layer_idx]): + renamed_keys.extend( + [ # src, dst + ( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.weight", + f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.weight", + ), + ( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.bias", + f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.bias", + ), + ( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_bias_table", + f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_bias_table", + ), + ] + ) + # now we need to handle the attentions + # read in weights + bias of input projection layer of cross-attention + + src_att_weight = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"] + src_att_bias = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"] + + size = src_att_weight.shape[0] + offset = size // 3 + dst_state_dict[ + f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.weight" + ] = src_att_weight[:offset, :] + dst_state_dict[ + f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.bias" + ] = src_att_bias[:offset] + + dst_state_dict[ + f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.weight" + ] = src_att_weight[offset : offset * 2, :] + dst_state_dict[ + f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.bias" + ] = src_att_bias[offset : offset * 2] + + dst_state_dict[ + f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.weight" + ] = src_att_weight[-offset:, :] + dst_state_dict[ + f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.bias" + ] = src_att_bias[-offset:] + + # let's pop them + src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight") + src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias") + # proj + renamed_keys.extend( + [ + ( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.weight", + f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.weight", + ), + ( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.bias", + f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.bias", + ), + ] + ) + + # second norm + renamed_keys.extend( + [ + 
( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.weight", + f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.weight", + ), + ( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.bias", + f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.bias", + ), + ] + ) + + # mlp + renamed_keys.extend( + [ + ( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.weight", + f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.weight", + ), + ( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.bias", + f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.bias", + ), + ( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.weight", + f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.weight", + ), + ( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.bias", + f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.bias", + ), + ] + ) + + renamed_keys.extend( + [ + ( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_index", + f"{dst_prefix}.model.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_index", + ) + ] + ) + + if layer_idx < num_layers - 1: + # patch merging + renamed_keys.extend( + [ + ( + f"{src_prefix}.layers.{layer_idx}.downsample.reduction.weight", + f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.reduction.weight", + ), + ( + f"{src_prefix}.layers.{layer_idx}.downsample.norm.weight", + f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.norm.weight", + ), + ( + f"{src_prefix}.layers.{layer_idx}.downsample.norm.bias", + f"{dst_prefix}.model.encoder.layers.{layer_idx}.downsample.norm.bias", + ), + ] + ) + + # hidden states norms + renamed_keys.extend( + [ + ( + f"{src_prefix}.norm{layer_idx}.weight", + f"{dst_prefix}.hidden_states_norms.{layer_idx}.weight", + ), + ( + f"{src_prefix}.norm{layer_idx}.bias", + f"{dst_prefix}.hidden_states_norms.{layer_idx}.bias", + ), + ] + ) + self.pop_all(renamed_keys, dst_state_dict, src_state_dict) + + def replace_pixel_module(self, dst_state_dict: StateDict, src_state_dict: StateDict): + dst_prefix: str = "pixel_level_module.decoder" + src_prefix: str = "sem_seg_head.pixel_decoder" + + self.replace_backbone(dst_state_dict, src_state_dict, self.config) + + def rename_keys_for_conv(detectron_conv: str, mine_conv: str): + return [ + (f"{detectron_conv}.weight", f"{mine_conv}.0.weight"), + # 2 cuz the have act in the middle -> rename it + (f"{detectron_conv}.norm.weight", f"{mine_conv}.1.weight"), + (f"{detectron_conv}.norm.bias", f"{mine_conv}.1.bias"), + ] + + renamed_keys = [ + (f"{src_prefix}.mask_features.weight", f"{dst_prefix}.mask_projection.weight"), + (f"{src_prefix}.mask_features.bias", f"{dst_prefix}.mask_projection.bias"), + # the layers in the original one are in reverse order, stem is the last one! 
+ ] + + renamed_keys.extend(rename_keys_for_conv(f"{src_prefix}.layer_4", f"{dst_prefix}.fpn.stem")) + + # add all the fpn layers (here we need some config parameters to know the size in advance) + for src_i, dst_i in zip(range(3, 0, -1), range(0, 3)): + renamed_keys.extend( + rename_keys_for_conv(f"{src_prefix}.adapter_{src_i}", f"{dst_prefix}.fpn.layers.{dst_i}.proj") + ) + renamed_keys.extend( + rename_keys_for_conv(f"{src_prefix}.layer_{src_i}", f"{dst_prefix}.fpn.layers.{dst_i}.block") + ) + + self.pop_all(renamed_keys, dst_state_dict, src_state_dict) + + def rename_keys_in_detr_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict): + dst_prefix: str = "transformer_module.decoder" + src_prefix: str = "sem_seg_head.predictor.transformer.decoder" + # not sure why we are not popping direcetly here! + # here we list all keys to be renamed (original name on the left, our name on the right) + rename_keys = [] + for i in range(self.config.decoder_config.decoder_layers): + # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms + rename_keys.append( + ( + f"{src_prefix}.layers.{i}.self_attn.out_proj.weight", + f"{dst_prefix}.layers.{i}.self_attn.out_proj.weight", + ) + ) + rename_keys.append( + ( + f"{src_prefix}.layers.{i}.self_attn.out_proj.bias", + f"{dst_prefix}.layers.{i}.self_attn.out_proj.bias", + ) + ) + rename_keys.append( + ( + f"{src_prefix}.layers.{i}.multihead_attn.out_proj.weight", + f"{dst_prefix}.layers.{i}.encoder_attn.out_proj.weight", + ) + ) + rename_keys.append( + ( + f"{src_prefix}.layers.{i}.multihead_attn.out_proj.bias", + f"{dst_prefix}.layers.{i}.encoder_attn.out_proj.bias", + ) + ) + rename_keys.append((f"{src_prefix}.layers.{i}.linear1.weight", f"{dst_prefix}.layers.{i}.fc1.weight")) + rename_keys.append((f"{src_prefix}.layers.{i}.linear1.bias", f"{dst_prefix}.layers.{i}.fc1.bias")) + rename_keys.append((f"{src_prefix}.layers.{i}.linear2.weight", f"{dst_prefix}.layers.{i}.fc2.weight")) + rename_keys.append((f"{src_prefix}.layers.{i}.linear2.bias", f"{dst_prefix}.layers.{i}.fc2.bias")) + rename_keys.append( + (f"{src_prefix}.layers.{i}.norm1.weight", f"{dst_prefix}.layers.{i}.self_attn_layer_norm.weight") + ) + rename_keys.append( + (f"{src_prefix}.layers.{i}.norm1.bias", f"{dst_prefix}.layers.{i}.self_attn_layer_norm.bias") + ) + rename_keys.append( + (f"{src_prefix}.layers.{i}.norm2.weight", f"{dst_prefix}.layers.{i}.encoder_attn_layer_norm.weight") + ) + rename_keys.append( + (f"{src_prefix}.layers.{i}.norm2.bias", f"{dst_prefix}.layers.{i}.encoder_attn_layer_norm.bias") + ) + rename_keys.append( + (f"{src_prefix}.layers.{i}.norm3.weight", f"{dst_prefix}.layers.{i}.final_layer_norm.weight") + ) + rename_keys.append( + (f"{src_prefix}.layers.{i}.norm3.bias", f"{dst_prefix}.layers.{i}.final_layer_norm.bias") + ) + + return rename_keys + + def replace_q_k_v_in_detr_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict): + dst_prefix: str = "transformer_module.decoder" + src_prefix: str = "sem_seg_head.predictor.transformer.decoder" + for i in range(self.config.decoder_config.decoder_layers): + # read in weights + bias of input projection layer of self-attention + in_proj_weight = src_state_dict.pop(f"{src_prefix}.layers.{i}.self_attn.in_proj_weight") + in_proj_bias = src_state_dict.pop(f"{src_prefix}.layers.{i}.self_attn.in_proj_bias") + # next, add query, keys and values (in that order) to the state dict + dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :] + 
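+            # in_proj_weight stacks the query, key and value projections along dim 0, each of size
+            # 256 here (the decoder hidden size), hence the :256 / 256:512 / -256: slices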
dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256] + dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] + dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512] + dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] + dst_state_dict[f"{dst_prefix}.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:] + # read in weights + bias of input projection layer of cross-attention + in_proj_weight_cross_attn = src_state_dict.pop(f"{src_prefix}.layers.{i}.multihead_attn.in_proj_weight") + in_proj_bias_cross_attn = src_state_dict.pop(f"{src_prefix}.layers.{i}.multihead_attn.in_proj_bias") + # next, add query, keys and values (in that order) of cross-attention to the state dict + dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :] + dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256] + dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[ + 256:512, : + ] + dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512] + dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :] + dst_state_dict[f"{dst_prefix}.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:] + + def replace_detr_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict): + dst_prefix: str = "transformer_module.decoder" + src_prefix: str = "sem_seg_head.predictor.transformer.decoder" + renamed_keys = self.rename_keys_in_detr_decoder(dst_state_dict, src_state_dict) + # add more + renamed_keys.extend( + [ + (f"{src_prefix}.norm.weight", f"{dst_prefix}.layernorm.weight"), + (f"{src_prefix}.norm.bias", f"{dst_prefix}.layernorm.bias"), + ] + ) + + self.pop_all(renamed_keys, dst_state_dict, src_state_dict) + + self.replace_q_k_v_in_detr_decoder(dst_state_dict, src_state_dict) + + def replace_transformer_module(self, dst_state_dict: StateDict, src_state_dict: StateDict): + dst_prefix: str = "transformer_module" + src_prefix: str = "sem_seg_head.predictor" + + self.replace_detr_decoder(dst_state_dict, src_state_dict) + + renamed_keys = [ + (f"{src_prefix}.query_embed.weight", f"{dst_prefix}.queries_embedder.weight"), + (f"{src_prefix}.input_proj.weight", f"{dst_prefix}.input_projection.weight"), + (f"{src_prefix}.input_proj.bias", f"{dst_prefix}.input_projection.bias"), + ] + + self.pop_all(renamed_keys, dst_state_dict, src_state_dict) + + def replace_instance_segmentation_module(self, dst_state_dict: StateDict, src_state_dict: StateDict): + # NOTE in our case we don't have a prefix, thus we removed the "." from the keys later on! 
+ dst_prefix: str = "" + src_prefix: str = "sem_seg_head.predictor" + + renamed_keys = [ + (f"{src_prefix}.class_embed.weight", f"{dst_prefix}class_predictor.weight"), + (f"{src_prefix}.class_embed.bias", f"{dst_prefix}class_predictor.bias"), + ] + + mlp_len = 3 + for i in range(mlp_len): + renamed_keys.extend( + [ + (f"{src_prefix}.mask_embed.layers.{i}.weight", f"{dst_prefix}mask_embedder.{i}.0.weight"), + (f"{src_prefix}.mask_embed.layers.{i}.bias", f"{dst_prefix}mask_embedder.{i}.0.bias"), + ] + ) + logger.info(f"Replacing keys {pformat(renamed_keys)}") + self.pop_all(renamed_keys, dst_state_dict, src_state_dict) + + def convert(self, mask_former: MaskFormerModel) -> MaskFormerModel: + dst_state_dict = TrackedStateDict(mask_former.state_dict()) + src_state_dict = self.original_model.state_dict() + + self.replace_pixel_module(dst_state_dict, src_state_dict) + self.replace_transformer_module(dst_state_dict, src_state_dict) + + logger.info(f"Missed keys are {pformat(dst_state_dict.diff())}") + logger.info(f"Not copied keys are {pformat(src_state_dict.keys())}") + logger.info("🙌 Done") + + mask_former.load_state_dict(dst_state_dict) + + return mask_former + + def convert_instance_segmentation( + self, mask_former: MaskFormerForInstanceSegmentation + ) -> MaskFormerForInstanceSegmentation: + dst_state_dict = TrackedStateDict(mask_former.state_dict()) + src_state_dict = self.original_model.state_dict() + + self.replace_instance_segmentation_module(dst_state_dict, src_state_dict) + + mask_former.load_state_dict(dst_state_dict) + + return mask_former + + @staticmethod + def using_dirs(checkpoints_dir: Path, config_dir: Path) -> Iterator[Tuple[object, Path, Path]]: + checkpoints: List[Path] = checkpoints_dir.glob("**/*.pkl") + + for checkpoint in checkpoints: + logger.info(f"💪 Converting {checkpoint.stem}") + # find associated config file + config: Path = config_dir / checkpoint.parents[0].stem / "swin" / f"{checkpoint.stem}.yaml" + + yield config, checkpoint + + +def test(original_model, our_model: MaskFormerForInstanceSegmentation, image_processor: MaskFormerImageProcessor): + with torch.no_grad(): + original_model = original_model.eval() + our_model = our_model.eval() + + im = prepare_img() + + tr = T.Compose( + [ + T.Resize((384, 384)), + T.ToTensor(), + T.Normalize( + mean=torch.tensor([123.675, 116.280, 103.530]) / 255.0, + std=torch.tensor([58.395, 57.120, 57.375]) / 255.0, + ), + ], + ) + + x = tr(im).unsqueeze(0) + + original_model_backbone_features = original_model.backbone(x.clone()) + + our_model_output: MaskFormerModelOutput = our_model.model(x.clone(), output_hidden_states=True) + + for original_model_feature, our_model_feature in zip( + original_model_backbone_features.values(), our_model_output.encoder_hidden_states + ): + assert torch.allclose( + original_model_feature, our_model_feature, atol=1e-3 + ), "The backbone features are not the same." 
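+        # backbone features match; next compare the pixel decoder outputs, then the full model's
+        # post-processed segmentation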
+ + original_model_pixel_out = original_model.sem_seg_head.pixel_decoder.forward_features( + original_model_backbone_features + ) + + assert torch.allclose( + original_model_pixel_out[0], our_model_output.pixel_decoder_last_hidden_state, atol=1e-4 + ), "The pixel decoder feature are not the same" + + # let's test the full model + original_model_out = original_model([{"image": x.squeeze(0)}]) + + original_segmentation = original_model_out[0]["sem_seg"] + + our_model_out: MaskFormerForInstanceSegmentationOutput = our_model(x) + + our_segmentation = image_processor.post_process_segmentation(our_model_out, target_size=(384, 384)) + + assert torch.allclose( + original_segmentation, our_segmentation, atol=1e-3 + ), "The segmentation image is not the same." + + logger.info("✅ Test passed!") + + +def get_name(checkpoint_file: Path): + model_name_raw: str = checkpoint_file.stem + # model_name_raw is something like maskformer_panoptic_swin_base_IN21k_384_bs64_554k + parent_name: str = checkpoint_file.parents[0].stem + backbone = "swin" + dataset = "" + if "coco" in parent_name: + dataset = "coco" + elif "ade" in parent_name: + dataset = "ade" + else: + raise ValueError(f"{parent_name} must be wrong since we didn't find 'coco' or 'ade' in it ") + + backbone_types = ["tiny", "small", "base", "large"] + + backbone_type = list(filter(lambda x: x in model_name_raw, backbone_types))[0] + + model_name = f"maskformer-{backbone}-{backbone_type}-{dataset}" + + return model_name + + +if __name__ == "__main__": + parser = ArgumentParser( + description="Command line to convert the original maskformers (with swin backbone) to our implementations." + ) + + parser.add_argument( + "--checkpoints_dir", + type=Path, + help=( + "A directory containing the model's checkpoints. The directory has to have the following structure:" + " //.pkl" + ), + ) + parser.add_argument( + "--configs_dir", + type=Path, + help=( + "A directory containing the model's configs, see detectron2 doc. The directory has to have the following" + " structure: //.yaml" + ), + ) + parser.add_argument( + "--pytorch_dump_folder_path", + required=True, + type=Path, + help="Path to the folder to output PyTorch models.", + ) + parser.add_argument( + "--maskformer_dir", + required=True, + type=Path, + help=( + "A path to MaskFormer's original implementation directory. 
You can download from here:" + " https://github.com/facebookresearch/MaskFormer" + ), + ) + + args = parser.parse_args() + + checkpoints_dir: Path = args.checkpoints_dir + config_dir: Path = args.configs_dir + save_directory: Path = args.pytorch_dump_folder_path + maskformer_dir: Path = args.maskformer_dir + # append the path to the parents to maskformer dir + sys.path.append(str(maskformer_dir.parent)) + # and import what's needed + from MaskFormer.mask_former import add_mask_former_config + from MaskFormer.mask_former.mask_former_model import MaskFormer as OriginalMaskFormer + + if not save_directory.exists(): + save_directory.mkdir(parents=True) + + for config_file, checkpoint_file in OriginalMaskFormerCheckpointToOursConverter.using_dirs( + checkpoints_dir, config_dir + ): + image_processor = OriginalMaskFormerConfigToImageProcessorConverter()(setup_cfg(Args(config_file=config_file))) + + original_config = setup_cfg(Args(config_file=config_file)) + mask_former_kwargs = OriginalMaskFormer.from_config(original_config) + + original_model = OriginalMaskFormer(**mask_former_kwargs).eval() + + DetectionCheckpointer(original_model).load(str(checkpoint_file)) + + config: MaskFormerConfig = OriginalMaskFormerConfigToOursConverter()(original_config) + + mask_former = MaskFormerModel(config=config).eval() + + converter = OriginalMaskFormerCheckpointToOursConverter(original_model, config) + + maskformer = converter.convert(mask_former) + + mask_former_for_instance_segmentation = MaskFormerForInstanceSegmentation(config=config).eval() + + mask_former_for_instance_segmentation.model = mask_former + mask_former_for_instance_segmentation = converter.convert_instance_segmentation( + mask_former_for_instance_segmentation + ) + + test(original_model, mask_former_for_instance_segmentation, image_processor) + + model_name = get_name(checkpoint_file) + logger.info(f"🪄 Saving {model_name}") + + image_processor.save_pretrained(save_directory / model_name) + mask_former_for_instance_segmentation.save_pretrained(save_directory / model_name) + + image_processor.push_to_hub( + repo_path_or_name=save_directory / model_name, + commit_message="Add model", + use_temp_dir=True, + ) + mask_former_for_instance_segmentation.push_to_hub( + repo_path_or_name=save_directory / model_name, + commit_message="Add model", + use_temp_dir=True, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/maskformer/modeling_maskformer_swin.py b/venv/lib/python3.10/site-packages/transformers/models/maskformer/modeling_maskformer_swin.py new file mode 100644 index 0000000000000000000000000000000000000000..b4714860e6bffb4c70678ff6f1867262e2cc4f1b --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/maskformer/modeling_maskformer_swin.py @@ -0,0 +1,912 @@ +# coding=utf-8 +# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""MaskFormer Swin Transformer. 
The reason Swin Transformer is implemented here is because MaskFormer uses the hidden +states before downsampling, which is different from the default Swin Transformer.""" + +import collections.abc +import math +from dataclasses import dataclass +from typing import Optional, Tuple + +import torch +from torch import Tensor, nn + +from ...activations import ACT2FN +from ...file_utils import ModelOutput +from ...modeling_outputs import BackboneOutput +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer +from ...utils.backbone_utils import BackboneMixin +from .configuration_maskformer_swin import MaskFormerSwinConfig + + +@dataclass +class MaskFormerSwinModelOutputWithPooling(ModelOutput): + """ + Class for MaskFormerSwinModel's outputs that also contains the spatial dimensions of the hidden states. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): + Last layer hidden-state after a mean pooling operation. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + hidden_states_spatial_dimensions (`tuple(tuple(int, int))`, *optional*): + A tuple containing the spatial dimension of each `hidden_state` needed to reshape the `hidden_states` to + `batch, channels, height, width`. Due to padding, their spatial size cannot be inferred before the + `forward` method. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + last_hidden_state: torch.FloatTensor = None + pooler_output: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + hidden_states_spatial_dimensions: Tuple[Tuple[int, int]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class MaskFormerSwinBaseModelOutput(ModelOutput): + """ + Class for SwinEncoder's outputs. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + hidden_states_spatial_dimensions (`tuple(tuple(int, int))`, *optional*): + A tuple containing the spatial dimension of each `hidden_state` needed to reshape the `hidden_states` to + `batch, channels, height, width`. 
Due to padding, their spatial size cannot inferred before the `forward` + method. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + hidden_states_spatial_dimensions: Tuple[Tuple[int, int]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +# Copied from transformers.models.swin.modeling_swin.window_partition +def window_partition(input_feature, window_size): + """ + Partitions the given input into windows. + """ + batch_size, height, width, num_channels = input_feature.shape + input_feature = input_feature.view( + batch_size, height // window_size, window_size, width // window_size, window_size, num_channels + ) + windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels) + return windows + + +# Copied from transformers.models.swin.modeling_swin.window_reverse +def window_reverse(windows, window_size, height, width): + """ + Merges windows to produce higher resolution features. + """ + num_channels = windows.shape[-1] + windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels) + windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels) + return windows + + +# Copied from transformers.models.swin.modeling_swin.drop_path +def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: + """ + Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, + however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the + layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the + argument. + """ + if drop_prob == 0.0 or not training: + return input + keep_prob = 1 - drop_prob + shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) + random_tensor.floor_() # binarize + output = input.div(keep_prob) * random_tensor + return output + + +class MaskFormerSwinEmbeddings(nn.Module): + """ + Construct the patch and position embeddings. 
+ """ + + def __init__(self, config): + super().__init__() + + self.patch_embeddings = MaskFormerSwinPatchEmbeddings(config) + num_patches = self.patch_embeddings.num_patches + self.patch_grid = self.patch_embeddings.grid_size + + if config.use_absolute_embeddings: + self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim)) + else: + self.position_embeddings = None + + self.norm = nn.LayerNorm(config.embed_dim) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, pixel_values): + embeddings, output_dimensions = self.patch_embeddings(pixel_values) + embeddings = self.norm(embeddings) + + if self.position_embeddings is not None: + embeddings = embeddings + self.position_embeddings + + embeddings = self.dropout(embeddings) + + return embeddings, output_dimensions + + +# Copied from transformers.models.swin.modeling_swin.SwinPatchEmbeddings +class MaskFormerSwinPatchEmbeddings(nn.Module): + """ + This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial + `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a + Transformer. + """ + + def __init__(self, config): + super().__init__() + image_size, patch_size = config.image_size, config.patch_size + num_channels, hidden_size = config.num_channels, config.embed_dim + image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) + patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) + num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.num_patches = num_patches + self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1]) + + self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) + + def maybe_pad(self, pixel_values, height, width): + if width % self.patch_size[1] != 0: + pad_values = (0, self.patch_size[1] - width % self.patch_size[1]) + pixel_values = nn.functional.pad(pixel_values, pad_values) + if height % self.patch_size[0] != 0: + pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0]) + pixel_values = nn.functional.pad(pixel_values, pad_values) + return pixel_values + + def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]: + _, num_channels, height, width = pixel_values.shape + if num_channels != self.num_channels: + raise ValueError( + "Make sure that the channel dimension of the pixel values match with the one set in the configuration." + ) + # pad the input to be divisible by self.patch_size, if needed + pixel_values = self.maybe_pad(pixel_values, height, width) + embeddings = self.projection(pixel_values) + _, _, height, width = embeddings.shape + output_dimensions = (height, width) + embeddings = embeddings.flatten(2).transpose(1, 2) + + return embeddings, output_dimensions + + +# Copied from transformers.models.swin.modeling_swin.SwinPatchMerging +class MaskFormerSwinPatchMerging(nn.Module): + """ + Patch Merging Layer. + + Args: + input_resolution (`Tuple[int]`): + Resolution of input feature. + dim (`int`): + Number of input channels. + norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`): + Normalization layer class. 
+ """ + + def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None: + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) + self.norm = norm_layer(4 * dim) + + def maybe_pad(self, input_feature, height, width): + should_pad = (height % 2 == 1) or (width % 2 == 1) + if should_pad: + pad_values = (0, 0, 0, width % 2, 0, height % 2) + input_feature = nn.functional.pad(input_feature, pad_values) + + return input_feature + + def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor: + height, width = input_dimensions + # `dim` is height * width + batch_size, dim, num_channels = input_feature.shape + + input_feature = input_feature.view(batch_size, height, width, num_channels) + # pad input to be disible by width and height, if needed + input_feature = self.maybe_pad(input_feature, height, width) + # [batch_size, height/2, width/2, num_channels] + input_feature_0 = input_feature[:, 0::2, 0::2, :] + # [batch_size, height/2, width/2, num_channels] + input_feature_1 = input_feature[:, 1::2, 0::2, :] + # [batch_size, height/2, width/2, num_channels] + input_feature_2 = input_feature[:, 0::2, 1::2, :] + # [batch_size, height/2, width/2, num_channels] + input_feature_3 = input_feature[:, 1::2, 1::2, :] + # batch_size height/2 width/2 4*num_channels + input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1) + input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # batch_size height/2*width/2 4*C + + input_feature = self.norm(input_feature) + input_feature = self.reduction(input_feature) + + return input_feature + + +# Copied from transformers.models.swin.modeling_swin.SwinDropPath with Swin->MaskFormerSwin +class MaskFormerSwinDropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" + + def __init__(self, drop_prob: Optional[float] = None) -> None: + super().__init__() + self.drop_prob = drop_prob + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + return drop_path(hidden_states, self.drop_prob, self.training) + + def extra_repr(self) -> str: + return "p={}".format(self.drop_prob) + + +# Copied from transformers.models.swin.modeling_swin.SwinSelfAttention with Swin->MaskFormerSwin +class MaskFormerSwinSelfAttention(nn.Module): + def __init__(self, config, dim, num_heads, window_size): + super().__init__() + if dim % num_heads != 0: + raise ValueError( + f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})" + ) + + self.num_attention_heads = num_heads + self.attention_head_size = int(dim / num_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + self.window_size = ( + window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size) + ) + + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads) + ) + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij")) + coords_flatten = torch.flatten(coords, 1) + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] + relative_coords = relative_coords.permute(1, 2, 0).contiguous() + 
relative_coords[:, :, 0] += self.window_size[0] - 1 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) + self.register_buffer("relative_position_index", relative_position_index) + + self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) + self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) + self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + batch_size, dim, num_channels = hidden_states.shape + mixed_query_layer = self.query(hidden_states) + + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + query_layer = self.transpose_for_scores(mixed_query_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + + relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)] + relative_position_bias = relative_position_bias.view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1 + ) + + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() + attention_scores = attention_scores + relative_position_bias.unsqueeze(0) + + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in MaskFormerSwinModel forward() function) + mask_shape = attention_mask.shape[0] + attention_scores = attention_scores.view( + batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim + ) + attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0) + attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim) + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
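To make the relative-position bookkeeping above concrete, here is a small sketch reproducing the index construction for a hypothetical 2x2 window; each entry of the resulting matrix selects one row of the (2*Wh - 1) * (2*Ww - 1) bias table. `torch.meshgrid` is used directly here, whereas the module above goes through the `pytorch_utils` wrapper:

```python
import torch

# the same index construction as above, for a hypothetical 2x2 window
window_size = (2, 2)
coords_h, coords_w = torch.arange(window_size[0]), torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid(coords_h, coords_w, indexing="ij"))     # (2, Wh, Ww)
coords_flatten = torch.flatten(coords, 1)                                   # (2, Wh*Ww)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]   # pairwise (dh, dw) offsets
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += window_size[0] - 1                              # shift dh into [0, 2*Wh - 2]
relative_coords[:, :, 1] += window_size[1] - 1                              # shift dw into [0, 2*Ww - 2]
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = relative_coords.sum(-1)

print(relative_position_index)
# tensor([[4, 3, 1, 0],
#         [5, 4, 2, 1],
#         [7, 6, 4, 3],
#         [8, 7, 5, 4]])
# each value picks one of the (2*Wh - 1) * (2*Ww - 1) = 9 rows of relative_position_bias_table
```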
+ attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + return outputs + + +# Copied from transformers.models.swin.modeling_swin.SwinSelfOutput with Swin->MaskFormerSwin +class MaskFormerSwinSelfOutput(nn.Module): + def __init__(self, config, dim): + super().__init__() + self.dense = nn.Linear(dim, dim) + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + + return hidden_states + + +# Copied from transformers.models.swin.modeling_swin.SwinAttention with Swin->MaskFormerSwin +class MaskFormerSwinAttention(nn.Module): + def __init__(self, config, dim, num_heads, window_size): + super().__init__() + self.self = MaskFormerSwinSelfAttention(config, dim, num_heads, window_size) + self.output = MaskFormerSwinSelfOutput(config, dim) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +# Copied from transformers.models.swin.modeling_swin.SwinIntermediate with Swin->MaskFormerSwin +class MaskFormerSwinIntermediate(nn.Module): + def __init__(self, config, dim): + super().__init__() + self.dense = nn.Linear(dim, int(config.mlp_ratio * dim)) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +# Copied from transformers.models.swin.modeling_swin.SwinOutput with Swin->MaskFormerSwin +class MaskFormerSwinOutput(nn.Module): + def __init__(self, config, dim): + super().__init__() + self.dense = nn.Linear(int(config.mlp_ratio * dim), dim) + self.dropout = 
nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + return hidden_states + + +class MaskFormerSwinLayer(nn.Module): + def __init__(self, config, dim, input_resolution, num_heads, shift_size=0): + super().__init__() + self.shift_size = shift_size + self.window_size = config.window_size + self.input_resolution = input_resolution + self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) + self.attention = MaskFormerSwinAttention(config, dim, num_heads, self.window_size) + self.drop_path = ( + MaskFormerSwinDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity() + ) + self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) + self.intermediate = MaskFormerSwinIntermediate(config, dim) + self.output = MaskFormerSwinOutput(config, dim) + + def get_attn_mask(self, input_resolution): + if self.shift_size > 0: + # calculate attention mask for SW-MSA + height, width = input_resolution + img_mask = torch.zeros((1, height, width, 1)) + height_slices = ( + slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None), + ) + width_slices = ( + slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None), + ) + count = 0 + for height_slice in height_slices: + for width_slice in width_slices: + img_mask[:, height_slice, width_slice, :] = count + count += 1 + + mask_windows = window_partition(img_mask, self.window_size) + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + else: + attn_mask = None + return attn_mask + + def maybe_pad(self, hidden_states, height, width): + pad_left = pad_top = 0 + pad_rigth = (self.window_size - width % self.window_size) % self.window_size + pad_bottom = (self.window_size - height % self.window_size) % self.window_size + pad_values = (0, 0, pad_left, pad_rigth, pad_top, pad_bottom) + hidden_states = nn.functional.pad(hidden_states, pad_values) + return hidden_states, pad_values + + def forward(self, hidden_states, input_dimensions, head_mask=None, output_attentions=False): + height, width = input_dimensions + batch_size, dim, channels = hidden_states.size() + shortcut = hidden_states + + hidden_states = self.layernorm_before(hidden_states) + hidden_states = hidden_states.view(batch_size, height, width, channels) + # pad hidden_states to multiples of window size + hidden_states, pad_values = self.maybe_pad(hidden_states, height, width) + + _, height_pad, width_pad, _ = hidden_states.shape + # cyclic shift + if self.shift_size > 0: + shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + else: + shifted_hidden_states = hidden_states + + # partition windows + hidden_states_windows = window_partition(shifted_hidden_states, self.window_size) + hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels) + attn_mask = self.get_attn_mask((height_pad, width_pad)) + if attn_mask is not None: + attn_mask = attn_mask.to(hidden_states_windows.device) + + self_attention_outputs = self.attention( + hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions + ) + + attention_output = 
self_attention_outputs[0] + + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels) + shifted_windows = window_reverse( + attention_windows, self.window_size, height_pad, width_pad + ) # B height' width' C + + # reverse cyclic shift + if self.shift_size > 0: + attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + attention_windows = shifted_windows + + was_padded = pad_values[3] > 0 or pad_values[5] > 0 + if was_padded: + attention_windows = attention_windows[:, :height, :width, :].contiguous() + + attention_windows = attention_windows.view(batch_size, height * width, channels) + + hidden_states = shortcut + self.drop_path(attention_windows) + + layer_output = self.layernorm_after(hidden_states) + layer_output = self.intermediate(layer_output) + layer_output = hidden_states + self.output(layer_output) + + outputs = (layer_output,) + outputs + + return outputs + + +class MaskFormerSwinStage(nn.Module): + # Copied from transformers.models.swin.modeling_swin.SwinStage.__init__ with Swin->MaskFormerSwin + def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample): + super().__init__() + self.config = config + self.dim = dim + self.blocks = nn.ModuleList( + [ + MaskFormerSwinLayer( + config=config, + dim=dim, + input_resolution=input_resolution, + num_heads=num_heads, + shift_size=0 if (i % 2 == 0) else config.window_size // 2, + ) + for i in range(depth) + ] + ) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm) + else: + self.downsample = None + + self.pointing = False + + def forward( + self, hidden_states, input_dimensions, head_mask=None, output_attentions=False, output_hidden_states=False + ): + all_hidden_states = () if output_hidden_states else None + + height, width = input_dimensions + for i, block_module in enumerate(self.blocks): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + + block_hidden_states = block_module(hidden_states, input_dimensions, layer_head_mask, output_attentions) + + hidden_states = block_hidden_states[0] + + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if self.downsample is not None: + height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2 + output_dimensions = (height, width, height_downsampled, width_downsampled) + hidden_states = self.downsample(hidden_states, input_dimensions) + else: + output_dimensions = (height, width, height, width) + + return hidden_states, output_dimensions, all_hidden_states + + +class MaskFormerSwinEncoder(nn.Module): + # Copied from transformers.models.swin.modeling_swin.SwinEncoder.__init__ with Swin->MaskFormerSwin + def __init__(self, config, grid_size): + super().__init__() + self.num_layers = len(config.depths) + self.config = config + dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] + self.layers = nn.ModuleList( + [ + MaskFormerSwinStage( + config=config, + dim=int(config.embed_dim * 2**i_layer), + input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)), + depth=config.depths[i_layer], + num_heads=config.num_heads[i_layer], + drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])], + 
downsample=MaskFormerSwinPatchMerging if (i_layer < self.num_layers - 1) else None, + ) + for i_layer in range(self.num_layers) + ] + ) + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states, + input_dimensions, + head_mask=None, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + ): + all_hidden_states = () if output_hidden_states else None + all_input_dimensions = () + all_self_attentions = () if output_attentions else None + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + for i, layer_module in enumerate(self.layers): + layer_head_mask = head_mask[i] if head_mask is not None else None + + if self.gradient_checkpointing and self.training: + layer_hidden_states, output_dimensions, layer_all_hidden_states = self._gradient_checkpointing_func( + layer_module.__call__, + hidden_states, + layer_head_mask, + output_attentions, + ) + else: + layer_hidden_states, output_dimensions, layer_all_hidden_states = layer_module( + hidden_states, + input_dimensions, + layer_head_mask, + output_attentions, + output_hidden_states, + ) + + input_dimensions = (output_dimensions[-2], output_dimensions[-1]) + all_input_dimensions += (input_dimensions,) + if output_hidden_states: + all_hidden_states += (layer_all_hidden_states,) + + hidden_states = layer_hidden_states + + if output_attentions: + all_self_attentions = all_self_attentions + (layer_all_hidden_states[1],) + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) + + return MaskFormerSwinBaseModelOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + hidden_states_spatial_dimensions=all_input_dimensions, + attentions=all_self_attentions, + ) + + +# Copied from transformers.models.swin.modeling_swin.SwinPreTrainedModel with Swin->MaskFormerSwin, swin->model +class MaskFormerSwinPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = MaskFormerSwinConfig + base_model_prefix = "model" + main_input_name = "pixel_values" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, (nn.Linear, nn.Conv2d)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + +class MaskFormerSwinModel(MaskFormerSwinPreTrainedModel): + def __init__(self, config, add_pooling_layer=True): + super().__init__(config) + self.config = config + self.num_layers = len(config.depths) + self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1)) + + self.embeddings = MaskFormerSwinEmbeddings(config) + self.encoder = MaskFormerSwinEncoder(config, self.embeddings.patch_grid) + + self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps) + self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None + + def get_input_embeddings(self): + return self.embeddings.patch_embeddings + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. 
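The encoder constructor above spreads `config.drop_path_rate` linearly over all blocks with `torch.linspace` and hands each stage its slice of rates. A sketch with hypothetical Swin-T-like depths (the printed rates are approximate):

```python
import torch

# hypothetical values, not the config defaults in this file
depths, drop_path_rate = [2, 2, 6, 2], 0.3

dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]   # one rate per block, 0.0 -> 0.3
per_stage = [dpr[sum(depths[:i]) : sum(depths[: i + 1])] for i in range(len(depths))]

for i, rates in enumerate(per_stage):
    print(f"stage {i}: {[round(r, 3) for r in rates]}")
# stage 0: [0.0, 0.027]
# stage 1: [0.055, 0.082]
# stage 2: [0.109, 0.136, 0.164, 0.191, 0.218, 0.245]
# stage 3: [0.273, 0.3]
# deeper blocks are dropped with higher probability; MaskFormerSwinDropPath applies the rate only during training
```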
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + def forward( + self, + pixel_values=None, + head_mask=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + # Prepare head mask if needed + # 1.0 in head_mask indicates we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, len(self.config.depths)) + + embedding_output, input_dimensions = self.embeddings(pixel_values) + + encoder_outputs = self.encoder( + embedding_output, + input_dimensions, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = encoder_outputs.last_hidden_state if return_dict else encoder_outputs[0] + sequence_output = self.layernorm(sequence_output) + + pooled_output = None + if self.pooler is not None: + pooled_output = self.pooler(sequence_output.transpose(1, 2)) + pooled_output = torch.flatten(pooled_output, 1) + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + hidden_states_spatial_dimensions = (input_dimensions,) + encoder_outputs.hidden_states_spatial_dimensions + + return MaskFormerSwinModelOutputWithPooling( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + hidden_states_spatial_dimensions=hidden_states_spatial_dimensions, + attentions=encoder_outputs.attentions, + ) + + +class MaskFormerSwinBackbone(MaskFormerSwinPreTrainedModel, BackboneMixin): + """ + MaskFormerSwin backbone, designed especially for the MaskFormer framework. + + This class reshapes `hidden_states` from `(batch_size, sequence_length, hidden_size)` to `(batch_size, + num_channels, height, width)`. It also adds additional layernorms after each stage. + + Args: + config (`MaskFormerSwinConfig`): + The configuration used by [`MaskFormerSwinModel`].
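A minimal sketch of the reshape this backbone performs on each selected hidden state, with hypothetical sizes; in the real forward pass the height and width come from `hidden_states_spatial_dimensions` as described above:

```python
import torch

# "(batch, seq_len, hidden) -> (batch, channels, height, width)", as the backbone does per stage
batch_size, height, width, hidden_size = 2, 28, 28, 192   # hypothetical stage-2-like sizes

hidden_state = torch.randn(batch_size, height * width, hidden_size)
feature_map = hidden_state.permute(0, 2, 1).view(batch_size, hidden_size, height, width).contiguous()
assert feature_map.shape == (2, 192, 28, 28)  # the 4D layout an FPN-style pixel decoder consumes
```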
+ """ + + def __init__(self, config: MaskFormerSwinConfig): + super().__init__(config) + super()._init_backbone(config) + + self.model = MaskFormerSwinModel(config) + if "stem" in self.out_features: + raise ValueError("This backbone does not support 'stem' in the `out_features`.") + self.num_features = [config.embed_dim] + [int(config.embed_dim * 2**i) for i in range(len(config.depths))] + self.hidden_states_norms = nn.ModuleList( + [nn.LayerNorm(num_channels) for num_channels in self.num_features[1:]] + ) + + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + pixel_values: Tensor, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> BackboneOutput: + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + + outputs = self.model( + pixel_values, output_hidden_states=True, output_attentions=output_attentions, return_dict=True + ) + + # we skip the stem + hidden_states = outputs.hidden_states[1:] + + # we need to reshape the hidden states to their original spatial dimensions + # spatial dimensions contains all the heights and widths of each stage, including after the embeddings + spatial_dimensions: Tuple[Tuple[int, int]] = outputs.hidden_states_spatial_dimensions + feature_maps = () + for i, (hidden_state, stage, (height, width)) in enumerate( + zip(hidden_states, self.stage_names[1:], spatial_dimensions) + ): + norm = self.hidden_states_norms[i] + # the last element corespond to the layer's last block output but before patch merging + hidden_state_unpolled = hidden_state[-1] + hidden_state_norm = norm(hidden_state_unpolled) + # the pixel decoder (FPN) expects 3D tensors (features) + batch_size, _, hidden_size = hidden_state_norm.shape + # reshape "b (h w) d -> b d h w" + hidden_state_permuted = ( + hidden_state_norm.permute(0, 2, 1).view((batch_size, hidden_size, height, width)).contiguous() + ) + if stage in self.out_features: + feature_maps += (hidden_state_permuted,) + + if not return_dict: + output = (feature_maps,) + if output_hidden_states: + output += (outputs.hidden_states,) + if output_attentions: + output += (outputs.attentions,) + return output + + return BackboneOutput( + feature_maps=feature_maps, + hidden_states=outputs.hidden_states if output_hidden_states else None, + attentions=outputs.attentions, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/mega/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/mega/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..728499ef2d385f6cc652525605b9912f22af209a --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/mega/__init__.py @@ -0,0 +1,70 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, +) + + +_import_structure = { + "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_mega"] = [ + "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST", + "MegaForCausalLM", + "MegaForMaskedLM", + "MegaForMultipleChoice", + "MegaForQuestionAnswering", + "MegaForSequenceClassification", + "MegaForTokenClassification", + "MegaModel", + "MegaPreTrainedModel", + ] + +if TYPE_CHECKING: + from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_mega import ( + MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, + MegaForCausalLM, + MegaForMaskedLM, + MegaForMultipleChoice, + MegaForQuestionAnswering, + MegaForSequenceClassification, + MegaForTokenClassification, + MegaModel, + MegaPreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/mega/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mega/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7743f0d865596731ebb03c37920f2f5ba5fcb4d Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mega/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/mega/__pycache__/configuration_mega.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mega/__pycache__/configuration_mega.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0dcecc279a4038aa054a3494798e0fc53c8b0f9e Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mega/__pycache__/configuration_mega.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/mega/__pycache__/convert_mega_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mega/__pycache__/convert_mega_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ea7336c54090aa05859d6bdcf6e077fbb94531b Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mega/__pycache__/convert_mega_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/mega/__pycache__/modeling_mega.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mega/__pycache__/modeling_mega.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..186a1f2a4ac31b57c547383894fa7eea9d527493 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mega/__pycache__/modeling_mega.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/mega/configuration_mega.py 
b/venv/lib/python3.10/site-packages/transformers/models/mega/configuration_mega.py new file mode 100644 index 0000000000000000000000000000000000000000..993a21cf7035d665244869ce8e30ff4b997ebb73 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/mega/configuration_mega.py @@ -0,0 +1,242 @@ +# coding=utf-8 +# Copyright 2023 The Mega Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" MEGA configuration""" +from collections import OrderedDict +from typing import Mapping + +from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class MegaConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`MegaModel`]. It is used to instantiate a Mega + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the Mega + [mnaylor/mega-base-wikitext](https://huggingface.co/mnaylor/mega-base-wikitext) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 30522): + Vocabulary size of the Mega model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`MegaModel`]. + hidden_size (`int`, *optional*, defaults to 128): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 4): + Number of hidden layers in the Mega encoder. + intermediate_size (`int`, *optional*, defaults to 256): + Dimensionality of the hidden size (self-attention value projection) within the Mega encoder + ema_projection_size (`int`, *optional*, defaults to 16): + Dimensionality of the MegaMultiDimensionDampedEma + bidirectional (`bool`, *optional*, defaults to `True`): + Whether the MegaMultiDimensionDampedEma used in Mega's self-attention should work bidirectionally (`True`) + or unidirectionally (`False`). Bidirectional EMA is incompatible with causal decoding, so this should be + False if you intend to use the model as a decoder. + shared_representation_size (`int`, *optional*, defaults to 64): + Dimensionality of the linear projection for shared representation of self-attention queries and keys + use_chunking (`bool`, *optional*, defaults to `False`): + Whether to chunk inputs for linear self-attention complexity (described as Mega-chunk in the paper) + chunk_size (`int`, *optional*, defaults to -1): + If `use_chunking` is set to `True`, determines the size of the chunks to apply to the input sequence. 
If + chunking is used, input sequences must be padded to a multiple of `chunk_size` + truncation (`int`, *optional*): + If specified, the sequence length for which to truncate MegaMultiDimensionDampedEma + normalize_before_mega (`bool`, *optional*, defaults to `True`): + Whether to normalize before (`True`) or after (`False`) passing through Mega encoder blocks + normalization_type (`str`, *optional*, defaults to `"scalenorm"`): + Type of normalization to use in Mega encoder blocks. Choose one of `"scalenorm"`, `"layernorm"`, + `"rmsnorm"`, `"batchnorm"`, or `"syncbatchnorm"` (GPU required for syncbatchnorm) + norm_affine (`bool`, *optional*, defaults to `True`): + If `True`, applies a parameterized affine transformation to inputs during normalization + activation (`str`, *optional*, defaults to `"silu"`): + Activation function to apply within Mega encoder blocks. Choose one of `"silu"`, `"relu"`, `"linear"`, + `"gelu"`, or `"gelu_accurate"` + attention_activation (`str`, *optional*, defaults to `"softmax"`): + Activation function to apply for single-headed self-attention (a la Transformer). Choose one of + `"softmax"`, `"laplace"`, or `"relu2"` + dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for EMA self-attention + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + use_feature_dropout (`bool`, *optional*, defaults to `False`): + Whether to use feature-based (`True`) or standard dropout (`False`) + use_normalized_ffn (`bool`, *optional*, defaults to `True`): + Whether to use the normalized feed-forward sub-layer in Mega blocks (`True`) or pass Mega encoder output + as-is (`False`) + nffn_hidden_size (`int`, *optional*, defaults to 256): + If using the normalized feed-forward network (NFFN) layer within Mega (`use_normalized_ffn = True`), this + is the hidden size of the NFFN + normalize_before_ffn (`bool`, *optional*, defaults to `True`): + Whether to normalize before (`True`) or after (`False`) the feed-forward portion of NFFN + nffn_activation_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for the NFFN component. + max_positions (`int`, *optional*, defaults to 2048): + The maximum sequence length to use for positional representations. For `"simple"` relative positional bias, + this is a hard limit on input length; `"rotary"` relative positional bias will extrapolate to longer + sequences + add_token_type_embeddings (`bool`, *optional*, defaults to `True`): + Whether to account for token types in embeddings. Left as optional to maintain compatibility with original + implementation while adding support for token types. + type_vocab_size (`int`, *optional*, defaults to 2): + The vocabulary size of the `token_type_ids` passed when calling [`MegaModel`]. Only used if + `add_token_type_embeddings = True` + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + ema_delta_alpha_range (`float`, *optional*, defaults to 0.2): + The standard deviation for initializing the delta (damping factor) and alpha (decay factor) parameters in + MegaMultiDimensionDampedEma. 
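The `chunk_size` requirement described above can be met with simple right-padding. A minimal sketch with hypothetical tensors, assuming `pad_token_id=1` (the constructor default further down):

```python
import torch

# sketch of padding a batch to a multiple of chunk_size before calling a chunked Mega model
chunk_size, pad_token_id = 128, 1
input_ids = torch.randint(2, 30522, (1, 300))   # hypothetical batch; 300 is not a multiple of 128

pad_len = (chunk_size - input_ids.shape[1] % chunk_size) % chunk_size   # 84 tokens to reach 384 = 3 * 128
input_ids = torch.nn.functional.pad(input_ids, (0, pad_len), value=pad_token_id)
attention_mask = (input_ids != pad_token_id).long()

assert input_ids.shape[1] % chunk_size == 0
```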
+ ema_beta_range (`float`, *optional*, defaults to 0.02): + The standard deviation for initializing the beta parameter (expansion matrix) in + MegaMultiDimensionDampedEma. + ema_gamma_omega_range (`float`, *optional*, defaults to 1.0): + The standard deviation for initializing the gamma (projection matrix) and omega (residual weight) + parameters in MultiDimensionEMA. + relative_positional_bias (`str`, *optional*, defaults to `"rotary"`): + Type of relative positional encoding. Choose one of `"rotary"` or `"simple"`. If `"simple"` is selected, + `max_positions` is used as a limit on input size, while `"rotary"` extrapolates beyond `max_positions`. + is_decoder (`bool`, *optional*, defaults to `False`): + Whether the model is used as a decoder or not. If `False`, the model is used as an encoder. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + classifier_dropout (`float`, *optional*): + The dropout ratio for the classification head. + add_lm_hidden_dense_layer (`bool`, *optional*, defaults to `True`): + Whether to include a hidden layer for projection between encoder outputs and LM heads (`True`) or pass + hidden states directly to LM head (`False`). Remains optional for compatibility with original + implementation + + Examples: + + ```python + >>> from transformers import MegaConfig, MegaModel + + >>> # Initializing a Mega configuration + >>> configuration = MegaConfig() + + >>> # Initializing a model (with random weights) from the configuration + >>> model = MegaModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "mega" + + def __init__( + self, + vocab_size=30522, + hidden_size=128, + num_hidden_layers=4, + intermediate_size=256, + ema_projection_size=16, + bidirectional=True, + shared_representation_size=64, + use_chunking=False, + chunk_size=-1, + truncation=None, + normalize_before_mega=True, + normalization_type="scalenorm", + norm_affine=True, + activation="silu", + attention_activation="softmax", + dropout_prob=0.1, + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + use_feature_dropout=False, + use_normalized_ffn=True, + nffn_hidden_size=256, + normalize_before_ffn=True, + nffn_activation_dropout_prob=0.1, + max_positions=2048, + add_token_type_embeddings=False, + type_vocab_size=2, + initializer_range=0.02, + ema_delta_alpha_range=0.2, + ema_beta_range=0.02, + ema_gamma_omega_range=1.0, + pad_token_id=1, + bos_token_id=0, + eos_token_id=2, + relative_positional_bias="rotary", + classifier_dropout=None, + use_cache=True, + add_lm_hidden_dense_layer=True, + **kwargs, + ): + super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.activation = activation + self.attention_activation = attention_activation + self.intermediate_size = intermediate_size + self.ema_projection_size = ema_projection_size + self.bidirectional = bidirectional + self.shared_representation_size = shared_representation_size + self.use_chunking = use_chunking + self.chunk_size = chunk_size + self.truncation = truncation + self.normalize_before_mega = normalize_before_mega + self.normalization_type = normalization_type + self.norm_affine = norm_affine + self.dropout_prob = dropout_prob + self.hidden_dropout_prob = 
hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.use_feature_dropout = use_feature_dropout + self.use_normalized_ffn = use_normalized_ffn + self.nffn_hidden_size = nffn_hidden_size + self.normalize_before_ffn = normalize_before_ffn + self.nffn_activation_dropout_prob = nffn_activation_dropout_prob + self.max_positions = max_positions + self.add_token_type_embeddings = add_token_type_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + self.ema_delta_alpha_range = ema_delta_alpha_range + self.ema_beta_range = ema_beta_range + self.ema_gamma_omega_range = ema_gamma_omega_range + self.relative_positional_bias = relative_positional_bias + self.use_cache = use_cache + self.classifier_dropout = classifier_dropout + self.add_lm_hidden_dense_layer = add_lm_hidden_dense_layer + self.num_attention_heads = 1 # not used but required by Hugging Face + + +class MegaOnnxConfig(OnnxConfig): + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + if self.task == "multiple-choice": + dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} + else: + dynamic_axis = {0: "batch", 1: "sequence"} + return OrderedDict( + [ + ("input_ids", dynamic_axis), + ("attention_mask", dynamic_axis), + ] + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/mega/convert_mega_original_pytorch_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/mega/convert_mega_original_pytorch_checkpoint_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..2fe75ba27324fd2903e6116e659d189643341167 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/mega/convert_mega_original_pytorch_checkpoint_to_pytorch.py @@ -0,0 +1,291 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Convert Mega pretrained checkpoint. Built to convert the Masked LM checkpoint located at +https://huggingface.co/mnaylor/mega-wikitext-103 + +Requirements: + - clone the Mega repo and install fairseq from there + 1. git clone https://github.com/facebookresearch/mega.git + 2. cd mega && pip install -e + - clone the pretrained weights for the original implementation from the hugging face repo + * use this location as the path for pretrained weights +""" +import argparse + +# utilities to import the model weights and config file +import os +import pickle as pkl + +# PyTorch + new model classes +import torch +from torch import nn + +from transformers import AutoTokenizer, MegaConfig, MegaForMaskedLM + + +# import the EncoderLayer class used to pretrain +# !! NOTE !! 
this requires the version of fairseq that is built when you install the Mega source +try: + from fairseq.modules.mega_layer import MegaEncoderLayer +except ImportError: + raise ImportError("You need to install the version of fairseq from the Mega repo!") + + +# define the wrapper classes used to train the MLM (see colab notebook below) +# https://colab.research.google.com/drive/1qfUO6o5HRdxBblWlw058HVyvaEPhPpH8?usp=sharing +# MegaLM outputs hidden states +class MegaLM(nn.Module): + "The base class for our Mega encoder - given input IDs, embed text and return encoder output" + + def __init__(self, mega_args, depth, vocab_size): + super().__init__() + self.mega_args = mega_args + self.embedding_layer = nn.Embedding(vocab_size, self.mega_args.encoder_embed_dim) + self.encoders = nn.ModuleList([MegaEncoderLayer(self.mega_args) for _ in range(depth)]) + self.depth = depth + + def forward(self, input_ids, attention_mask, batch_first=True, ignore_mask_value=0): + """ + Code for a forward pass - expects input_ids and attention_mask to come from a Hugging Face tokenizer as PyTorch + tensors, and returns a tensor of size (batch, n_classes) containing classification logits + + Other options: + - batch_first: boolean indicating whether the batch dimension is first in input_ids (default: True, which + aligns with the HF tokenizer behavior) + - ignore_mask_value: the value in attention_mask that identifies tokens that should be ignored (default: 0, + which aligns with HF tokenizer) + """ + + # Mega expects embeddings to be (time, batch, embedding size), but + # Hugging Face returns tokens as (batch, time) + if batch_first: + input_ids = input_ids.T + + # to make things more confusing, Mega expects the attention mask to + # be (batch, time), but with values of 0 (normal token) and 1 (ignore token) + # which is the opposite of what HF returns + if ignore_mask_value == 0: + attention_mask = 1 - attention_mask + + # get token embeddings from IDs + embeds = self.embedding_layer(input_ids) + + # pass through the Mega layers + # input is (time, batch, encoder dim) and output is the same + for encoder in self.encoders: + embeds = encoder(embeds, attention_mask) + + # return according to the shape specified + if batch_first: + # (T, B, H) --> (B, T, H) + return torch.transpose(embeds, 0, 1) + else: + return embeds + + +# renamed from MegaForMaskedLM to avoid confusion with new module +class OriginalMegaForMaskedLM(nn.Module): + "A wrapper class for doing masked language modeling with Mega" + + def __init__(self, mega_args, depth, vocab_size): + super().__init__() + self.mega = MegaLM(mega_args, depth, vocab_size) + self.mlm_head = nn.Linear(mega_args.encoder_embed_dim, vocab_size) + self.dropout = nn.Dropout(p=0.1) + + def forward(self, input_ids, attention_mask, batch_first=True, ignore_mask_value=0): + """ + Perform a forward pass through the Mega encoder and the masked LM head. Returns logits for each vocabulary + entry. 
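A small sketch of the two convention flips the wrapper above describes (batch-first transposition and padding-mask inversion), with hypothetical Hugging-Face-style inputs:

```python
import torch

# HF tokenizers produce (batch, time) IDs and a mask where 1 = keep, 0 = ignore
input_ids = torch.tensor([[5, 6, 7, 1, 1]])        # right-padded with a hypothetical pad id of 1
attention_mask = torch.tensor([[1, 1, 1, 0, 0]])

# the original Mega layers instead want (time, batch) token order ...
mega_input_ids = input_ids.T                        # (5, 1)
# ... and a (batch, time) padding mask where 1 marks tokens to IGNORE
mega_padding_mask = 1 - attention_mask              # -> [[0, 0, 0, 1, 1]]
```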
+ + If `batch_first` (default to align with Hugging Face tokenizer behavior), output will have the shape (Batch + size, Sequence length, Vocab size); otherwise (S, B, V) + """ + encoder_output = self.mega(input_ids, attention_mask, batch_first, ignore_mask_value) + return self.mlm_head(self.dropout(encoder_output)) + + +# code to convert the checkpoint located in the user-specified location +def convert_checkpoint_to_huggingface(pretrained_checkpoint_path, output_path, includes_tokenizer): + with open(os.path.join(pretrained_checkpoint_path, "model_args.pkl"), "rb") as f: + mega_original_args = pkl.load(f) + + # load the original encoder + original_mlm = OriginalMegaForMaskedLM(**mega_original_args).eval() + + # load its weights + print( + "Original Mega encoder:", + original_mlm.mega.load_state_dict( + torch.load(os.path.join(pretrained_checkpoint_path, "encoder_weights.pt"), map_location="cpu") + ), + ) + print( + "Original Mega MLM layer:", + original_mlm.mlm_head.load_state_dict( + torch.load(os.path.join(pretrained_checkpoint_path, "mlm_head_weights.pt"), map_location="cpu") + ), + ) + + # create a new config from the old one + hf_config = MegaConfig( + num_hidden_layers=mega_original_args["depth"], + vocab_size=mega_original_args["vocab_size"], + hidden_size=mega_original_args["mega_args"].encoder_embed_dim, + shared_representation_size=mega_original_args["mega_args"].encoder_z_dim, + intermediate_size=mega_original_args["mega_args"].encoder_hidden_dim, + ema_projection_size=mega_original_args["mega_args"].encoder_n_dim, + dropout_prob=mega_original_args["mega_args"].dropout, + attention_probs_dropout_prob=mega_original_args["mega_args"].attention_dropout, + hidden_dropout_prob=mega_original_args["mega_args"].hidden_dropout, + activation=mega_original_args["mega_args"].activation_fn, + attention_activation=mega_original_args["mega_args"].attention_activation_fn, + bidirectional=mega_original_args["mega_args"].bidirectional, + use_chunking=mega_original_args["mega_args"].encoder_chunk_size > 0, + chunk_size=mega_original_args["mega_args"].encoder_chunk_size, + truncation=mega_original_args["mega_args"].truncation_length, + normalization_type=mega_original_args["mega_args"].normalization_type, + normalize_before_mega=True, + norm_affine=True, + use_feature_dropout=mega_original_args["mega_args"].feature_dropout, + relative_positional_bias=mega_original_args["mega_args"].rel_pos_bias, + max_positions=mega_original_args["mega_args"].max_source_positions, + nffn_hidden_size=mega_original_args["mega_args"].encoder_ffn_embed_dim, + normalize_before_ffn=mega_original_args["mega_args"].normalize_before, + # new arguments added for HF implementation + nffn_activation_dropout_prob=0.0, + add_token_type_embeddings=False, + add_lm_hidden_dense_layer=False, + ) + + hf_mlm = MegaForMaskedLM(hf_config).eval() + + # the originl checkpoint just uses nn.Embedding for the word embeddings + # we use a wrapper module for embeddings to add support for positional embeddings + hf_mlm.mega.embedding_layer.word_embeddings.weight = original_mlm.mega.embedding_layer.weight + + # modify the state dictionary of the original checkpoint to account for naming issues in the Hugging Face + # ecosystem -- any names containing "beta" or "gamma" aren't safe to use and are renamed upon _load_pretrained, + # also renaming previously confusing parameter names + original_state_dict = original_mlm.mega.encoders.state_dict() + updated_keys = {} + for module_name in original_state_dict.keys(): + new_module_name = None + # have 
to handle gamma, beta, and alpha differently due to their use + # in multiple modules within the original repository; + # beta is used in EMA, MovingAverageGatedAttention, and RotaryRelativePositionalBias, and must be renamed due to flax/tf weights + # the EMA sublayer was renamed from "move" to "ema_gate" for readability, so that is also done here + if "beta" in module_name: + # EMA sub-layers were always called "move" in the original repo + if "move.beta" in module_name: + new_module_name = module_name.replace("move.beta", "ema_gate.ema_expansion_matrix") + elif "mega_layer.beta" in module_name: + new_module_name = module_name.replace("beta", "qk_bias") + else: + new_module_name = module_name.replace("beta", "b_param") + # beta is used in EMA and MovingAverageGatedAttention, and must be renamed due to flax/tf weights + elif "gamma" in module_name: + if "move.gamma" in module_name: + new_module_name = module_name.replace("move.gamma", "ema_gate.kernel_projection_matrix") + elif "mega_layer.gamma" in module_name: + new_module_name = module_name.replace("gamma", "qk_weight") + else: + new_module_name = module_name.replace("gamma", "g_param") + # alpha is used in EMA and positional bias; renaming to improve readability + elif "move.alpha" in module_name: + new_module_name = module_name.replace("move.alpha", "ema_gate.decay_factor") + # delta is only used in EMA; renaming to improve readability + elif "move.delta" in module_name: + new_module_name = module_name.replace("move.delta", "ema_gate.damping_factor") + # omega is only used in EMA; renaming to improve readability + elif "omega" in module_name: + new_module_name = module_name.replace("move.omega", "ema_gate.residual_weight") + + if new_module_name: + updated_keys[module_name] = new_module_name + + if len(updated_keys) != 0: + print(f"Renaming these keys: {updated_keys.keys()}") + else: + print("No need to rename state dict entries") + for old, new in updated_keys.items(): + original_state_dict[new] = original_state_dict.pop(old) + + # now attempt to load the state dictionary with updated names + # note that we now call it `mega.layers` instead of `mega.encoders` due to hugging face style + print("HF Mega encoder:", hf_mlm.mega.layers.load_state_dict(original_state_dict)) + + # load the MLM head weights directly + print( + "HF Mega MLM layer:", + hf_mlm.mlm_head.load_state_dict( + torch.load(os.path.join(pretrained_checkpoint_path, "mlm_head_weights.pt"), map_location="cpu") + ), + ) + + # test on a randomly generated input sequence + input_ids = torch.randint(0, hf_config.vocab_size, size=(4, 256)) + input_mask = torch.ones_like(input_ids) + # mask a few tokens to make sure masking is applied appropriately :) + input_mask[:, -10:] = 0 + + # run forward passes + original_output = original_mlm(input_ids, input_mask, batch_first=True, ignore_mask_value=0) + hf_output = hf_mlm(input_ids, input_mask)[0] + + # print shapes and diff + print(f"original output {original_output.shape}") + print(f"hf output {hf_output.shape}") + print(f"max diff: {(original_output - hf_output).max()}") # 0.0 + success = torch.allclose(original_output, hf_output, atol=1e-3) + + if success: + print("Yay!") + hf_mlm.save_pretrained(output_path) + else: + raise RuntimeError(f"Something's broken :(\nOriginal:\n{original_output}\n\nHF\n{hf_output}\n{hf_mlm}") + + if includes_tokenizer: + print("Transferring tokenizer") + tokenizer = AutoTokenizer.from_pretrained(pretrained_checkpoint_path) + tokenizer.save_pretrained(output_path) + + +if __name__ == "__main__": + 
parser = argparse.ArgumentParser() + + parser.add_argument( + "--pretrained_checkpoint_path", + default=None, + type=str, + required=True, + help="Point to the directory containing your model weights using the official Mega repo", + ) + + parser.add_argument( + "--output_path", default=None, type=str, required=True, help="Location to save the Hugging Face version" + ) + + parser.add_argument( + "--includes_tokenizer", + action="store_true", + help="Use this flag if there is a Hugging Face tokenizer in the original checkpoint repo", + ) + + args = parser.parse_args() + + convert_checkpoint_to_huggingface(args.pretrained_checkpoint_path, args.output_path, args.includes_tokenizer) diff --git a/venv/lib/python3.10/site-packages/transformers/models/mega/modeling_mega.py b/venv/lib/python3.10/site-packages/transformers/models/mega/modeling_mega.py new file mode 100644 index 0000000000000000000000000000000000000000..069c717a737572bb797289bb1f25d7cb8922a4dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/mega/modeling_mega.py @@ -0,0 +1,2273 @@ +# coding=utf-8 +# Copyright 2023 The Mega Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch MEGA model.""" + +import math +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...modeling_outputs import ( + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + MaskedLMOutput, + MultipleChoiceModelOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import ALL_LAYERNORM_LAYERS +from ...utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_mega import MegaConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "mnaylor/mega-base-wikitext" +_CONFIG_FOR_DOC = "MegaConfig" + + +from ..deprecated._archive_maps import MEGA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +class MegaEmbeddings(nn.Module): + """ + Mega's basic implementation does not incorporate token type embeddings, so this is a stripped-down version of + RoBERTa's embeddings which optionally includes token types + """ + + def __init__(self, config: MegaConfig): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.use_token_types = config.add_token_type_embeddings + if self.use_token_types: + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + # registering a buffer here allows model tracing when not passing optional token type IDs + # more info at transformers issue #5664 + self.register_buffer( + "token_type_ids", 
torch.zeros(config.max_positions, dtype=torch.long).expand((1, -1)), persistent=False + ) + + self.padding_idx = config.pad_token_id + + def forward(self, input_ids=None, token_type_ids=None, inputs_embeds=None): + if (input_ids is None) and (inputs_embeds is None): + raise ValueError("Must provide one of input_ids or inputs_embeds") + elif input_ids is not None: + input_shape = input_ids.size() + device = input_ids.device + + # get the word embeddings if only IDs are provided + inputs_embeds = self.word_embeddings(input_ids) + else: + input_shape = inputs_embeds.size()[:-1] + device = inputs_embeds.device + + # the original Mega implementation did not include token type embeddings, so we add + # an option to use them if desired; if embeddings are present and token type IDs are + # not provided, we will use a registered buffer (which helps with tracing) + if self.use_token_types: + if token_type_ids is None: + if hasattr(self, "token_type_ids"): + buffered_token_type_ids = self.token_type_ids[:, : input_shape[1]] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], input_shape[1]) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + # access token type embeddings + token_type_embeddings = self.token_type_embeddings(token_type_ids) + # add the token type embeddings to the word embeddings + embeddings = inputs_embeds + token_type_embeddings + else: + embeddings = inputs_embeds + return embeddings + + +class MegaSimpleRelativePositionalBias(nn.Module): + """ + Simple relative positional embeddings copied from the Mega repo; renamed variables for better readability + """ + + def __init__(self, config: MegaConfig): + super().__init__() + self.config = config + self.max_positions = self.config.max_positions if self.config.chunk_size < 0 else self.config.chunk_size + self.rel_pos_bias = nn.Parameter(torch.Tensor(2 * config.max_positions - 1)) + + def forward(self, seq_len): + if seq_len > self.max_positions: + raise ValueError("Sequence length {} going beyond max length {}".format(seq_len, self.max_positions)) + + # seq_len * 2 - 1 + bias = self.rel_pos_bias[(self.max_positions - seq_len) : (self.max_positions + seq_len - 1)] + # seq_len * 3 - 1 + tile = F.pad(bias, (0, seq_len)) + # (seq_len * 3 - 1) * seq_len + tile = torch.tile(tile, (seq_len,)) + tile = tile[:-seq_len] + # seq_len x (3 * seq_len - 2) + tile = tile.view(seq_len, 3 * seq_len - 2) + start = (2 * seq_len - 1) // 2 + end = tile.size(1) - start + tile = tile[:, start:end] + return tile + + +class MegaRotaryRelativePositionalBias(nn.Module): + """ + Rotary relative bias for positional information; similar in concept to RoPE (i.e. RoFormer) but taken from the Mega + repo due to differences in implementation. + + When initialized, produces a positional bias which ranges from position 0 to config.max_positions, but can + extrapolate to longer sequences. 
Can be indexed according to input position IDs + """ + + def __init__(self, config: MegaConfig): + super().__init__() + if config.hidden_size % 2 != 0: + raise RuntimeError("Rotary positional bias requires `hidden_size` to be a multiple of 2") + self.config = config + self.embed_dim = config.shared_representation_size + self.max_positions = self.config.max_positions if self.config.chunk_size < 0 else self.config.chunk_size + self.sine, self.cosine = MegaRotaryRelativePositionalBias.get_sinusoid_embeddings( + config.max_positions, self.embed_dim + ) + # alpha and beta parameters for the rotary bias; beta renamed to b_param to avoid clashes with tf/flax weight handling + # in loading pretrained weights + self.alpha = nn.Parameter(torch.Tensor(1, self.embed_dim)) + self.b_param = nn.Parameter(torch.Tensor(1, self.embed_dim)) + self.register_buffer("_float_tensor", torch.FloatTensor([0.0])) + + @staticmethod + def get_sinusoid_embeddings(max_positions: int, embedding_dim: int): + half_dim = embedding_dim // 2 + emb = math.log(10000) / half_dim + emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) + emb = torch.arange(max_positions, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0) + return torch.sin(emb), torch.cos(emb) + + def rotary(self, input): + seq_len, embed_dim = input.size() + chunk_1, chunk_2 = torch.chunk(input, 2, dim=-1) + if self.sine is None or seq_len > self.sine.size(0): + self.sine, self.cosine = MegaRotaryRelativePositionalBias.get_sinusoid_embeddings(seq_len, embed_dim) + self.max_positions = seq_len + self.sine = self.sine.to(self._float_tensor) + self.cosine = self.cosine.to(self._float_tensor) + + sin = self.sine[:seq_len] + cos = self.cosine[:seq_len] + return torch.cat([chunk_1 * cos - chunk_2 * sin, chunk_2 * cos + chunk_1 * sin], dim=1) + + def forward(self, seq_len): + rotary_alpha = self.rotary(self.alpha.expand(seq_len, self.embed_dim)) + rotary_beta = self.rotary(self.b_param.expand(seq_len, self.embed_dim)) + bias = torch.einsum("mk,nk->mn", rotary_alpha, rotary_beta) + return bias + + +class MegaDropout(nn.Module): + """ + A unified class for standard dropout functionality and featurewise dropout. + + The original fairseq Mega repo used 2 classes for these, which included some unnecessary handling of training logic + and an unused `inplace` option. The original implementation used torch.nn.functional instead of submodules, which + is retained here as well. 
+ """ + + def __init__(self, dropout_probability, is_featurewise=False): + super().__init__() + self.dropout_probability = dropout_probability + self.is_featurewise = is_featurewise + + def forward(self, input, batch_first: bool = False): + if self.is_featurewise: + if batch_first: + # (batch_size X sequence_length X feature_dimension) + # -> (batch_size X feature_dimension X sequence_length) + # -> (batch_size X sequence_length X feature_dimension) + return F.dropout2d( + input.transpose(-1, -2), p=self.dropout_probability, training=self.training + ).transpose(-1, -2) + else: + if input.dim() != 3: + raise ValueError( + "Feature dropout inputs must be exactly 3-dimensional if inputs are ordered [sequence length, batch size, hidden dimension]" + ) + # (sequence_length X batch_size X feature_dimension) + # -> (batch_size X feature_dimension X sequence_length) + # -> (sequence_length X batch_size X feature_dimension) + return F.dropout2d(input.permute(1, 2, 0), p=self.dropout_probability, training=self.training).permute( + 2, 0, 1 + ) + else: + return F.dropout(input, p=self.dropout_probability, training=self.training) + + +class MegaRMSNorm(nn.Module): + """ + RMSNorm used in Mega implementation. Differs from T5's RMSNorm by applying the weight prior to taking the square + root (as opposed to after in T5) + """ + + def __init__(self, number_features, eps=1e-6, affine=True): + super().__init__() + self.num_features = number_features + self.eps = eps + self.affine = affine + if affine: + self.weight = nn.Parameter(torch.Tensor(self.num_features)) + else: + self.register_parameter("weight", None) + + def forward(self, input): + mean_square = torch.mean(torch.square(input), dim=-1, keepdim=True) + if self.weight is not None: + input = input * self.weight + + input * torch.rsqrt(mean_square + self.eps) + return input + + +class MegaScaleNorm(nn.Module): + """ + Scale normalization introduced in MEGA which is similar to RMSNorm, but uses a single parameter for scalar + multiplication instead of a vector, and applies over a specified dimension + """ + + def __init__(self, dim, eps=1e-6, affine=True): + super().__init__() + self.dim = dim + self.eps = eps + self.affine = affine + if affine: + self.scalar = nn.Parameter(torch.Tensor(1)) + else: + self.register_parameter("scalar", None) + + def forward(self, input): + mean_square = torch.mean(torch.square(input), dim=self.dim, keepdim=True) + if self.scalar is not None: + input = self.scalar * input + + output = input * torch.rsqrt(mean_square + self.eps) + return output + + +class MegaSequenceNorm(nn.Module): + """ + A wrapper class for various layer normalization options used in Mega. Used to handle differences in expectations on + input axis locations for different normalization methods. 
+ """ + + def __init__(self, norm_type, embedding_dim, eps=1e-5, affine=True, export=False): + super().__init__() + if norm_type == "layernorm": + self.norm = nn.LayerNorm(embedding_dim, eps, elementwise_affine=affine) + elif norm_type == "scalenorm": + self.norm = MegaScaleNorm(dim=-1, eps=eps, affine=affine) + elif norm_type == "rmsnorm": + self.norm = MegaRMSNorm(embedding_dim, eps=eps, affine=affine) + elif norm_type == "batchnorm": + self.norm = nn.BatchNorm1d(embedding_dim, eps=eps, affine=affine) + elif norm_type == "syncbatchnorm": + self.norm = nn.SyncBatchNorm(embedding_dim, eps=eps, affine=affine) + else: + raise ValueError("Unknown norm type: {}".format(norm_type)) + + def forward(self, input): + if isinstance(self.norm, nn.modules.batchnorm._BatchNorm): + if input.dim() != 3: + raise ValueError("BatchNorm inputs must be exactly 3-dimensional") + input = input.permute(1, 2, 0) + input = self.norm(input) + return input.permute(2, 0, 1) + else: + return self.norm(input) + + +# add this layernorm class to ALL_LAYERNORM_LAYERS +ALL_LAYERNORM_LAYERS.append(MegaSequenceNorm) + + +class MegaMultiDimensionDampedEma(nn.Module): + """ + Mega's Exponential Moving Average layer, largely left unmodified from the original repo with the exception of + variable names and moving away from the stateful representation of incremental decoding state. See + "https://arxiv.org/abs/2209.10655" for more details. + """ + + def __init__(self, config: MegaConfig): + super().__init__() + + self.config = config + + self.embed_dim = config.hidden_size + self.ndim = config.ema_projection_size + self.bidirectional = config.bidirectional + self.truncation = config.truncation + self.scale = math.sqrt(1.0 / self.ndim) + + kernel_dim = 2 * config.hidden_size if self.bidirectional else config.hidden_size + # renamed delta (damping_factor) and alpha (decay_factor) to be more descriptive of what the parameters are doing + self.damping_factor = nn.Parameter(torch.Tensor(kernel_dim, self.ndim, 1)) + self.decay_factor = nn.Parameter(torch.Tensor(kernel_dim, self.ndim, 1)) + # renamed gamma (kernel_projection_matrix) and beta (ema_expansion_matrix) respectively to avoid HF renaming + # things and align with the paper's description of these params' behavior + self.ema_expansion_matrix = nn.Parameter(torch.Tensor(kernel_dim, self.ndim, 1)) + self.kernel_projection_matrix = nn.Parameter(torch.Tensor(kernel_dim, self.ndim)) + # renamed omega to residual_weight to describe what it's doing + self.residual_weight = nn.Parameter(torch.Tensor(config.hidden_size)) + self._kernel = None + self._coeffs = None + + def _compute_ema_coefficients(self): + self._coeffs = None + # convert the alpha and delta parameters (kernel_dim x EMA projection size x 1) to [0, 1] with sigmoid + damping_factor = torch.sigmoid(self.damping_factor) + decay_factor = torch.sigmoid(self.decay_factor) + previous_timestep_weight = 1.0 - damping_factor * decay_factor + return damping_factor, previous_timestep_weight + + def _compute_efficient_ema_kernel(self, length: int): + # computes the kernel used for efficient damped EMA applied via FFT convolution + self._kernel = None + # p and q have shape (kernel_dim x ema_projection_size x 1) + damping_factor, previous_timestep_weight = self._compute_ema_coefficients() + # extend the kernel to (kernel_dim X ema_projection_size X sequence_length) and + # multiply q by sequential ints up to the sequence length + vander = torch.arange(length).to(damping_factor).view(1, 1, length) * torch.log(previous_timestep_weight) + 
kernel = (damping_factor * self.ema_expansion_matrix) * torch.exp(vander) + # (kernel_dim X ema_projection_size X sequence_length) -> (kernel_dim, sequence_length) + return torch.einsum("dnl,dn->dl", kernel, self.kernel_projection_matrix * self.scale) + + def get_ema_coefficients(self): + if self.training: + return self._compute_ema_coefficients() + else: + if self._coeffs is None: + self._coeffs = self._compute_ema_coefficients() + return self._coeffs + + def get_ema_kernel(self, length: int): + kernel_size = length if self.truncation is None else min(self.truncation, length) + if self.training: + return self._compute_efficient_ema_kernel(kernel_size) + else: + if self._kernel is None or self._kernel.size(-1) < kernel_size: + self._kernel = self._compute_efficient_ema_kernel(kernel_size) + return self._kernel[..., :kernel_size] + + def fft_convolution(self, inputs, kernel, length): + # this is a wrapper for repeated use of EMA calculation via FFT (fast Fourier transform) convolution + inputs_fft = torch.fft.rfft(inputs.float(), n=2 * length) + kernel_fft = torch.fft.rfft(kernel.float(), n=2 * length) + convolved_sequence = torch.fft.irfft(inputs_fft * kernel_fft, n=2 * length) + return convolved_sequence + + def ema_step(self, inputs, length, past_state=None): + if length == 1: + return self.one_ema_step(inputs, past_state=past_state) + + # (kernel_dim X ema_projection_size X 1) + damping_factor, previous_timestep_weight = self.get_ema_coefficients() + # (kernel_dim X ema_projection_size X 1+sequence_length) + vander = torch.arange(length + 1).to(damping_factor).view(1, 1, length + 1) * torch.log( + previous_timestep_weight + ) + vander = torch.exp(vander) + if past_state is not None: + # (kernel_dim X ema_projection_size X sequence_length) * (kernel_dim X ema_projection_size X 1) + # -> (kernel_dim X ema_projection_size X sequence_length) + past_ema_proj = vander[:, :, 1:] * (self.kernel_projection_matrix * self.scale).unsqueeze(-1) + # past_state will be (batch_size, kernel_dim, ema_projection_size) + past_ema_state = torch.einsum("bdn,dnl->bdl", past_state, past_ema_proj) + # (kernel_dim X ema_projection_size) * (batch_size X kernel_dim X ema_projection_size) + # -> (batch_size X kernel_dim X ema_projection_size) + past_vandermonde = vander[:, :, -1] * past_state + else: + past_ema_state = None + past_vandermonde = None + + # (kernel_dim X ema_projection_size X sequence_length) + vander = vander[:, :, :-1] + kernel = (damping_factor * self.ema_expansion_matrix) * vander + kernel_proj = torch.einsum("dnl,dn->dl", kernel, self.kernel_projection_matrix * self.scale) + + ema_output = self.fft_convolution(inputs, kernel_proj, length=length)[..., 0:length] + ema_output = ema_output.type_as(inputs) + if past_ema_state is not None: + ema_output = ema_output + past_ema_state + + updated_hidden_state = torch.einsum("bdl,dnl->bdn", inputs, torch.flip(kernel, dims=[2])) + if past_vandermonde is not None: + updated_hidden_state = updated_hidden_state + past_vandermonde + # return a tuple: + # (sequence_length, batch_size, kernel_dim) + # (batch_size, kernel_dim, ema_projection_size) + return ema_output.permute(2, 0, 1), updated_hidden_state + + def one_ema_step(self, inputs, past_state=None): + damping_factor, previous_timestep_weight = self.get_ema_coefficients() + # (kernel_dim X ema_projection_size) x (batch_size X kernel_dim X 1) + # -> (batch_size X kernel_dim X ema_projection_size) + updated_state = (damping_factor * self.ema_expansion_matrix).squeeze(-1) * inputs + if past_state is not None: 
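+            # together with the line above, this applies the damped EMA recurrence
+            #   h_t = (p * beta) * x_t + q * h_{t-1}
+            # with p = damping_factor, q = previous_timestep_weight, and h the per-dimension EMA state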
+ updated_state = updated_state + previous_timestep_weight.squeeze(-1) * past_state + # (batch_size X kernel_dim) + out = torch.einsum("bdn,dn->bd", updated_state, self.kernel_projection_matrix * self.scale) + # (1 X batch_size X kernel_dim), (batch_size X kernel_dim X ema_projection_size) + return out.unsqueeze(0), updated_state + + def forward( + self, + inputs, + attention_mask: Optional[torch.Tensor] = None, + prev_state: Optional[torch.Tensor] = None, + use_cache: bool = False, + ) -> torch.Tensor: + """ + Mega's exponential moving average (EMA) sub-layer applied prior to single-headed (traditional) self-attention + + Args: + inputs (`torch.Tensor` of shape `(sequence_length, batch_size, hidden_size)`): + Hidden state / embedding input to update via EMA based on FFT convolution + attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indicates which inputs are to be ignored (mostly due to padding), where elements are either 1 for *not + masked* or 0 for *masked* + prev_state (`torch.Tensor` of shape `(batch_size, config.ndim)`, *optional*): + The hidden state returned from the previous timestep during incremental decoding. + use_cache (`bool`, default `False`): + Whether to perfom incremental decoding; uses `prev_state` as the prior timestep, and returns the + updated EMA hidden state for use in the next step + + Returns: + `tuple(torch.FloatTensor)` containing various elements depending on configuration ([`MegaConfig`]) and + inputs: + - **hidden_states** (`torch.FloatTensor` of shape `(sequence_length, batch_size, hidden_size)`) -- Hidden + states updated by EMA, with same shapes as inputs + - **updated_state** (*optional*, returned when `use_cache=True`) `torch.FloatTensor of shape `(batch_size, + config.ndim)` -- The incremental EMA state for use in the next step of incremental decoding + """ + + seq_len, bsz, embed_dim = inputs.size() + if embed_dim != self.embed_dim: + raise ValueError( + f"Unexpected embedding dimension received: input is {embed_dim}, model expects {self.embed_dim}" + ) + + # sequence_length X batch_size X hidden_size + residual = inputs * self.residual_weight + + # (sequence_length x batch_size x hidden_size) -> (batch_size x hidden_size x sequence_length) + inputs = inputs.permute(1, 2, 0) + # mask the input: output is a tensor with 0 in the masked positions + if attention_mask is not None: + inputs = inputs * (attention_mask.unsqueeze(1).type_as(inputs)) + + if self.bidirectional and use_cache: + raise RuntimeError("Bidirectional EMA does not support incremental state") + + if use_cache: + out, updated_state = self.ema_step(inputs, seq_len, past_state=prev_state) + + # (batch_size X hidden_size) -> (1 x batch_size x hidden_size) + out = F.silu(out + residual) + + # if incremental decoding, return the new state along with the output + return out, updated_state + else: + # (hidden_size x sequence_length) + kernel = self.get_ema_kernel(seq_len) + fft_len = seq_len + s_index = 0 + kernel_size = kernel.size(1) + if self.bidirectional: + # split the kernel for each direction of EMA + k1, k2 = torch.split(kernel, [self.embed_dim, self.embed_dim], dim=0) + # (hidden_size X 2*sequence_length - 1) + kernel = F.pad(k1, (kernel_size - 1, 0)) + F.pad(k2.flip(-1), (0, kernel_size - 1)) + inputs = F.pad(inputs, (kernel_size - 1, 0)) + fft_len = fft_len + kernel_size - 1 + s_index = 2 * kernel_size - 2 + + ema_output = self.fft_convolution(inputs, kernel, length=fft_len)[..., s_index : s_index + seq_len] + ema_output = 
ema_output.type_as(inputs) + # (batch_size X hidden_size X sequence_length) -> (sequence_length X batch_size X hidden_size) + gated_ema_output = F.silu(ema_output.permute(2, 0, 1) + residual) + + return gated_ema_output, None + + +class MegaGatedCrossAttention(nn.Module): + """ + Gated Structured State Attention for use in encoder-decoder model. See Mega paper for more details. Only + modifications from original implementation are variable names, removing the unnecessary `before_attn_fn` and + `static_kv` arguments, and the stateful representation of incremental decoder state. + """ + + def __init__(self, config: MegaConfig): + super().__init__() + + self.config = config + self.activation = ACT2FN[self.config.activation] + self.attention_activation = self.config.attention_activation + self.scaling = self.config.shared_representation_size**-0.5 if self.attention_activation == "softmax" else None + + self.dropout = MegaDropout(self.config.dropout_prob, is_featurewise=self.config.use_feature_dropout) + self.hidden_dropout = MegaDropout( + self.config.hidden_dropout_prob, is_featurewise=self.config.use_feature_dropout + ) + # Attention dropout is standard dropout + self.attention_dropout = MegaDropout(self.config.attention_probs_dropout_prob, is_featurewise=False) + + self.prenorm = self.config.normalize_before_mega + self.norm = MegaSequenceNorm( + self.config.normalization_type, self.config.hidden_size, affine=self.config.norm_affine + ) + + self.k_proj = nn.Linear(self.config.hidden_size, self.config.shared_representation_size) + self.v_proj = nn.Linear(self.config.hidden_size, self.config.hidden_size) + self.q_proj = nn.Linear( + self.config.hidden_size, 2 * self.config.hidden_size + self.config.shared_representation_size + ) + self.h_proj = nn.Linear(self.config.hidden_size, self.config.hidden_size) + + if self.config.relative_positional_bias == "simple": + self.rel_pos_bias = MegaSimpleRelativePositionalBias(config) + elif self.config.relative_positional_bias == "rotary": + self.rel_pos_bias = MegaRotaryRelativePositionalBias(config) + else: + raise ValueError("unknown relative position bias: {}".format(self.config.relative_positional_bias)) + + self.softmax = nn.Softmax(dim=-1) + + def element_attention(self, query, key, key_padding_mask, pidx): + bsz, src_len, _ = key.size() + tgt_len = query.size(1) if pidx is None else pidx + 1 + if key_padding_mask is not None: + # (batch_size X source_sequence_length) --> (batch_size X 1 X 1) + lengths = key_padding_mask.sum(dim=-1).view(bsz, 1, 1) + else: + lengths = src_len + + # (target_sequence_length X source_sequence_length) + bias = self.rel_pos_bias(max(tgt_len, src_len))[:, :src_len] + if pidx is not None: + if query.size(1) != 1: + raise ValueError("Position offset provided with queries longer than 1 token") + # source_sequence_length + bias = bias[pidx] + else: + # (target_sequence_length X source_sequence_length) + bias = bias[:tgt_len] + + # (batch_size X target_sequence_length X source_sequence_length) + qk = torch.bmm(query, key.transpose(1, 2)) / lengths + bias + + attn_weights = ACT2FN[self.attention_activation](qk).type_as(qk) + + if key_padding_mask is not None: + attn_weights = attn_weights * key_padding_mask.unsqueeze(1) + + return attn_weights + + def softmax_attention(self, query, key, key_padding_mask, pidx): + bsz, src_len, _ = key.size() + tgt_len = query.size(1) if pidx is None else pidx + 1 + + # (target_sequence_length X source_sequence_length) + bias = self.rel_pos_bias(max(tgt_len, src_len))[:, :src_len] + if pidx is 
not None: + if query.size(1) != 1: + raise ValueError("Position offset provided with queries longer than 1 token") + # source_sequence_length + bias = bias[pidx] + else: + # (target_sequence_length X source_sequence_length) + bias = bias[:tgt_len] + + # scaled attention + query = query * self.scaling + # (batch_size X target_sequence_length X source_sequence_length) + qk = torch.bmm(query, key.transpose(1, 2)) + bias + + if key_padding_mask is not None: + qk = qk.masked_fill((1 - key_padding_mask).unsqueeze(1).to(torch.bool), float("-inf")) + + attn_weights = self.softmax(qk).type_as(qk) + return attn_weights + + def forward( + self, + query, + key: Optional[torch.Tensor], + value: Optional[torch.Tensor], + key_padding_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + """ + Gated cross-attention used in Mega + + Args: + query (`torch.Tensor` of shape `(target_sequence_length, batch_size, hidden_size)`): + The self (or target) sequence input used as query inputs for cross-attention + key (`torch.Tensor` of shape `(source_sequence_length, batch_size, hidden_size)`): + The cross (or source) sequence input with shape used as keys in cross-attention + value (`torch.Tensor` of shape `(source_sequence_length, batch_size, hidden_size)`): + The cross (or source) sequence input with shape used as values in cross-attention + key_padding_mask (`torch.LongTensor` of shape `(batch_size, source_sequence_length)`, *optional*): + Padding mask corresponding to the source sequence, where entries are 1 for *not masked* and 0 for + *masked* tokens + past_key_values (`tuple(torch.FloatTensor)`, *optional*): + If provided, the hidden state returned from the previous timestep during incremental decoding; expects + that prior cross-attention keys and values will be the last two items in the tuple + output_attentions (`bool`, defaults to `False`): + Whether or not to return the cross-attention weights. 
+ use_cache (`bool`, defaults to `False`): + Whether to perfom incremental decoding; uses `prev_state` as the prior timestep, and returns the + updated EMA hidden state for use in the next step + + Returns: + `tuple(torch.FloatTensor)` containing various elements depending on configuration ([`MegaConfig`]) and + inputs: + - **hidden_states** (`torch.FloatTensor` of shape `(target_sequence_length, batch_size, hidden_size)`) -- + Hidden states from target sequence updated by gated cross-attention + - **attn_weights** (*optional*, returned when `output_attentions=True`) `torch.FloatTensor` of shape + `(batch_size, source_sequence_length, target_sequence_length)` -- The pairwise cross-attention weights + corresponding to each token in the source and target sequences + - **cross_key** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size, + source_sequence_length, config.shared_representation_size)` -- The cross-attention key state for use in + the next step of incremental decoding + - **cross_value** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size, + source_sequence_length, config.hidden_size)` -- The cross-attention value state for use in the next step + of incremental decoding + """ + + seq_len, bsz, embed_dim = query.size() + if embed_dim != self.config.hidden_size: + raise ValueError( + f"Unexpected embedding dimension received: input is {embed_dim} but expected {self.config.hidden_size}" + ) + + if past_key_values is not None: + # make sure the inputs only have a sequence length of 1 if we're doing incremental decoding + if seq_len != 1: + raise ValueError(f"Incremental decoding requested with self-sequence length > 1: {seq_len}") + # expect past_key_values to have (self_key, self_value, self_ema, cross_key, cross_value) + prev_cross_key, prev_cross_value = past_key_values[-2:] + key = value = None + + # use the self-attention cache to get the position id of the current step + prev_self_key = past_key_values[0] + num_incremental_steps = prev_self_key.size(1) + 1 + else: + prev_cross_key = prev_cross_value = None + # we still need the position id if we're doing incremental decoding (past_key_values will be None for the first step) + num_incremental_steps = 0 if use_cache and (seq_len == 1) else None + + full_query = query + if self.prenorm: + full_query = self.norm(full_query) + + # (target_sequence_length X batch_size X 2*hidden_size + shared_representation_size) + query_projected = self.q_proj(full_query) + # split the query projections into separate components + # - residual_weight is passed through sigmoid and sent through elementwise multiplication to the gated/weighted targets prior to being added to the query directly + # - target_gate is a silu-gated tensor that is multiplied by the attention-weighted target below prior to residual connection + # - attention_query is the part that is passed to the attention function + residual_weight, target_gate, attention_query = torch.split( + query_projected, + [self.config.hidden_size, self.config.hidden_size, self.config.shared_representation_size], + dim=-1, + ) + + # (target_sequence_length X batch_size X hidden_size) + residual_weight = torch.sigmoid(residual_weight) + target_gate = F.silu(target_gate) + + if key is None: + if value is not None: + raise ValueError("Key and value must be `None` simultaneously") + projected_key = projected_value = None + else: + # (source_sequence_length X batch_size X shared_representation_size) + projected_key = self.k_proj(key) + # 
(source_sequence_length X batch_size X hidden_size) + projected_value = self.activation(self.v_proj(key)) + + # (target_sequence_length X batch_size X shared_representation_size) + # -> (batch_size X target_sequence_length X shared_representation_size) + attention_query = attention_query.transpose(0, 1) + if projected_key is not None: + projected_key = projected_key.transpose(0, 1) + if projected_value is not None: + projected_value = projected_value.transpose(0, 1) + + # if we're doing incremental decoding, k and v are None and need to be overwritten with past values + if past_key_values is not None: + projected_key = prev_cross_key + projected_value = prev_cross_value + + # if we're returning the cache for later use, store these now for later return (can be done without having past_key_values provided) + if use_cache: + updated_cross_key = projected_key + updated_cross_value = projected_value + + ctx_len = projected_key.size(1) + # This is part of a workaround to get around fork/join parallelism + # not supporting Optional types. + if key_padding_mask is not None and key_padding_mask.dim() == 0: + key_padding_mask = None + + if key_padding_mask is not None: + if key_padding_mask.size(0) != bsz: + raise ValueError("Key padding mask does not align on the batch dimension") + if key_padding_mask.size(1) != ctx_len: + raise ValueError("Key padding mask does not align on the sequence length dimension") + + if self.attention_activation == "softmax": + attn_weights = self.softmax_attention( + attention_query, projected_key, key_padding_mask, num_incremental_steps + ) + else: + attn_weights = self.element_attention( + attention_query, projected_key, key_padding_mask, num_incremental_steps + ) + + projected_value = self.hidden_dropout(projected_value, batch_first=True) + kernel = self.attention_dropout(attn_weights) + # (batch_size X target_sequence_length X hidden_size) + # -> (target_sequence_length X batch_size X hidden_size) + weighted_targets = torch.bmm(kernel, projected_value).transpose(0, 1) + # (target_sequence_length X batch_size X hidden_size) + weighted_targets = self.activation(self.h_proj(weighted_targets * target_gate)) + weighted_targets = self.dropout(weighted_targets) + out = torch.addcmul(query, residual_weight, weighted_targets - query) + + if not self.prenorm: + out = self.norm(out) + + outputs = (out, attn_weights) if output_attentions else (out,) + if use_cache: + outputs = outputs + (updated_cross_key, updated_cross_value) + + return outputs + + +class MegaMovingAverageGatedAttention(nn.Module): + """ + Pure PyTorch implementation of Mega block; see https://arxiv.org/abs/2209.10655 and original fairseq implementation + at https://github.com/facebookresearch/mega (copyright Meta Research, licensed under MIT License) + + Differences from original implementation include hidden state refactor and fixed inconsistency with additive / + multiplicative attention masks + """ + + def __init__(self, config: MegaConfig): + super().__init__() + self.config = config + self.activation = ACT2FN[self.config.activation] + self.scaling = ( + self.config.shared_representation_size**-0.5 if self.config.attention_activation == "softmax" else None + ) + self.dropout = MegaDropout(self.config.dropout_prob, is_featurewise=self.config.use_feature_dropout) + self.hidden_dropout = MegaDropout( + self.config.hidden_dropout_prob, is_featurewise=self.config.use_feature_dropout + ) + # attention dropout is standard dropout + self.attention_dropout = MegaDropout(self.config.attention_probs_dropout_prob, 
is_featurewise=False) + + self.norm = MegaSequenceNorm( + self.config.normalization_type, self.config.hidden_size, affine=self.config.norm_affine + ) + self.ema_gate = MegaMultiDimensionDampedEma(config) + + self.v_proj = nn.Linear(self.config.hidden_size, self.config.intermediate_size) + self.mx_proj = nn.Linear( + self.config.hidden_size, + self.config.shared_representation_size + self.config.intermediate_size + 2 * self.config.hidden_size, + ) + self.h_proj = nn.Linear(self.config.intermediate_size, self.config.hidden_size) + + self.qk_weight = nn.Parameter(torch.Tensor(2, self.config.shared_representation_size)) + self.qk_bias = nn.Parameter(torch.Tensor(2, self.config.shared_representation_size)) + + if self.config.relative_positional_bias == "simple": + self.rel_pos_bias = MegaSimpleRelativePositionalBias(config) + elif self.config.relative_positional_bias == "rotary": + self.rel_pos_bias = MegaRotaryRelativePositionalBias(config) + else: + raise ValueError(f"Unknown relative positional bias: {self.config.relative_positional_bias}") + + self.softmax = nn.Softmax(dim=-1) + self.attention_function = ( + self.softmax_attention if self.config.attention_activation == "softmax" else self.element_attention + ) + + def element_attention(self, query, key, padding_mask, causal_mask): + """ + Apply element-wise attention via relu^2 or laplace. Same as original implementation but with standardized + causal attention mask. Expects the Hugging Face standard attention mask paradigm: 1 for not masked, and 0 for + masked. + """ + seq_len = key.size(2) + if padding_mask is not None: + # (batch_size X number of chunks X 1) + lengths = padding_mask.sum(-1, keepdim=True) + # (batch_size X number of chunks X 1 X 1) + lengths = lengths.clamp(min=1.0).unsqueeze(-1) + else: + lengths = seq_len + + if causal_mask is not None: + lengths = causal_mask.sum(dim=-1, keepdim=True) + + # (sequence_length X sequence_length) + bias = self.rel_pos_bias(seq_len) + if seq_len != query.size(2): + if query.size(2) != 1: + raise ValueError("Size mismatch between Q and K in element attention") + # (1 X sequence_length) + bias = bias[-1:] + + # (batch_size X number of chunks X sequence_length X sequence_length) + qk = torch.matmul(query, key.transpose(2, 3)) / lengths + bias + + attn_weights = ACT2FN[self.config.attention_activation](qk).type_as(qk) + + if padding_mask is not None: + attn_weights = attn_weights * padding_mask.unsqueeze(2) + + if causal_mask is not None: + attn_weights = attn_weights * causal_mask + + return attn_weights + + def softmax_attention(self, query, key, padding_mask, causal_mask): + "Standard softmax self-attention, as in the original Transformer paper" + seq_len = key.size(2) + # (sequence_length X sequence_length) + bias = self.rel_pos_bias(seq_len) + if seq_len != query.size(2): + if query.size(2) != 1: + raise ValueError("Size mismatch between Q and K in softmax attention") + # (1 X sequence_length) + bias = bias[-1:] + + # scaled attention + query = query * self.scaling + + # (batch_size x number of chunks x chunk_size x chunk_size) if chunking + # (batch_size x 1 x sequence_length x sequence_length) otherwise + qk = torch.matmul(query, key.transpose(2, 3)) + bias + + # apply causal mask (presumed to be 1/0 for not masked / masked) + # additive, but convert to 0/-inf (which is not explicitly in the Mega source code) + if causal_mask is not None: + additive_causal_mask = torch.zeros_like(causal_mask, dtype=qk.dtype) + additive_causal_mask = additive_causal_mask.masked_fill((1 - 
causal_mask).bool(), float("-inf")) + qk = qk + additive_causal_mask + + if padding_mask is not None: + # 1 for tokens which are *not masked* + # 0 for tokens which are *masked* + # replace masked tokens with -inf to make softmax ignore them + # need to invert the padding mask to match what mega original did + padding_mask = 1 - padding_mask + padding_mask_all = padding_mask.all(dim=-1, keepdim=True) + padding_mask = torch.logical_and(padding_mask, ~padding_mask_all) + qk = qk.masked_fill(padding_mask.unsqueeze(2).to(torch.bool), float("-inf")) + + attn_weights = self.softmax(qk).type_as(qk) + return attn_weights + + def forward( + self, + input, + padding_mask: Optional[torch.Tensor] = None, + causal_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[torch.Tensor]] = None, + output_attentions=False, + use_cache=False, + ): + """ + Mega's self-attention block, which combines multi-headed EMA with traditional self-attention + + Args: + input (`torch.Tensor` of shape `(sequence_length, batch_size, hidden_size)`): + Hidden states to be updated by Mega's self-attention + padding_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* + or 0 for *masked* + causal_mask (`torch.LongTensor` of shape `(sequence_length, sequence_length)`, *optional*): + Indicates which inputs are to be ignored due to causal attention, where elements are either 1 for *not + masked* or 0 for *masked* + past_key_values (`tuple(torch.Tensor)`, *optional*): + The hidden states returned from the previous timestep during incremental decoding; expects that + self-attention key, value, and EMA states are the first 3 entries in the tuple + output_attentions (`bool`, default `False`): + Whether to return self-attention weights + use_cache (`bool`, default `False`): + Whether to perfom incremental decoding; uses `past_key_values` as prior state, and returns the updated + states for use in the next step + + Returns: + `tuple(torch.FloatTensor)` containing various elements depending on configuration ([`MegaConfig`]) and + inputs: + - **hidden_states** (`torch.FloatTensor` of shape `(sequence_length, batch_size, hidden_size)`) -- Hidden + states from target sequence updated by Mega's self-attention + - **attn_weights** (*optional*, returned when `output_attentions=True`) `torch.FloatTensor` of shape + `(batch_size, 1, sequence_length, sequence_length)` -- The self-attention weights corresponding to how + each token in the input sequence attends to every other token + - **self_key** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size, + sequence_length, config.shared_representation_size)` -- The self-attention key state for use in the next + step of incremental decoding + - **self_value** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size, + sequence_length, config.hidden_size)` -- The self-attention value state for use in the next step of + incremental decoding + - **self_ema_state** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape + `(batch_size, config.ndim)` The incremental EMA state for use in the next step of incremental decoding. 
+ """ + + seq_len, bsz, embed_dim = input.size() + if embed_dim != self.config.hidden_size: + raise ValueError(f"Input embedding dimension should be {self.config.hidden_size}; received {embed_dim}") + + # store inputs for residual connection and handle pre-norm if requested + residual = input + if self.config.normalize_before_mega: + input = self.norm(input) + + # (sequence_length X batch_size X hidden_size) -> (sequence_length X batch_size X intermediate_size) + value = self.activation(self.v_proj(input)) + + # unpack the incremental state if provided + # assumed to be (self K, self V, self EMA state, cross K, cross V) + # also assumes that incremental decoding is working one token at a time, so input sequence length must be 1 + if self.config.is_decoder and (past_key_values is not None): + if seq_len > 1: + raise ValueError(f"Incremental decoding only supports self sequence length of 1; received {seq_len}") + # the first 3 items in the saved states will be these regardless of whether cross-attention is present + prev_self_key, prev_self_value, prev_ema_state = past_key_values[0:3] + else: + prev_self_key = prev_self_value = prev_ema_state = None + + # ema output is (sequence_length x batch_size x hidden_size) + # updated_ema_state will be None if use_cache=False; otherwise (batch_size, config.ndim) + ema_out, updated_ema_state = self.ema_gate( + input, attention_mask=padding_mask, prev_state=prev_ema_state, use_cache=use_cache + ) + ema_out = self.dropout(ema_out) + + # (sequence_length X batch_size X hidden_size) + # -> (sequence_length X batch_size X 2*hidden_size + config.shared_representation_size + config.intermediate_size) + # - residual_weight -> sigmoid -> applied to residual connection in torch.addcmul + # - query_key_gates -> split into two components: query_key becomes query and key for attention input, gates becomes gating for self-attention output + # - intermediate_state -> added to weighted attention output, sent through activation, and has inputs subtracted during + # torch.addcmul to create the final layer output + base = self.mx_proj(ema_out) + residual_weight, query_key_gates, intermediate_state = torch.split( + base, + [ + self.config.hidden_size, + self.config.shared_representation_size + self.config.intermediate_size, + self.config.hidden_size, + ], + dim=-1, + ) + + # (sequence_length X batch_size X hidden_size) + residual_weight = torch.sigmoid(residual_weight) + + # (sequence_length X batch_size X shared_representation_size + intermediate_size) + query_key_gates = F.silu(query_key_gates) + + # split into two different tensors: one for Q/K usage and the other for gating self-attention + query_key, attention_gate = torch.split( + query_key_gates, [self.config.shared_representation_size, self.config.intermediate_size], dim=-1 + ) + + # (sequence_length X batch_size X shared_representation_size) + # -> (sequence_length X batch_size X 1 X shared_representation_size) + # -> (sequence_length X batch_size X 2 X shared_representation_size) + query_key = query_key.unsqueeze(2) * self.qk_weight + self.qk_bias + + # (sequence_length X batch_size X 2 X shared_representation_size) + # -> 2 tensors of (sequence_length X batch_size X shared_representation_size) + query, key = torch.unbind(query_key, dim=2) + + # (sequence_length X batch_size X dimension) + # -> (batch_size X sequence_length X dimension) + # where `dimension` is either shared_representation_size (queries and keys) or intermediate_size (values) + query = query.transpose(0, 1) + key = key.transpose(0, 1) + value = 
value.transpose(0, 1) + + if self.config.is_decoder: + # combine history and current to save updated state (if history is provided) + # when chunking is applied, the past states will be None at the end of the chunk, in + # which case, proceed as if no K/V history had been provided + # saved states are stored with shape (batch_size X sequence_length X dimension) + if prev_self_key is not None: + key = torch.cat([prev_self_key, key], dim=1) + if prev_self_value is not None: + value = torch.cat([prev_self_value, value], dim=1) + + # if not chunking, store as-is + if not self.config.use_chunking: + updated_self_key = key + updated_self_value = value + else: + curr_len = key.size(1) % self.config.chunk_size + if curr_len == 0: + # if we're chunking and have reached the end of a chunk, wipe out the saved state + updated_self_key = None + updated_self_value = None + else: + updated_self_key = key + updated_self_value = value + + ctx_len = key.size(1) # potentially differs from seq_len because of incremental decoding + if not self.config.use_chunking: + # if we're not chunking, treat the entire sequence as one long chunk + # (batch_size X sequence_length X dimension) -> (batch_size X 1 X sequence_length X dimension) + query = query.unsqueeze(1) + key = key.unsqueeze(1) + value = value.unsqueeze(1) + if padding_mask is not None: + # (batch_size X sequence_length) -> (batch_size X 1 X sequence_length) + padding_mask = padding_mask.unsqueeze(1) + else: + # otherwise, split the sequences in the batch into `n_chunks` chunks of size `chunk_size` + if seq_len < self.config.chunk_size: + query = query.unsqueeze(1) + else: + # (batch_size X sequence_length X dimension) -> (batch_size X n_chunks X chunk_size X dimension) + n_chunks = seq_len // self.config.chunk_size + query = query.reshape(bsz, n_chunks, self.config.chunk_size, self.config.shared_representation_size) + + if ctx_len < self.config.chunk_size: + key = key.unsqueeze(1) + value = value.unsqueeze(1) + if padding_mask is not None: + padding_mask = padding_mask.unsqueeze(1) + else: + # (batch_size X sequence_length X dimension) -> (batch_size X n_chunks X chunk_size X dimension) + n_chunks = ctx_len // self.config.chunk_size + key = key.reshape(bsz, n_chunks, self.config.chunk_size, self.config.shared_representation_size) + value = value.reshape(bsz, n_chunks, self.config.chunk_size, self.config.intermediate_size) + if padding_mask is not None: + padding_mask = padding_mask.view(bsz, n_chunks, self.config.chunk_size) + + # this is in the original Mega implementation to work around fork/join parallelism not supporting optional types + if padding_mask is not None and padding_mask.dim() == 0: + padding_mask = None + + attn_weights = self.attention_function(query, key, padding_mask=padding_mask, causal_mask=causal_mask) + + value = self.hidden_dropout(value, batch_first=True) + kernel = self.attention_dropout(attn_weights) + + # (batch_size x n_chunks x chunk_size x intermediate_size) -> (sequence_length X batch_size X intermediate_size) + weighted_self_output = ( + torch.matmul(kernel, value).view(bsz, seq_len, self.config.intermediate_size).transpose(0, 1) + ) + + # (sequence_length X batch_size X intermediate_size) -> (sequence_length X batch_size X hidden_size) + weighted_self_output = self.activation(intermediate_state + self.h_proj(weighted_self_output * attention_gate)) + weighted_self_output = self.dropout(weighted_self_output) + # (sequence_length X batch_size X hidden_size) + out = torch.addcmul(residual, residual_weight, 
weighted_self_output - residual) + + if not self.config.normalize_before_mega: + out = self.norm(out) + + return_values = (out, attn_weights) if output_attentions else (out,) + + if self.config.is_decoder: + return_values = return_values + (updated_self_key, updated_self_value, updated_ema_state) + + return return_values + + +class MegaNormalizedFeedForwardNetwork(nn.Module): + """ + Normalized feed-forward network used in Mega blocks. Left as-is from original Mega repo aside from retrieving args + from Hugging Face config + """ + + def __init__(self, config: MegaConfig): + super().__init__() + + self.config = config + self.hidden_dim = config.nffn_hidden_size + self.act_fn = config.activation + self.activation = ACT2FN[config.activation] + + self.dropout = MegaDropout(self.config.dropout_prob, is_featurewise=self.config.use_feature_dropout) + self.hidden_dropout = MegaDropout( + self.config.nffn_activation_dropout_prob, is_featurewise=self.config.use_feature_dropout + ) + + self.prenorm = self.config.normalize_before_ffn + self.norm = MegaSequenceNorm( + self.config.normalization_type, self.config.hidden_size, affine=self.config.norm_affine + ) + + self.fc1 = nn.Linear(self.config.hidden_size, self.config.nffn_hidden_size) + self.fc2 = nn.Linear(self.config.nffn_hidden_size, self.config.hidden_size) + + def forward(self, inputs): + residual = inputs + + if self.prenorm: + inputs = self.norm(inputs) + + hidden = self.activation(self.fc1(inputs)) + hidden = self.hidden_dropout(hidden) + output = self.fc2(hidden) + output = self.dropout(output) + output = output + residual + + if not self.prenorm: + output = self.norm(output) + + return output + + +class MegaBlock(nn.Module): + def __init__(self, config: MegaConfig): + super().__init__() + self.seq_len_dim = 1 + self.mega_layer = MegaMovingAverageGatedAttention(config) + self.nffn = MegaNormalizedFeedForwardNetwork(config) if config.use_normalized_ffn else None + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + if self.add_cross_attention: + if not self.is_decoder: + raise ValueError(f"{self} should be used as a decoder model if cross attention is added") + self.cross_attn = MegaGatedCrossAttention(config) + else: + self.cross_attn = None + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.LongTensor] = None, + causal_mask: Optional[torch.LongTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[torch.FloatTensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: bool = False, + ) -> Tuple[torch.Tensor]: + """ + A single Mega layer: either encoder or decoder, with optional cross-attention and optional normalized + feed-forward layer + + Args: + hidden_states (`torch.Tensor` of shape `(target_sequence_length, batch_size, hidden_size)`): + Hidden states to be updated by the Mega block + attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Indicates which entries in the self/target sequence are to be ignored (mostly due to padding), where + elements are either 1 for *not masked* or 0 for *masked*. Causal attention is enforced internally. 
+ causal_mask (`torch.LongTensor` of shape `(sequence_length, sequence_length)`, *optional*): + Indicates which inputs are to be ignored due to causal attention, where elements are either 1 for *not + masked* or 0 for *masked* + encoder_hidden_states (`torch.Tensor`, of shape `(source_sequence_length, batch_size, hidden_size)`, *optional*): + Encoder hidden states to be used for cross-attention (and required for encoder-decoder model setup) + encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, source_sequence_length)`, *optional*): + Indicates which entries in the cross/source sequence are to be ignored (mostly due to padding), where + elements are either 1 for *not masked* or 0 for *masked*. + past_key_value (`tuple(torch.Tensor)`, *optional*): + The hidden states returned from the previous timestep during incremental decoding; expects that + self-attention key, value, and EMA states are the first 3 entries in the tuple, and (if doing + cross-attention) cross-attention key and value are the last 2 entries in the tuple + output_attentions (`bool`, default `False`): + Whether to return self-attention weights + use_cache (`bool`, default `False`): + Whether to perfom incremental decoding; uses `past_key_value` as prior state, and returns the updated + states for use in the next step + + Returns: + `tuple(torch.FloatTensor)` containing various elements depending on configuration ([`MegaConfig`]) and + inputs: + - **hidden_states** (`torch.FloatTensor` of shape `(target_sequence_length, batch_size, hidden_size)`) -- + Hidden states from target sequence updated by Mega + - **self_attn_weights** (*optional*, returned when `output_attentions=True`) `torch.FloatTensor` of shape + `(batch_size, 1, target_sequence_length, target_sequence_length)` -- The self-attention weights + corresponding to how each token in the input sequence attends to every other token + - **cross_attn_weights** (*optional*, returned when `output_attentions=True` and + `config.add_cross_attention=True`) `torch.FloatTensor` of shape `(batch_size, source_sequence_length, + target_sequence_length)` -- Pairwise cross-attention weights between every entry in the source sequence + and target sequence + - **self_key** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size, + sequence_length, config.shared_representation_size)` -- The self-attention key state for use in the next + step of incremental decoding + - **self_value** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size, + sequence_length, config.hidden_size)` -- The self-attention value state for use in the next step of + incremental decoding + - **self_ema_state** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape + `(batch_size, config.ndim)` The incremental EMA state for use in the next step of incremental decoding. 
+ - **cross_key** (*optional*, returned when `use_cache=True` and `config.is_decoder=True`) + `torch.FloatTensor` of shape `(batch_size, source_sequence_length, config.shared_representation_size)` -- + The cross-attention key state for use in the next step of incremental decoding + - **cross_value** (*optional*, returned when `use_cache=True` and `config.is_decoder=True`) + `torch.FloatTensor` of shape `(batch_size, source_sequence_length, config.hidden_size)` -- The + cross-attention value state for use in the next step of incremental decoding + """ + + # incremental decoding in the MegaMultiDimensionDampedEma module requires that the attention mask has the same + # sequence length as the input tensor; if we're caching incremental states, we assume the input + # sequence length is 1 (Mega will break otherwise), so we take the padding mask for the final + # token in the input (mask is received as [batch X sequence length]) + if use_cache and (past_key_value is not None) and (attention_mask is not None): + mega_padding_mask = attention_mask[:, -1].unsqueeze(-1) + else: + mega_padding_mask = attention_mask + + mega_outputs = self.mega_layer( + input=hidden_states, + padding_mask=mega_padding_mask, + causal_mask=causal_mask, + past_key_values=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + new_hidden_states = mega_outputs[0] + self_key, self_value, self_ema_state = mega_outputs[-3:] if use_cache else (None, None, None) + self_attention_weights = mega_outputs[1] if output_attentions else None + + # optional cross attention + if self.cross_attn is not None: + if encoder_hidden_states is None: + raise ValueError("Requested cross-attention without providing encoder hidden states") + + cross_attn_outputs = self.cross_attn( + query=new_hidden_states, + key=encoder_hidden_states, + value=encoder_hidden_states, + key_padding_mask=encoder_attention_mask, + past_key_values=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + # update the hidden state from cross attention + new_hidden_states = cross_attn_outputs[0] + # store cross-attention k/v if caching + cross_key, cross_value = cross_attn_outputs[-2:] if use_cache else (None, None) + cross_attention_weights = cross_attn_outputs[1] if output_attentions else None + + # optional NFFN follows cross attention + if self.nffn is not None: + new_hidden_states = self.nffn(new_hidden_states) + + outs = (new_hidden_states,) + if output_attentions: + outs = outs + (self_attention_weights,) + if self.cross_attn is not None: + outs = outs + (cross_attention_weights,) + + if use_cache: + new_key_values = ( + self_key, + self_value, + self_ema_state, + ) + if self.cross_attn is not None: + new_key_values = new_key_values + (cross_key, cross_value) + + outs = outs + (new_key_values,) + + return outs + + +# copied from transformers.models.roberta.modeling_roberta.RobertaPooler with Roberta->Mega +class MegaPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. 
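+        # (typically the BOS/[CLS] token); hidden_states is expected batch-first here, i.e.
+        # (batch_size, sequence_length, hidden_size), mirroring the RoBERTa pooler this class is copied from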
+ first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class MegaPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = MegaConfig + base_model_prefix = "mega" + supports_gradient_checkpointing = False + _no_split_modules = ["MegaMovingAverageGatedAttention"] + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, MegaMultiDimensionDampedEma): + with torch.no_grad(): + # delta & alpha + nn.init.normal_(module.damping_factor, mean=0.0, std=self.config.ema_delta_alpha_range) + nn.init.normal_(module.decay_factor, mean=0.0, std=self.config.ema_delta_alpha_range) + # beta [1, -1, 1, -1, ...] seems more stable. + val = torch.ones(self.config.ema_projection_size, 1) + if self.config.ema_projection_size > 1: + idx = torch.tensor(list(range(1, self.config.ema_projection_size, 2))) + val.index_fill_(0, idx, -1.0) + module.ema_expansion_matrix.normal_(mean=0.0, std=self.config.ema_beta_range).add_(val) + # gamma & omega + nn.init.normal_(module.kernel_projection_matrix, mean=0.0, std=self.config.ema_gamma_omega_range) + nn.init.normal_(module.residual_weight, mean=0.0, std=self.config.ema_gamma_omega_range) + elif isinstance(module, MegaSimpleRelativePositionalBias): + nn.init.normal_(module.rel_pos_bias, mean=0.0, std=self.config.initializer_range) + elif isinstance(module, MegaRotaryRelativePositionalBias): + nn.init.normal_(module.alpha, mean=0.0, std=self.config.initializer_range) + nn.init.normal_(module.b_param, mean=0.0, std=self.config.initializer_range) + elif isinstance(module, MegaScaleNorm): + if self.config.norm_affine: + nn.init.constant_(module.scalar, 1.0) + elif isinstance(module, MegaRMSNorm): + if self.config.norm_affine: + nn.init.constant_(module.weight, 1.0) + elif isinstance(module, MegaMovingAverageGatedAttention): + # linear layers covered separately by the generic nn.Linear init below + nn.init.normal_(module.qk_weight, mean=0.0, std=self.config.initializer_range) + nn.init.constant_(module.qk_bias, 0.0) + elif isinstance(module, nn.Linear): + # initializes all linear layers in the entire network + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + +MEGA_START_DOCSTRING = r""" + + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`MegaConfig`]): Model configuration class with all the parameters of the + model. Initializing with a config file does not load the weights associated with the model, only the + configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +MEGA_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + This parameter can only be used when the model is initialized with `add_token_type_embeddings` parameter + set to `True`. All the value in this tensor should be always < config.type_vocab_size. + + [What are token type IDs?](../glossary#token-type-ids) + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare MEGA Model transformer outputting raw hidden-states without any specific head on top.", + MEGA_START_DOCSTRING, +) +class MegaModel(MegaPreTrainedModel): + """ + + The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of + cross-attention is added after self-attention, following the architecture described in *Mega: Moving Average + Equipped Gated Attention*_ by Xuezhe Ma, Chunting Zhou, Xiang Kong, Junxian He, Liangke Gui, Graham Neubig, + Jonathan May, and Luke Zettlemoyer + + To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to + `True` and `bidirectional` set to `False`. To be used in a Seq2Seq model, the model needs to initialized with both + `is_decoder=True` and `bidirectional=False` argument as well as `add_cross_attention` set to `True`; an + `encoder_hidden_states` is then expected as an input to the forward pass. + + .. 
_*Mega: Moving Average Equipped Gated Attention*: https://arxiv.org/abs/2209.10655 + + """ + + def __init__(self, config: MegaConfig, add_pooling_layer=True): + super().__init__(config) + self.config = config + + self.embedding_layer = MegaEmbeddings(config) + self.layers = nn.ModuleList([MegaBlock(config) for _ in range(config.num_hidden_layers)]) + + self.pooler = MegaPooler(config) if add_pooling_layer else None + + # Initialize weights and apply final processing (retained from RoBERTa code) + self.post_init() + + def get_input_embeddings(self): + return self.embedding_layer.word_embeddings + + def set_input_embeddings(self, value): + self.embedding_layer.word_embeddings = value + + @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPoolingAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). 
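+
+ Example (an illustrative sketch of incremental decoding with the cache; it reuses the
+ `mnaylor/mega-base-wikitext` checkpoint and the `ignore_mismatched_sizes=True` loading pattern from the
+ `MegaForCausalLM` example below, and any decoder-configured Mega checkpoint would work the same way):
+
+ ```python
+ >>> from transformers import AutoConfig, AutoTokenizer, MegaModel
+
+ >>> config = AutoConfig.from_pretrained("mnaylor/mega-base-wikitext")
+ >>> config.is_decoder = True
+ >>> config.bidirectional = False
+ >>> model = MegaModel.from_pretrained(
+ ...     "mnaylor/mega-base-wikitext", config=config, ignore_mismatched_sizes=True
+ ... )
+ >>> tokenizer = AutoTokenizer.from_pretrained("mnaylor/mega-base-wikitext")
+
+ >>> # first pass over the prompt: request the per-layer cache (self-attention key/value and EMA states)
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs, use_cache=True)
+ >>> past = outputs.past_key_values  # tuple with one cache entry per hidden layer
+
+ >>> # incremental step: feed only the newest token together with the cached states
+ >>> next_token = inputs["input_ids"][:, -1:]  # stand-in for a newly generated token id
+ >>> step = model(input_ids=next_token, past_key_values=past, use_cache=True)
+ ```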
+ """ + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) + input_shape = input_ids.size() + device = input_ids.device + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + device = inputs_embeds.device + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if self.config.use_chunking: + input_shape = torch.tensor([input_shape[0], self.config.chunk_size]) + + batch_size, sequence_length = input_shape + + if self.config.use_chunking and (sequence_length > self.config.chunk_size): + if sequence_length % self.config.chunk_size != 0: + raise ValueError( + f"config.use_chunking is activated; input sequence length must be shorter than or a multiple of config.chunk_size\nreceived sequence length of {sequence_length} with chunk size {self.config.chunk_size}" + ) + + if self.config.is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + + # Mega expects the causal mask to be a 2D square matrix of (from) x (to) over the input sequence length + # the HF utility function generates a 3D causal mask which includes batch size, so we'll create a dummy + # mask with the correct device and all ones + temp_mask_for_extension = torch.ones((1, sequence_length), dtype=torch.long, device=device) + causal_mask = self.create_extended_attention_mask_for_decoder(input_shape, temp_mask_for_extension) + + # get rid of batch dimension in the generated mask; result is (sequence_length X sequence_length) + causal_mask = causal_mask.squeeze(0) + else: + use_cache = False + causal_mask = None + + # if using cache, make sure we have a tuple of tuples which matches the length of our hidden layers + if (past_key_values is not None) and (len(past_key_values) != self.config.num_hidden_layers): + raise ValueError( + f"Received past key/value cache with size mismatch; expected {self.config.num_hidden_layers}, received {len(past_key_values)}" + ) + + # get embeddings (batch X sequence length X embed dim) + embedding_output = self.embedding_layer( + input_ids=input_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds + ) + + # transpose for Mega --> (seq len X batch X embed dim) + hidden_states = embedding_output.transpose(0, 1) + + # we expect encoder hidden states to also have batch first in line + # with typical Hugging Face behavior (which is also how we return them) + # Mega expects sequence length first, so do the same transpose here + if encoder_hidden_states is not None: + encoder_hidden_states = encoder_hidden_states.transpose(0, 1) + + # pass through mega layers + all_hidden_states = (embedding_output,) if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + next_decoder_cache = () if use_cache else None + for i, mega_layer in enumerate(self.layers): + current_decoder_cache = past_key_values[i] if past_key_values is not None else None + mega_outputs = mega_layer( + 
hidden_states=hidden_states, + attention_mask=attention_mask, + causal_mask=causal_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_value=current_decoder_cache, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + hidden_states = mega_outputs[0] + if output_hidden_states: + # store layer-wise hidden states in the way that the user expects + # (seq len X batch X embed dim) --> (batch X seq len X embed dim) + all_hidden_states += (hidden_states.transpose(0, 1),) + if output_attentions: + self_attn_weights = mega_outputs[1] + all_self_attentions += (self_attn_weights,) + if self.config.add_cross_attention: + cross_attn_weights = mega_outputs[2] + all_cross_attentions += (cross_attn_weights,) + if use_cache: + updated_cache = mega_outputs[-1] + next_decoder_cache += (updated_cache,) + + # transpose final hidden states + hidden_states = hidden_states.transpose(0, 1) + + # optional pooling layer + pooled_output = self.pooler(hidden_states) if self.pooler is not None else None + + if not return_dict: + return (hidden_states, pooled_output) + ( + all_hidden_states, + next_decoder_cache, + all_self_attentions, + all_cross_attentions, + ) + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=hidden_states, + pooler_output=pooled_output, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +@add_start_docstrings( + """MEGA Model with a `language modeling` head on top for CLM fine-tuning.""", MEGA_START_DOCSTRING +) +class MegaForCausalLM(MegaPreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config: MegaConfig): + super().__init__(config) + + if not config.is_decoder: + logger.warning("If you want to use `MegaForCausalLM` as a standalone, add `is_decoder=True.`") + + self.mega = MegaModel(config, add_pooling_layer=False) + + if config.add_lm_hidden_dense_layer: + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.hidden_activation = nn.Tanh() + else: + self.dense = None + self.hidden_activation = None + + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) + + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + past_key_values: Tuple[Tuple[torch.FloatTensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the 
encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in + `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are + ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, MegaForCausalLM, AutoConfig + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("mnaylor/mega-base-wikitext") + >>> config = AutoConfig.from_pretrained("mnaylor/mega-base-wikitext") + >>> config.is_decoder = True + >>> config.bidirectional = False + >>> model = MegaForCausalLM.from_pretrained( + ... "mnaylor/mega-base-wikitext", config=config, ignore_mismatched_sizes=True + ... 
) + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + + >>> prediction_logits = outputs.logits + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if labels is not None: + use_cache = False + + outputs = self.mega( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + if self.dense is not None: + sequence_output = self.dense(sequence_output) + sequence_output = self.hidden_activation(sequence_output) + + prediction_scores = self.lm_head(sequence_output) + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss() + lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((lm_loss,) + output) if lm_loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + # cut decoder_input_ids if past is used + if past_key_values is not None: + input_ids = input_ids[:, -1:] + + return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values} + + def _reorder_cache(self, past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past + + +@add_start_docstrings("""MEGA Model with a `language modeling` head on top.""", MEGA_START_DOCSTRING) +class MegaForMaskedLM(MegaPreTrainedModel): + _tied_weights_keys = ["mlm_head.weight"] + + def __init__(self, config: MegaConfig): + super().__init__(config) + + if config.is_decoder: + logger.warning( + "If you want to use `MegaForMaskedLM`, set `config.is_decoder=False` for " + "bi-directional self-attention." 
+ ) + + self.mega = MegaModel(config, add_pooling_layer=False) + if config.add_lm_hidden_dense_layer: + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.hidden_activation = nn.Tanh() + else: + self.dense = None + self.hidden_activation = None + self.mlm_head = nn.Linear(config.hidden_size, config.vocab_size) + self.dropout = nn.Dropout(config.dropout_prob) + + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.mlm_head + + def set_output_embeddings(self, new_embeddings): + self.mlm_head = new_embeddings + + @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MaskedLMOutput, + config_class=_CONFIG_FOR_DOC, + mask="", + expected_output="' Paris'", + expected_loss=0.1, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the + loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + kwargs (`Dict[str, any]`, optional, defaults to *{}*): + Used to hide legacy arguments that have been deprecated. + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.mega( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = outputs[0] + if self.dense is not None: + sequence_output = self.dense(sequence_output) + sequence_output = self.hidden_activation(sequence_output) + prediction_scores = self.mlm_head(sequence_output) + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + + return MaskedLMOutput( + loss=masked_lm_loss, + logits=prediction_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + MEGA Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled + output) e.g. for GLUE tasks. 
+ """, + MEGA_START_DOCSTRING, +) +class MegaForSequenceClassification(MegaPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.config = config + + self.mega = MegaModel(config, add_pooling_layer=False) + self.classifier = MegaClassificationHead(config) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=SequenceClassifierOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.mega( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = outputs[0] + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + MEGA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a + softmax) e.g. for RocStories/SWAG tasks. 
+ """, + MEGA_START_DOCSTRING, +) +class MegaForMultipleChoice(MegaPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.mega = MegaModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, 1) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MultipleChoiceModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., + num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See + `input_ids` above) + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] + + flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None + flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None + flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None + flat_inputs_embeds = ( + inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) + if inputs_embeds is not None + else None + ) + + outputs = self.mega( + flat_input_ids, + token_type_ids=flat_token_type_ids, + attention_mask=flat_attention_mask, + inputs_embeds=flat_inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + reshaped_logits = logits.view(-1, num_choices) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(reshaped_logits, labels) + + if not return_dict: + output = (reshaped_logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return MultipleChoiceModelOutput( + loss=loss, + logits=reshaped_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + MEGA Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for + Named-Entity-Recognition (NER) tasks. 
+ """, + MEGA_START_DOCSTRING, +) +class MegaForTokenClassification(MegaPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.mega = MegaModel(config, add_pooling_layer=False) + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TokenClassifierOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.mega( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +# copied from transformers.models.roberta.modeling_roberta.RobertaClassificationHead with Roberta->Mega +class MegaClassificationHead(nn.Module): + """Head for sentence-level classification tasks.""" + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(classifier_dropout) + self.out_proj = nn.Linear(config.hidden_size, config.num_labels) + + def forward(self, features, **kwargs): + x = features[:, 0, :] # take token (equiv. to [CLS]) + x = self.dropout(x) + x = self.dense(x) + x = torch.tanh(x) + x = self.dropout(x) + x = self.out_proj(x) + return x + + +@add_start_docstrings( + """ + MEGA Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear + layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
+ """, + MEGA_START_DOCSTRING, +) +class MegaForQuestionAnswering(MegaPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.mega = MegaModel(config, add_pooling_layer=False) + self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=QuestionAnsweringModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + start_positions: Optional[torch.LongTensor] = None, + end_positions: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: + r""" + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.mega( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1).contiguous() + end_logits = end_logits.squeeze(-1).contiguous() + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions = start_positions.clamp(0, ignored_index) + end_positions = end_positions.clamp(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/oneformer/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/oneformer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..01bbaa1398142c3cca8800450ee52ea58295719f --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/oneformer/__init__.py @@ -0,0 +1,73 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available + + +_import_structure = { + "configuration_oneformer": ["ONEFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "OneFormerConfig"], + "processing_oneformer": ["OneFormerProcessor"], +} + +try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["image_processing_oneformer"] = ["OneFormerImageProcessor"] + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_oneformer"] = [ + "ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", + "OneFormerForUniversalSegmentation", + "OneFormerModel", + "OneFormerPreTrainedModel", + ] + +if TYPE_CHECKING: + from .configuration_oneformer import ONEFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, OneFormerConfig + from .processing_oneformer import OneFormerProcessor + + try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .image_processing_oneformer import OneFormerImageProcessor + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_oneformer import ( + ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + OneFormerForUniversalSegmentation, + OneFormerModel, + OneFormerPreTrainedModel, + ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) diff --git a/venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd6d784f8bdf2075340ce2008b071dcb4d5e0cf3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/configuration_oneformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/configuration_oneformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..109bcd4b97e5876afc8cbc8564492cb7dc35c930 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/configuration_oneformer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/convert_to_hf_oneformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/convert_to_hf_oneformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1122f916462235b873338e0ca9d1283811c9ff2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/convert_to_hf_oneformer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/image_processing_oneformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/image_processing_oneformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39d80e59d83daaf643c34957f077e143d7286db7 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/image_processing_oneformer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/modeling_oneformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/modeling_oneformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1e8f7ab502d7fd87c91d7bc3725e3606fed0336 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/modeling_oneformer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/processing_oneformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/processing_oneformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4576ae5cc465b4743c99027750acbd9a59ba80f Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/processing_oneformer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/oneformer/configuration_oneformer.py b/venv/lib/python3.10/site-packages/transformers/models/oneformer/configuration_oneformer.py new file mode 100644 index 0000000000000000000000000000000000000000..1cbd2ab7dbc18f098d872cb556cb49655fe85a25 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/oneformer/configuration_oneformer.py @@ -0,0 +1,276 @@ +# coding=utf-8 +# Copyright 2022 SHI Labs and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""OneFormer model configuration""" +from typing import Dict, Optional + +from ...configuration_utils import PretrainedConfig +from ...utils import logging +from ..auto import CONFIG_MAPPING + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import ONEFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class OneFormerConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`OneFormerModel`]. It is used to instantiate a + OneFormer model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the OneFormer + [shi-labs/oneformer_ade20k_swin_tiny](https://huggingface.co/shi-labs/oneformer_ade20k_swin_tiny) architecture + trained on [ADE20k-150](https://huggingface.co/datasets/scene_parse_150). + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + backbone_config (`PretrainedConfig`, *optional*, defaults to `SwinConfig`): + The configuration of the backbone model. + backbone (`str`, *optional*): + Name of backbone to use when `backbone_config` is `None`. 
If `use_pretrained_backbone` is `True`, this + will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` + is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. + use_pretrained_backbone (`bool`, *optional*, defaults to `False`): + Whether to use pretrained weights for the backbone. + use_timm_backbone (`bool`, *optional*, defaults to `False`): + Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers + library. + backbone_kwargs (`dict`, *optional*): + Keyword arguments to be passed to AutoBackbone when loading from a checkpoint + e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. + ignore_value (`int`, *optional*, defaults to 255): + Values to be ignored in GT label while calculating loss. + num_queries (`int`, *optional*, defaults to 150): + Number of object queries. + no_object_weight (`float`, *optional*, defaults to 0.1): + Weight for no-object class predictions. + class_weight (`float`, *optional*, defaults to 2.0): + Weight for Classification CE loss. + mask_weight (`float`, *optional*, defaults to 5.0): + Weight for binary CE loss. + dice_weight (`float`, *optional*, defaults to 5.0): + Weight for dice loss. + contrastive_weight (`float`, *optional*, defaults to 0.5): + Weight for contrastive loss. + contrastive_temperature (`float`, *optional*, defaults to 0.07): + Initial value for scaling the contrastive logits. + train_num_points (`int`, *optional*, defaults to 12544): + Number of points to sample while calculating losses on mask predictions. + oversample_ratio (`float`, *optional*, defaults to 3.0): + Ratio to decide how many points to oversample. + importance_sample_ratio (`float`, *optional*, defaults to 0.75): + Ratio of points that are sampled via importance sampling. + init_std (`float`, *optional*, defaults to 0.02): + Standard deviation for normal intialization. + init_xavier_std (`float`, *optional*, defaults to 1.0): + Standard deviation for xavier uniform initialization. + layer_norm_eps (`float`, *optional*, defaults to 1e-05): + Epsilon for layer normalization. + is_training (`bool`, *optional*, defaults to `False`): + Whether to run in training or inference mode. + use_auxiliary_loss (`bool`, *optional*, defaults to `True`): + Whether to calculate loss using intermediate predictions from transformer decoder. + output_auxiliary_logits (`bool`, *optional*, defaults to `True`): + Whether to return intermediate predictions from transformer decoder. + strides (`list`, *optional*, defaults to `[4, 8, 16, 32]`): + List containing the strides for feature maps in the encoder. + task_seq_len (`int`, *optional*, defaults to 77): + Sequence length for tokenizing text list input. + text_encoder_width (`int`, *optional*, defaults to 256): + Hidden size for text encoder. + text_encoder_context_length (`int`, *optional*, defaults to 77): + Input sequence length for text encoder. + text_encoder_num_layers (`int`, *optional*, defaults to 6): + Number of layers for transformer in text encoder. + text_encoder_vocab_size (`int`, *optional*, defaults to 49408): + Vocabulary size for tokenizer. + text_encoder_proj_layers (`int`, *optional*, defaults to 2): + Number of layers in MLP for project text queries. + text_encoder_n_ctx (`int`, *optional*, defaults to 16): + Number of learnable text context queries. 
+ conv_dim (`int`, *optional*, defaults to 256): + Feature map dimension to map outputs from the backbone. + mask_dim (`int`, *optional*, defaults to 256): + Dimension for feature maps in pixel decoder. + hidden_dim (`int`, *optional*, defaults to 256): + Dimension for hidden states in transformer decoder. + encoder_feedforward_dim (`int`, *optional*, defaults to 1024): + Dimension for FFN layer in pixel decoder. + norm (`str`, *optional*, defaults to `"GN"`): + Type of normalization. + encoder_layers (`int`, *optional*, defaults to 6): + Number of layers in pixel decoder. + decoder_layers (`int`, *optional*, defaults to 10): + Number of layers in transformer decoder. + use_task_norm (`bool`, *optional*, defaults to `True`): + Whether to normalize the task token. + num_attention_heads (`int`, *optional*, defaults to 8): + Number of attention heads in transformer layers in the pixel and transformer decoders. + dropout (`float`, *optional*, defaults to 0.1): + Dropout probability for pixel and transformer decoders. + dim_feedforward (`int`, *optional*, defaults to 2048): + Dimension for FFN layer in transformer decoder. + pre_norm (`bool`, *optional*, defaults to `False`): + Whether to normalize hidden states before attention layers in transformer decoder. + enforce_input_proj (`bool`, *optional*, defaults to `False`): + Whether to project hidden states in transformer decoder. + query_dec_layers (`int`, *optional*, defaults to 2): + Number of layers in query transformer. + common_stride (`int`, *optional*, defaults to 4): + Common stride used for features in pixel decoder. + + Examples: + ```python + >>> from transformers import OneFormerConfig, OneFormerModel + + >>> # Initializing a OneFormer shi-labs/oneformer_ade20k_swin_tiny configuration + >>> configuration = OneFormerConfig() + >>> # Initializing a model (with random weights) from the shi-labs/oneformer_ade20k_swin_tiny style configuration + >>> model = OneFormerModel(configuration) + >>> # Accessing the model configuration + >>> configuration = model.config + ``` + """ + + model_type = "oneformer" + attribute_map = {"hidden_size": "hidden_dim"} + + def __init__( + self, + backbone_config: Optional[Dict] = None, + backbone: Optional[str] = None, + use_pretrained_backbone: bool = False, + use_timm_backbone: bool = False, + backbone_kwargs: Optional[Dict] = None, + ignore_value: int = 255, + num_queries: int = 150, + no_object_weight: int = 0.1, + class_weight: float = 2.0, + mask_weight: float = 5.0, + dice_weight: float = 5.0, + contrastive_weight: float = 0.5, + contrastive_temperature: float = 0.07, + train_num_points: int = 12544, + oversample_ratio: float = 3.0, + importance_sample_ratio: float = 0.75, + init_std: float = 0.02, + init_xavier_std: float = 1.0, + layer_norm_eps: float = 1e-05, + is_training: bool = False, + use_auxiliary_loss: bool = True, + output_auxiliary_logits: bool = True, + strides: Optional[list] = [4, 8, 16, 32], + task_seq_len: int = 77, + text_encoder_width: int = 256, + text_encoder_context_length: int = 77, + text_encoder_num_layers: int = 6, + text_encoder_vocab_size: int = 49408, + text_encoder_proj_layers: int = 2, + text_encoder_n_ctx: int = 16, + conv_dim: int = 256, + mask_dim: int = 256, + hidden_dim: int = 256, + encoder_feedforward_dim: int = 1024, + norm: str = "GN", + encoder_layers: int = 6, + decoder_layers: int = 10, + use_task_norm: bool = True, + num_attention_heads: int = 8, + dropout: float = 0.1, + dim_feedforward: int = 2048, + pre_norm: bool = False, + enforce_input_proj: bool = 
False, + query_dec_layers: int = 2, + common_stride: int = 4, + **kwargs, + ): + if use_pretrained_backbone: + raise ValueError("Pretrained backbones are not supported yet.") + + if backbone_config is not None and backbone is not None: + raise ValueError("You can't specify both `backbone` and `backbone_config`.") + + if backbone_config is None and backbone is None: + logger.info("`backbone_config` is unset. Initializing the config with the default `Swin` backbone.") + backbone_config = CONFIG_MAPPING["swin"]( + image_size=224, + in_channels=3, + patch_size=4, + embed_dim=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + drop_path_rate=0.3, + use_absolute_embeddings=False, + out_features=["stage1", "stage2", "stage3", "stage4"], + ) + elif isinstance(backbone_config, dict): + backbone_model_type = backbone_config.get("model_type") + config_class = CONFIG_MAPPING[backbone_model_type] + backbone_config = config_class.from_dict(backbone_config) + + if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None: + raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.") + + self.backbone_config = backbone_config + self.backbone = backbone + self.use_pretrained_backbone = use_pretrained_backbone + self.use_timm_backbone = use_timm_backbone + self.backbone_kwargs = backbone_kwargs + self.ignore_value = ignore_value + self.num_queries = num_queries + self.no_object_weight = no_object_weight + self.class_weight = class_weight + self.mask_weight = mask_weight + self.dice_weight = dice_weight + self.contrastive_weight = contrastive_weight + self.contrastive_temperature = contrastive_temperature + self.train_num_points = train_num_points + self.oversample_ratio = oversample_ratio + self.importance_sample_ratio = importance_sample_ratio + self.init_std = init_std + self.init_xavier_std = init_xavier_std + self.layer_norm_eps = layer_norm_eps + self.is_training = is_training + self.use_auxiliary_loss = use_auxiliary_loss + self.output_auxiliary_logits = output_auxiliary_logits + self.strides = strides + self.task_seq_len = task_seq_len + self.text_encoder_width = text_encoder_width + self.text_encoder_context_length = text_encoder_context_length + self.text_encoder_num_layers = text_encoder_num_layers + self.text_encoder_vocab_size = text_encoder_vocab_size + self.text_encoder_proj_layers = text_encoder_proj_layers + self.text_encoder_n_ctx = text_encoder_n_ctx + self.conv_dim = conv_dim + self.mask_dim = mask_dim + self.hidden_dim = hidden_dim + self.encoder_feedforward_dim = encoder_feedforward_dim + self.norm = norm + self.encoder_layers = encoder_layers + self.decoder_layers = decoder_layers + self.use_task_norm = use_task_norm + self.num_attention_heads = num_attention_heads + self.dropout = dropout + self.dim_feedforward = dim_feedforward + self.pre_norm = pre_norm + self.enforce_input_proj = enforce_input_proj + self.query_dec_layers = query_dec_layers + self.common_stride = common_stride + self.num_hidden_layers = decoder_layers + + super().__init__(**kwargs) diff --git a/venv/lib/python3.10/site-packages/transformers/models/oneformer/convert_to_hf_oneformer.py b/venv/lib/python3.10/site-packages/transformers/models/oneformer/convert_to_hf_oneformer.py new file mode 100644 index 0000000000000000000000000000000000000000..6e88d8a0555fa2a6d283720e528232de3d999274 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/oneformer/convert_to_hf_oneformer.py @@ -0,0 +1,1191 @@ +# coding=utf-8 +# Copyright 2022 SHI 
Labs and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Convert OneFormer checkpoints from the original repository. URL: https://github.com/SHI-Labs/OneFormer""" + +import os +import sys +from argparse import ArgumentParser +from dataclasses import dataclass +from pathlib import Path +from pprint import pformat +from typing import Any, Dict, Iterator, List, Set, Tuple + +import requests +import torch +import torchvision.transforms as T +from PIL import Image +from torch import Tensor, nn + + +try: + from detectron2.checkpoint import DetectionCheckpointer + from detectron2.config import get_cfg + from detectron2.data import MetadataCatalog + from detectron2.projects.deeplab import add_deeplab_config +except ImportError: + pass +from transformers import CLIPTokenizer, DinatConfig, SwinConfig +from transformers.models.oneformer.image_processing_oneformer import OneFormerImageProcessor +from transformers.models.oneformer.modeling_oneformer import ( + OneFormerConfig, + OneFormerForUniversalSegmentation, + OneFormerForUniversalSegmentationOutput, + OneFormerModel, + OneFormerModelOutput, +) +from transformers.models.oneformer.processing_oneformer import OneFormerProcessor +from transformers.utils import logging + + +StateDict = Dict[str, Tensor] + +logging.set_verbosity_info() +logger = logging.get_logger() + +torch.manual_seed(0) + + +class TrackedStateDict: + def __init__(self, to_track: Dict): + """This class "tracks" a python dictionary by keeping track of which item is accessed. + + Args: + to_track (Dict): The dictionary we wish to track + """ + self.to_track = to_track + self._seen: Set[str] = set() + + def __getitem__(self, key: str) -> Any: + return self.to_track[key] + + def __setitem__(self, key: str, item: Any): + self._seen.add(key) + self.to_track[key] = item + + def diff(self) -> List[str]: + """This method returns a set difference between the keys in the tracked state dict and the one we have access so far. 
+ This is an effective method to check if we have update all the keys + + Returns: + List[str]: List of keys not yet updated + """ + return set(self.to_track.keys()) - self._seen + + def copy(self) -> Dict: + # proxy the call to the internal dictionary + return self.to_track.copy() + + +# Image to verify the result +def prepare_img(): + url = "https://praeclarumjj3.github.io/files/coco.jpeg" + img_data = requests.get(url, stream=True).raw + im = Image.open(img_data) + return im + + +@dataclass +class Args: + """Fake command line arguments needed by oneformer/detectron2 implementation""" + + config_file: str + + +def setup_cfg(args: Args): + # load config from file and command-line arguments + cfg = get_cfg() + add_deeplab_config(cfg) + add_common_config(cfg) + add_oneformer_config(cfg) + add_swin_config(cfg) + add_dinat_config(cfg) + cfg.merge_from_file(args.config_file) + cfg.freeze() + return cfg + + +class OriginalOneFormerConfigToOursConverter: + def __call__(self, original_config: object, is_swin: bool) -> OneFormerConfig: + model = original_config.MODEL + + dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST_PANOPTIC[0]) + id2label = dict(enumerate(dataset_catalog.stuff_classes)) + label2id = {label: idx for idx, label in id2label.items()} + + if is_swin: + if model.SWIN.EMBED_DIM == 96: + backbone_config = SwinConfig.from_pretrained( + "microsoft/swin-tiny-patch4-window7-224", + drop_path_rate=model.SWIN.DROP_PATH_RATE, + out_features=["stage1", "stage2", "stage3", "stage4"], + ) + elif model.SWIN.EMBED_DIM == 192: + backbone_config = SwinConfig.from_pretrained( + "microsoft/swin-large-patch4-window12-384", + drop_path_rate=model.SWIN.DROP_PATH_RATE, + out_features=["stage1", "stage2", "stage3", "stage4"], + ) + else: + raise ValueError(f"embed dim {model.SWIN.EMBED_DIM} not supported for Swin!") + else: + backbone_config = DinatConfig.from_pretrained( + "shi-labs/dinat-large-11x11-in22k-in1k-384", + dilations=model.DiNAT.DILATIONS, + kernel_size=model.DiNAT.KERNEL_SIZE, + out_features=["stage1", "stage2", "stage3", "stage4"], + ) + + config: OneFormerConfig = OneFormerConfig( + backbone_config=backbone_config, + output_attentions=True, + output_hidden_states=True, + return_dict=True, + ignore_value=model.SEM_SEG_HEAD.IGNORE_VALUE, + num_classes=model.SEM_SEG_HEAD.NUM_CLASSES, + num_queries=model.ONE_FORMER.NUM_OBJECT_QUERIES, + no_object_weight=model.ONE_FORMER.NO_OBJECT_WEIGHT, + class_weight=model.ONE_FORMER.CLASS_WEIGHT, + mask_weight=model.ONE_FORMER.MASK_WEIGHT, + dice_weight=model.ONE_FORMER.DICE_WEIGHT, + contrastive_weight=model.ONE_FORMER.CONTRASTIVE_WEIGHT, + contrastive_temperature=model.ONE_FORMER.CONTRASTIVE_TEMPERATURE, + train_num_points=model.ONE_FORMER.TRAIN_NUM_POINTS, + oversample_ratio=model.ONE_FORMER.OVERSAMPLE_RATIO, + importance_sample_ratio=model.ONE_FORMER.IMPORTANCE_SAMPLE_RATIO, + init_std=0.02, + init_xavier_std=1.0, + layer_norm_eps=1e-05, + is_training=False, + use_auxiliary_loss=model.ONE_FORMER.DEEP_SUPERVISION, + output_auxiliary_logits=True, + strides=[4, 8, 16, 32], + task_seq_len=original_config.INPUT.TASK_SEQ_LEN, + max_seq_len=original_config.INPUT.MAX_SEQ_LEN, + text_encoder_width=model.TEXT_ENCODER.WIDTH, + text_encoder_context_length=model.TEXT_ENCODER.CONTEXT_LENGTH, + text_encoder_num_layers=model.TEXT_ENCODER.NUM_LAYERS, + text_encoder_vocab_size=model.TEXT_ENCODER.VOCAB_SIZE, + text_encoder_proj_layers=model.TEXT_ENCODER.PROJ_NUM_LAYERS, + text_encoder_n_ctx=model.TEXT_ENCODER.N_CTX, + 
conv_dim=model.SEM_SEG_HEAD.CONVS_DIM, + mask_dim=model.SEM_SEG_HEAD.MASK_DIM, + hidden_dim=model.ONE_FORMER.HIDDEN_DIM, + norm=model.SEM_SEG_HEAD.NORM, + encoder_layers=model.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS, + encoder_feedforward_dim=1024, + decoder_layers=model.ONE_FORMER.DEC_LAYERS, + use_task_norm=model.ONE_FORMER.USE_TASK_NORM, + num_attention_heads=model.ONE_FORMER.NHEADS, + dropout=model.ONE_FORMER.DROPOUT, + dim_feedforward=model.ONE_FORMER.DIM_FEEDFORWARD, + pre_norm=model.ONE_FORMER.PRE_NORM, + enforce_input_proj=model.ONE_FORMER.ENFORCE_INPUT_PROJ, + query_dec_layers=model.ONE_FORMER.CLASS_DEC_LAYERS, + common_stride=model.SEM_SEG_HEAD.COMMON_STRIDE, + id2label=id2label, + label2id=label2id, + ) + + return config + + +class OriginalOneFormerConfigToProcessorConverter: + def __call__(self, original_config: object, model_repo: str) -> OneFormerProcessor: + model = original_config.MODEL + model_input = original_config.INPUT + dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST_PANOPTIC[0]) + + if "ade20k" in model_repo: + class_info_file = "ade20k_panoptic.json" + elif "coco" in model_repo: + class_info_file = "coco_panoptic.json" + elif "cityscapes" in model_repo: + class_info_file = "cityscapes_panoptic.json" + else: + raise ValueError("Invalid Dataset!") + + image_processor = OneFormerImageProcessor( + image_mean=(torch.tensor(model.PIXEL_MEAN) / 255).tolist(), + image_std=(torch.tensor(model.PIXEL_STD) / 255).tolist(), + size=model_input.MIN_SIZE_TEST, + max_size=model_input.MAX_SIZE_TEST, + num_labels=model.SEM_SEG_HEAD.NUM_CLASSES, + ignore_index=dataset_catalog.ignore_label, + class_info_file=class_info_file, + ) + + tokenizer = CLIPTokenizer.from_pretrained(model_repo) + + return OneFormerProcessor( + image_processor=image_processor, + tokenizer=tokenizer, + task_seq_length=original_config.INPUT.TASK_SEQ_LEN, + max_seq_length=original_config.INPUT.MAX_SEQ_LEN, + ) + + +class OriginalOneFormerCheckpointToOursConverter: + def __init__(self, original_model: nn.Module, config: OneFormerConfig): + self.original_model = original_model + self.config = config + + def pop_all(self, renamed_keys: List[Tuple[str, str]], dst_state_dict: StateDict, src_state_dict: StateDict): + for src_key, dst_key in renamed_keys: + dst_state_dict[dst_key] = src_state_dict.pop(src_key) + + # Swin Backbone + def replace_swin_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: OneFormerConfig): + dst_prefix: str = "pixel_level_module.encoder" + src_prefix: str = "backbone" + + renamed_keys = [ + ( + f"{src_prefix}.patch_embed.proj.weight", + f"{dst_prefix}.embeddings.patch_embeddings.projection.weight", + ), + (f"{src_prefix}.patch_embed.proj.bias", f"{dst_prefix}.embeddings.patch_embeddings.projection.bias"), + (f"{src_prefix}.patch_embed.norm.weight", f"{dst_prefix}.embeddings.norm.weight"), + (f"{src_prefix}.patch_embed.norm.bias", f"{dst_prefix}.embeddings.norm.bias"), + ] + num_layers = len(config.backbone_config.depths) + for layer_idx in range(num_layers): + for block_idx in range(config.backbone_config.depths[layer_idx]): + renamed_keys.extend( + [ # src, dst + ( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.weight", + f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.weight", + ), + ( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.bias", + f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.bias", + ), + ( + 
f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_bias_table", + f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_bias_table", + ), + ] + ) + # now we need to handle the attentions + # read in weights + bias of input projection layer of cross-attention + + src_att_weight = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"] + src_att_bias = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"] + + size = src_att_weight.shape[0] + offset = size // 3 + dst_state_dict[ + f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.weight" + ] = src_att_weight[:offset, :] + dst_state_dict[ + f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.bias" + ] = src_att_bias[:offset] + + dst_state_dict[ + f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.weight" + ] = src_att_weight[offset : offset * 2, :] + dst_state_dict[ + f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.bias" + ] = src_att_bias[offset : offset * 2] + + dst_state_dict[ + f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.weight" + ] = src_att_weight[-offset:, :] + dst_state_dict[ + f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.bias" + ] = src_att_bias[-offset:] + + # let's pop them + src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight") + src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias") + # proj + renamed_keys.extend( + [ + ( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.weight", + f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.weight", + ), + ( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.bias", + f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.bias", + ), + ] + ) + + # second norm + renamed_keys.extend( + [ + ( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.weight", + f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.weight", + ), + ( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.bias", + f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.bias", + ), + ] + ) + + # mlp + renamed_keys.extend( + [ + ( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.weight", + f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.weight", + ), + ( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.bias", + f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.bias", + ), + ( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.weight", + f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.weight", + ), + ( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.bias", + f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.bias", + ), + ] + ) + + renamed_keys.extend( + [ + ( + f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_index", + f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_index", + ) + ] + ) + + if layer_idx < num_layers - 1: + # patch merging + renamed_keys.extend( + [ + ( + f"{src_prefix}.layers.{layer_idx}.downsample.reduction.weight", + 
f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.reduction.weight", + ), + ( + f"{src_prefix}.layers.{layer_idx}.downsample.norm.weight", + f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.norm.weight", + ), + ( + f"{src_prefix}.layers.{layer_idx}.downsample.norm.bias", + f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.norm.bias", + ), + ] + ) + + # hidden states norms + renamed_keys.extend( + [ + ( + f"{src_prefix}.norm{layer_idx}.weight", + f"{dst_prefix}.hidden_states_norms.stage{layer_idx+1}.weight", + ), + ( + f"{src_prefix}.norm{layer_idx}.bias", + f"{dst_prefix}.hidden_states_norms.stage{layer_idx+1}.bias", + ), + ] + ) + + self.pop_all(renamed_keys, dst_state_dict, src_state_dict) + + # Dinat Backbone + def replace_dinat_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: OneFormerConfig): + dst_prefix: str = "pixel_level_module.encoder" + src_prefix: str = "backbone" + + def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str): + return [ + (f"{src_prefix}.weight", f"{dst_prefix}.weight"), + (f"{src_prefix}.bias", f"{dst_prefix}.bias"), + ] + + renamed_keys = rename_keys_for_weight_bias(f"{src_prefix}.patch_embed.norm", f"{dst_prefix}.embeddings.norm") + + for i in range(2): + renamed_keys.extend( + rename_keys_for_weight_bias( + f"{src_prefix}.patch_embed.proj.{i}", + f"{dst_prefix}.embeddings.patch_embeddings.projection.{i}", + ) + ) + + num_layers = len(config.backbone_config.depths) + for layer_idx in range(num_layers): + for block_idx in range(config.backbone_config.depths[layer_idx]): + renamed_keys.extend( + rename_keys_for_weight_bias( + f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.norm1", + f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.layernorm_before", + ) + ) + + renamed_keys.extend( + rename_keys_for_weight_bias( + f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.norm2", + f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.layernorm_after", + ) + ) + + renamed_keys.extend( + [ # src, dst + ( + f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.rpb", + f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.rpb", + ), + ] + ) + # now we need to handle the attentions + # read in weights + bias of input projection layer of cross-attention + + src_att_weight = src_state_dict[f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"] + src_att_bias = src_state_dict[f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"] + + size = src_att_weight.shape[0] + offset = size // 3 + dst_state_dict[ + f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.query.weight" + ] = src_att_weight[:offset, :] + dst_state_dict[ + f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.query.bias" + ] = src_att_bias[:offset] + + dst_state_dict[ + f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.key.weight" + ] = src_att_weight[offset : offset * 2, :] + dst_state_dict[ + f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.key.bias" + ] = src_att_bias[offset : offset * 2] + + dst_state_dict[ + f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.value.weight" + ] = src_att_weight[-offset:, :] + dst_state_dict[ + f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.value.bias" + ] = src_att_bias[-offset:] + + # let's pop them + src_state_dict.pop(f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.weight") + 
src_state_dict.pop(f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.bias") + # proj + + renamed_keys.extend( + rename_keys_for_weight_bias( + f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.proj", + f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.output.dense", + ) + ) + + # mlp + renamed_keys.extend( + rename_keys_for_weight_bias( + f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.mlp.fc1", + f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.intermediate.dense", + ) + ) + + renamed_keys.extend( + rename_keys_for_weight_bias( + f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.mlp.fc2", + f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.output.dense", + ) + ) + + if layer_idx < num_layers - 1: + # patch merging + renamed_keys.extend( + [ + ( + f"{src_prefix}.levels.{layer_idx}.downsample.reduction.weight", + f"{dst_prefix}.encoder.levels.{layer_idx}.downsample.reduction.weight", + ), + ( + f"{src_prefix}.levels.{layer_idx}.downsample.norm.weight", + f"{dst_prefix}.encoder.levels.{layer_idx}.downsample.norm.weight", + ), + ( + f"{src_prefix}.levels.{layer_idx}.downsample.norm.bias", + f"{dst_prefix}.encoder.levels.{layer_idx}.downsample.norm.bias", + ), + ] + ) + + # hidden states norms + renamed_keys.extend( + [ + ( + f"{src_prefix}.norm{layer_idx}.weight", + f"{dst_prefix}.hidden_states_norms.stage{layer_idx+1}.weight", + ), + ( + f"{src_prefix}.norm{layer_idx}.bias", + f"{dst_prefix}.hidden_states_norms.stage{layer_idx+1}.bias", + ), + ] + ) + + self.pop_all(renamed_keys, dst_state_dict, src_state_dict) + + # Backbone + Pixel Decoder + def replace_pixel_module(self, dst_state_dict: StateDict, src_state_dict: StateDict, is_swin: bool): + dst_prefix: str = "pixel_level_module.decoder" + src_prefix: str = "sem_seg_head.pixel_decoder" + + if is_swin: + self.replace_swin_backbone(dst_state_dict, src_state_dict, self.config) + else: + self.replace_dinat_backbone(dst_state_dict, src_state_dict, self.config) + + def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str): + return [ + (f"{src_prefix}.weight", f"{dst_prefix}.weight"), + (f"{src_prefix}.bias", f"{dst_prefix}.bias"), + ] + + def rename_keys_for_self_attn(src_prefix: str, dst_prefix: str): + self_attn_keys = [] + self_attn_keys.extend( + rename_keys_for_weight_bias(f"{src_prefix}.attention_weights", f"{dst_prefix}.attention_weights") + ) + self_attn_keys.extend( + rename_keys_for_weight_bias(f"{src_prefix}.output_proj", f"{dst_prefix}.output_proj") + ) + self_attn_keys.extend( + rename_keys_for_weight_bias(f"{src_prefix}.sampling_offsets", f"{dst_prefix}.sampling_offsets") + ) + self_attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.value_proj", f"{dst_prefix}.value_proj")) + + return self_attn_keys + + def rename_keys_for_encoder_layer(src_prefix: str, dst_prefix: str): + encoder_keys = [] + encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear1", f"{dst_prefix}.fc1")) + encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear2", f"{dst_prefix}.fc2")) + encoder_keys.extend( + rename_keys_for_weight_bias(f"{src_prefix}.norm1", f"{dst_prefix}.self_attn_layer_norm") + ) + encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm2", f"{dst_prefix}.final_layer_norm")) + encoder_keys.extend(rename_keys_for_self_attn(f"{src_prefix}.self_attn", f"{dst_prefix}.self_attn")) + + return encoder_keys + + # convolution layer for final features + renamed_keys = [ + (f"{src_prefix}.adapter_1.weight", 
f"{dst_prefix}.adapter_1.0.weight"), + (f"{src_prefix}.adapter_1.norm.weight", f"{dst_prefix}.adapter_1.1.weight"), + (f"{src_prefix}.adapter_1.norm.bias", f"{dst_prefix}.adapter_1.1.bias"), + ] + + renamed_keys.extend( + [ + (f"{src_prefix}.layer_1.weight", f"{dst_prefix}.layer_1.0.weight"), + (f"{src_prefix}.layer_1.norm.weight", f"{dst_prefix}.layer_1.1.weight"), + (f"{src_prefix}.layer_1.norm.bias", f"{dst_prefix}.layer_1.1.bias"), + ] + ) + + # proj layers + for i in range(3): + for j in range(2): + renamed_keys.extend( + [ + (f"{src_prefix}.input_proj.{i}.{j}.weight", f"{dst_prefix}.input_projections.{i}.{j}.weight"), + (f"{src_prefix}.input_proj.{i}.{j}.bias", f"{dst_prefix}.input_projections.{i}.{j}.bias"), + ] + ) + + renamed_keys.extend([(f"{src_prefix}.transformer.level_embed", f"{dst_prefix}.level_embed")]) + + # layers + for layer_idx in range(self.config.encoder_layers): + renamed_keys.extend( + rename_keys_for_encoder_layer( + f"{src_prefix}.transformer.encoder.layers.{layer_idx}", f"{dst_prefix}.encoder.layers.{layer_idx}" + ) + ) + + # proj + renamed_keys.extend( + [ + (f"{src_prefix}.mask_features.weight", f"{dst_prefix}.mask_projection.weight"), + (f"{src_prefix}.mask_features.bias", f"{dst_prefix}.mask_projection.bias"), + ] + ) + + self.pop_all(renamed_keys, dst_state_dict, src_state_dict) + + # Transformer Decoder + def replace_keys_qkv_transformer_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict): + dst_prefix: str = "transformer_module.decoder.layers" + src_prefix: str = "sem_seg_head.predictor" + for i in range(self.config.decoder_layers - 1): + # read in weights + bias of input projection layer of self-attention + in_proj_weight = src_state_dict.pop( + f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.in_proj_weight" + ) + in_proj_bias = src_state_dict.pop( + f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.in_proj_bias" + ) + # next, add query, keys and values (in that order) to the state dict + dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.q_proj.weight"] = in_proj_weight[:256, :] + dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.q_proj.bias"] = in_proj_bias[:256] + dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] + dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.k_proj.bias"] = in_proj_bias[256:512] + dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] + dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.v_proj.bias"] = in_proj_bias[-256:] + + def replace_transformer_module(self, dst_state_dict: StateDict, src_state_dict: StateDict): + dst_prefix: str = "transformer_module" + src_prefix: str = "sem_seg_head.predictor" + + def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str): + return [ + (f"{src_prefix}.weight", f"{dst_prefix}.weight"), + (f"{src_prefix}.bias", f"{dst_prefix}.bias"), + ] + + def rename_keys_for_attn(src_prefix: str, dst_prefix: str): + attn_keys = [ + (f"{src_prefix}.in_proj_bias", f"{dst_prefix}.in_proj_bias"), + (f"{src_prefix}.in_proj_weight", f"{dst_prefix}.in_proj_weight"), + ] + attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.out_proj", f"{dst_prefix}.out_proj")) + + return attn_keys + + def rename_keys_for_self_attn(src_prefix: str, dst_prefix: str): + attn_keys = [] + attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.out_proj", f"{dst_prefix}.out_proj")) + + return attn_keys + + def rename_keys_for_query_transformer_layer(src_prefix: str, 
dst_prefix: str): + query_transformer_layer_keys = [] + + query_transformer_layer_keys.extend( + rename_keys_for_weight_bias(f"{src_prefix}.linear1", f"{dst_prefix}.linear1") + ) + query_transformer_layer_keys.extend( + rename_keys_for_weight_bias(f"{src_prefix}.linear2", f"{dst_prefix}.linear2") + ) + query_transformer_layer_keys.extend( + rename_keys_for_weight_bias(f"{src_prefix}.norm1", f"{dst_prefix}.norm1") + ) + query_transformer_layer_keys.extend( + rename_keys_for_weight_bias(f"{src_prefix}.norm2", f"{dst_prefix}.norm2") + ) + query_transformer_layer_keys.extend( + rename_keys_for_weight_bias(f"{src_prefix}.norm3", f"{dst_prefix}.norm3") + ) + + query_transformer_layer_keys.extend( + rename_keys_for_attn(f"{src_prefix}.self_attn", f"{dst_prefix}.self_attn") + ) + + query_transformer_layer_keys.extend( + rename_keys_for_attn(f"{src_prefix}.multihead_attn", f"{dst_prefix}.multihead_attn") + ) + + return query_transformer_layer_keys + + def rename_keys_for_cross_attn_layer(src_prefix: str, dst_prefix: str): + cross_attn_layer_keys = [] + + cross_attn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm", f"{dst_prefix}.norm")) + cross_attn_layer_keys.extend( + rename_keys_for_attn(f"{src_prefix}.multihead_attn", f"{dst_prefix}.multihead_attn") + ) + + return cross_attn_layer_keys + + def rename_keys_for_self_attn_layer(src_prefix: str, dst_prefix: str): + self_attn_layer_keys = [] + + self_attn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm", f"{dst_prefix}.norm")) + self_attn_layer_keys.extend( + rename_keys_for_self_attn(f"{src_prefix}.self_attn", f"{dst_prefix}.self_attn") + ) + + return self_attn_layer_keys + + def rename_keys_for_ffn_layer(src_prefix: str, dst_prefix: str): + ffn_layer_keys = [] + + ffn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear1", f"{dst_prefix}.linear1")) + ffn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear2", f"{dst_prefix}.linear2")) + ffn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm", f"{dst_prefix}.norm")) + + return ffn_layer_keys + + def rename_keys_for_transformer_decoder_layer(src_prefix: str, dst_prefix: str, idx: int): + transformer_decoder_layer_keys = [] + + transformer_decoder_layer_keys.extend( + rename_keys_for_cross_attn_layer( + f"{src_prefix}.transformer_cross_attention_layers.{idx}", f"{dst_prefix}.{idx}.cross_attn" + ) + ) + + transformer_decoder_layer_keys.extend( + rename_keys_for_self_attn_layer( + f"{src_prefix}.transformer_self_attention_layers.{idx}", f"{dst_prefix}.{idx}.self_attn" + ) + ) + + transformer_decoder_layer_keys.extend( + rename_keys_for_ffn_layer(f"{src_prefix}.transformer_ffn_layers.{idx}", f"{dst_prefix}.{idx}.ffn") + ) + + return transformer_decoder_layer_keys + + # positional embedding for object queries + renamed_keys = [ + (f"{src_prefix}.query_embed.weight", f"{dst_prefix}.queries_embedder.weight"), + (f"{src_prefix}.level_embed.weight", f"{dst_prefix}.level_embed.weight"), + ] + + # norm + renamed_keys.extend( + rename_keys_for_weight_bias(f"{src_prefix}.decoder_norm", f"{dst_prefix}.decoder.decoder_norm") + ) + + # proj + renamed_keys.extend( + rename_keys_for_weight_bias( + f"{src_prefix}.class_input_proj", f"{dst_prefix}.decoder.query_input_projection" + ) + ) + + renamed_keys.extend( + rename_keys_for_weight_bias(f"{src_prefix}.class_embed", f"{dst_prefix}.decoder.class_embed") + ) + + for i in range(3): + renamed_keys.extend( + rename_keys_for_weight_bias( + f"{src_prefix}.mask_embed.layers.{i}", 
f"{dst_prefix}.decoder.mask_embed.layers.{i}.0" + ) + ) + + # norm + renamed_keys.extend( + rename_keys_for_weight_bias( + f"{src_prefix}.class_transformer.decoder.norm", f"{dst_prefix}.decoder.query_transformer.decoder.norm" + ) + ) + + # transformer to update queries with task tokens + for i in range(self.config.query_dec_layers): + renamed_keys.extend( + rename_keys_for_query_transformer_layer( + f"{src_prefix}.class_transformer.decoder.layers.{i}", + f"{dst_prefix}.decoder.query_transformer.decoder.layers.{i}", + ) + ) + + # decoder layers + for i in range(self.config.decoder_layers - 1): + renamed_keys.extend( + rename_keys_for_transformer_decoder_layer( + f"{src_prefix}", + f"{dst_prefix}.decoder.layers", + i, + ) + ) + + self.pop_all(renamed_keys, dst_state_dict, src_state_dict) + self.replace_keys_qkv_transformer_decoder(dst_state_dict, src_state_dict) + + def replace_task_mlp(self, dst_state_dict: StateDict, src_state_dict: StateDict): + dst_prefix: str = "task_encoder" + src_prefix: str = "task_mlp" + + def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str): + return [ + (f"{src_prefix}.weight", f"{dst_prefix}.weight"), + (f"{src_prefix}.bias", f"{dst_prefix}.bias"), + ] + + renamed_keys = [] + + for i in range(2): + renamed_keys.extend( + rename_keys_for_weight_bias(f"{src_prefix}.layers.{i}", f"{dst_prefix}.task_mlp.layers.{i}.0") + ) + + self.pop_all(renamed_keys, dst_state_dict, src_state_dict) + + def replace_text_projector(self, dst_state_dict: StateDict, src_state_dict: StateDict): + dst_prefix: str = "text_mapper.text_projector" + src_prefix: str = "text_projector" + + def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str): + return [ + (f"{src_prefix}.weight", f"{dst_prefix}.weight"), + (f"{src_prefix}.bias", f"{dst_prefix}.bias"), + ] + + renamed_keys = [] + + for i in range(self.config.text_encoder_config["text_encoder_proj_layers"]): + renamed_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.layers.{i}", f"{dst_prefix}.{i}.0")) + + self.pop_all(renamed_keys, dst_state_dict, src_state_dict) + + def replace_text_mapper(self, dst_state_dict: StateDict, src_state_dict: StateDict): + dst_prefix: str = "text_mapper.text_encoder" + src_prefix: str = "text_encoder" + + self.replace_text_projector(dst_state_dict, src_state_dict) + + def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str): + return [ + (f"{src_prefix}.weight", f"{dst_prefix}.weight"), + (f"{src_prefix}.bias", f"{dst_prefix}.bias"), + ] + + def rename_keys_for_attn(src_prefix: str, dst_prefix: str): + attn_keys = [ + (f"{src_prefix}.in_proj_bias", f"{dst_prefix}.in_proj_bias"), + (f"{src_prefix}.in_proj_weight", f"{dst_prefix}.in_proj_weight"), + ] + attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.out_proj", f"{dst_prefix}.out_proj")) + + return attn_keys + + def rename_keys_for_layer(src_prefix: str, dst_prefix: str): + resblock_keys = [] + + resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.mlp.c_fc", f"{dst_prefix}.mlp.fc1")) + resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.mlp.c_proj", f"{dst_prefix}.mlp.fc2")) + resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.ln_1", f"{dst_prefix}.layer_norm1")) + resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.ln_2", f"{dst_prefix}.layer_norm2")) + resblock_keys.extend(rename_keys_for_attn(f"{src_prefix}.attn", f"{dst_prefix}.self_attn")) + + return resblock_keys + + renamed_keys = [ + ("prompt_ctx.weight", "text_mapper.prompt_ctx.weight"), + ] + + 
renamed_keys.extend( + [ + (f"{src_prefix}.positional_embedding", f"{dst_prefix}.positional_embedding"), + (f"{src_prefix}.token_embedding.weight", f"{dst_prefix}.token_embedding.weight"), + ] + ) + + renamed_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.ln_final", f"{dst_prefix}.ln_final")) + + for i in range(self.config.text_encoder_config["text_encoder_num_layers"]): + renamed_keys.extend( + rename_keys_for_layer( + f"{src_prefix}.transformer.resblocks.{i}", f"{dst_prefix}.transformer.layers.{i}" + ) + ) + + self.pop_all(renamed_keys, dst_state_dict, src_state_dict) + + def convert(self, oneformer: OneFormerModel, is_swin: bool) -> OneFormerModel: + dst_state_dict = TrackedStateDict(oneformer.state_dict()) + src_state_dict = self.original_model.state_dict() + + self.replace_pixel_module(dst_state_dict, src_state_dict, is_swin) + self.replace_transformer_module(dst_state_dict, src_state_dict) + self.replace_task_mlp(dst_state_dict, src_state_dict) + if self.config.is_training: + self.replace_text_mapper(dst_state_dict, src_state_dict) + + logger.info(f"Missed keys are {pformat(dst_state_dict.diff())}") + logger.info(f"Not copied keys are {pformat(src_state_dict.keys())}") + logger.info("🙌 Done") + + oneformer.load_state_dict(dst_state_dict) + + return oneformer + + @staticmethod + def using_dirs(checkpoints_dir: Path, config_dir: Path) -> Iterator[Tuple[object, Path, Path]]: + checkpoints: List[Path] = checkpoints_dir.glob("**/*.pth") + + for checkpoint in checkpoints: + logger.info(f"💪 Converting {checkpoint.stem}") + # find associated config file + config: Path = config_dir / f"{checkpoint.stem}.yaml" + + yield config, checkpoint + + +def post_process_sem_seg_output(outputs: OneFormerForUniversalSegmentationOutput, target_size: Tuple[int, int]): + # class_queries_logits has shape [BATCH, QUERIES, CLASSES + 1] + class_queries_logits = outputs.class_queries_logits + # masks_queries_logits has shape [BATCH, QUERIES, HEIGHT, WIDTH] + masks_queries_logits = outputs.masks_queries_logits + if target_size is not None: + masks_queries_logits = torch.nn.functional.interpolate( + masks_queries_logits, + size=target_size, + mode="bilinear", + align_corners=False, + ) + # remove the null class `[..., :-1]` + masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] + # mask probs has shape [BATCH, QUERIES, HEIGHT, WIDTH] + masks_probs = masks_queries_logits.sigmoid() + # now we want to sum over the queries, + # $ out_{c,h,w} = \sum_q p_{q,c} * m_{q,h,w} $ + # where $ softmax(p) \in R^{q, c} $ is the mask classes + # and $ sigmoid(m) \in R^{q, h, w}$ is the mask probabilities + # b(atch)q(uery)c(lasses), b(atch)q(uery)h(eight)w(idth) + segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) + + return segmentation + + +def test( + original_model, + our_model: OneFormerForUniversalSegmentation, + processor: OneFormerProcessor, + model_repo: str, +): + def _preprocess_text(text_list=None, max_length=77): + if text_list is None: + raise ValueError("tokens cannot be None.") + + tokens = tokenizer(text_list, padding="max_length", max_length=max_length, truncation=True) + + attention_masks, input_ids = tokens["attention_mask"], tokens["input_ids"] + + token_inputs = [] + for attn_mask, input_id in zip(attention_masks, input_ids): + token = torch.tensor(attn_mask) * torch.tensor(input_id) + token_inputs.append(token.unsqueeze(0)) + + token_inputs = torch.cat(token_inputs, dim=0) + return token_inputs + + with torch.no_grad(): + tokenizer = 
CLIPTokenizer.from_pretrained(model_repo) + original_model = original_model.eval() + our_model = our_model.eval() + + im = prepare_img() + + tr = T.Compose( + [ + T.Resize((640, 640)), + T.ToTensor(), + T.Normalize( + mean=torch.tensor([123.675, 116.280, 103.530]) / 255.0, + std=torch.tensor([58.395, 57.120, 57.375]) / 255.0, + ), + ], + ) + + x = tr(im).unsqueeze(0) + + task_input = ["the task is semantic"] + task_token = _preprocess_text(task_input, max_length=processor.task_seq_length) + + original_model_backbone_features = original_model.backbone(x.clone()) + + our_model_output: OneFormerModelOutput = our_model.model(x.clone(), task_token, output_hidden_states=True) + + for original_model_feature, our_model_feature in zip( + original_model_backbone_features.values(), our_model_output.encoder_hidden_states + ): + assert torch.allclose( + original_model_feature, our_model_feature, atol=3e-3 + ), "The backbone features are not the same." + mask_features, _, multi_scale_features, _, _ = original_model.sem_seg_head.pixel_decoder.forward_features( + original_model_backbone_features + ) + + original_pixel_decoder_features = [] + original_pixel_decoder_features.append(mask_features) + for i in range(len(multi_scale_features)): + original_pixel_decoder_features.append(multi_scale_features[i]) + + for original_model_feature, our_model_feature in zip( + original_pixel_decoder_features, our_model_output.pixel_decoder_hidden_states + ): + assert torch.allclose( + original_model_feature, our_model_feature, atol=3e-4 + ), "The pixel decoder feature are not the same" + + tr_complete = T.Compose( + [ + T.Resize((640, 640)), + T.ToTensor(), + ], + ) + + y = (tr_complete(im) * 255.0).to(torch.int).float() + + # let's test the full model + original_model_out = original_model([{"image": y.clone(), "task": "The task is semantic"}]) + + original_segmentation = original_model_out[0]["sem_seg"] + + our_model_out: OneFormerForUniversalSegmentationOutput = our_model( + x.clone(), task_token, output_hidden_states=True + ) + + our_segmentation = post_process_sem_seg_output(our_model_out, target_size=(640, 640))[0] + + assert torch.allclose( + original_segmentation, our_segmentation, atol=1e-3 + ), "The segmentation image is not the same." + + logger.info("✅ Test passed!") + + +def get_name(checkpoint_file: Path): + model_name_raw: str = checkpoint_file.stem + + backbone = "swin" if "swin" in model_name_raw else "dinat" + dataset = "" + if "coco" in model_name_raw: + dataset = "coco" + elif "ade20k" in model_name_raw: + dataset = "ade20k" + elif "cityscapes" in model_name_raw: + dataset = "cityscapes" + else: + raise ValueError( + f"{model_name_raw} must be wrong since we didn't find 'coco' or 'ade20k' or 'cityscapes' in it " + ) + + backbone_types = ["tiny", "large"] + + backbone_type = list(filter(lambda x: x in model_name_raw, backbone_types))[0] + + model_name = f"oneformer_{dataset}_{backbone}_{backbone_type}" + + return model_name + + +if __name__ == "__main__": + parser = ArgumentParser( + description=( + "Command line to convert the original oneformer models (with swin backbone) to transformers" + " implementation." + ) + ) + + parser.add_argument( + "--checkpoints_dir", + type=Path, + help=( + "A directory containing the model's checkpoints. 
The directory has to have the following structure:" + " structure: //.pth; where name must follow the" + " following nomenclature nomenclature: oneformer___" + ), + ) + parser.add_argument( + "--configs_dir", + type=Path, + help=( + "A directory containing the model's configs, see detectron2 doc. The directory has to have the following" + " structure: //.yaml; where name must follow the" + " following nomenclature nomenclature: oneformer___" + ), + ) + parser.add_argument( + "--pytorch_dump_folder_path", + required=True, + type=Path, + help="Path to the folder to output PyTorch models.", + ) + parser.add_argument( + "--oneformer_dir", + required=True, + type=Path, + help=( + "A path to OneFormer's original implementation directory. You can download from here: " + "https://github.com/SHI-Labs/OneFormer" + ), + ) + + args = parser.parse_args() + + checkpoints_dir: Path = args.checkpoints_dir + config_dir: Path = args.configs_dir + save_directory: Path = args.pytorch_dump_folder_path + oneformer_dir: Path = args.oneformer_dir + # append the path to the parents to oneformer dir + sys.path.append(str(oneformer_dir.parent)) + # and import what's needed + from OneFormer.oneformer import add_common_config, add_dinat_config, add_oneformer_config, add_swin_config + from OneFormer.oneformer.oneformer_model import OneFormer as OriginalOneFormer + + if not save_directory.exists(): + save_directory.mkdir(parents=True) + + for config_file, checkpoint_file in OriginalOneFormerCheckpointToOursConverter.using_dirs( + checkpoints_dir, config_dir + ): + processor = OriginalOneFormerConfigToProcessorConverter()( + setup_cfg(Args(config_file=config_file)), os.path.join("shi-labs", config_file.stem) + ) + + original_config = setup_cfg(Args(config_file=config_file)) + oneformer_kwargs = OriginalOneFormer.from_config(original_config) + + original_model = OriginalOneFormer(**oneformer_kwargs).eval() + + DetectionCheckpointer(original_model).load(str(checkpoint_file)) + + is_swin = "swin" in config_file.stem + + config: OneFormerConfig = OriginalOneFormerConfigToOursConverter()(original_config, is_swin) + + oneformer = OneFormerModel(config=config).eval() + + converter = OriginalOneFormerCheckpointToOursConverter(original_model, config) + + oneformer = converter.convert(oneformer, is_swin) + + oneformer_for_universal_segmentation = OneFormerForUniversalSegmentation(config=config).eval() + + oneformer_for_universal_segmentation.model = oneformer + + test( + original_model, + oneformer_for_universal_segmentation, + processor, + os.path.join("shi-labs", config_file.stem), + ) + + model_name = get_name(checkpoint_file) + logger.info(f"🪄 Saving {model_name}") + + processor.save_pretrained(save_directory / model_name) + oneformer_for_universal_segmentation.save_pretrained(save_directory / model_name) + + processor.push_to_hub( + repo_id=os.path.join("shi-labs", config_file.stem), + commit_message="Add configs", + use_temp_dir=True, + ) + oneformer_for_universal_segmentation.push_to_hub( + repo_id=os.path.join("shi-labs", config_file.stem), + commit_message="Add model", + use_temp_dir=True, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/oneformer/image_processing_oneformer.py b/venv/lib/python3.10/site-packages/transformers/models/oneformer/image_processing_oneformer.py new file mode 100644 index 0000000000000000000000000000000000000000..9f865f8efd9b9497232fb5bd78a4c2f04f512115 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/oneformer/image_processing_oneformer.py @@ -0,0 
+1,1372 @@ +# coding=utf-8 +# Copyright 2022 SHI Labs and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Image processor class for OneFormer.""" + +import json +import os +import warnings +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union + +import numpy as np +from huggingface_hub import hf_hub_download +from huggingface_hub.utils import RepositoryNotFoundError + +from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict +from ...image_transforms import ( + PaddingMode, + get_resize_output_image_size, + pad, + rescale, + resize, + to_channel_dimension_format, +) +from ...image_utils import ( + ChannelDimension, + ImageInput, + PILImageResampling, + get_image_size, + infer_channel_dimension_format, + is_scaled_image, + make_list_of_images, + to_numpy_array, + valid_images, + validate_kwargs, + validate_preprocess_arguments, +) +from ...utils import ( + IMAGENET_DEFAULT_MEAN, + IMAGENET_DEFAULT_STD, + TensorType, + is_torch_available, + is_torch_tensor, + logging, +) + + +logger = logging.get_logger(__name__) + + +if is_torch_available(): + import torch + from torch import nn + + +# Copied from transformers.models.detr.image_processing_detr.max_across_indices +def max_across_indices(values: Iterable[Any]) -> List[Any]: + """ + Return the maximum value across all indices of an iterable of values. + """ + return [max(values_i) for values_i in zip(*values)] + + +# Copied from transformers.models.detr.image_processing_detr.get_max_height_width +def get_max_height_width( + images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None +) -> List[int]: + """ + Get the maximum height and width across all images in a batch. + """ + if input_data_format is None: + input_data_format = infer_channel_dimension_format(images[0]) + + if input_data_format == ChannelDimension.FIRST: + _, max_height, max_width = max_across_indices([img.shape for img in images]) + elif input_data_format == ChannelDimension.LAST: + max_height, max_width, _ = max_across_indices([img.shape for img in images]) + else: + raise ValueError(f"Invalid channel dimension format: {input_data_format}") + return (max_height, max_width) + + +# Copied from transformers.models.detr.image_processing_detr.make_pixel_mask +def make_pixel_mask( + image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None +) -> np.ndarray: + """ + Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. + + Args: + image (`np.ndarray`): + Image to make the pixel mask for. + output_size (`Tuple[int, int]`): + Output size of the mask. 
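+ input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If unset, will use the inferred format from the input.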
+ """ + input_height, input_width = get_image_size(image, channel_dim=input_data_format) + mask = np.zeros(output_size, dtype=np.int64) + mask[:input_height, :input_width] = 1 + return mask + + +# Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle +def binary_mask_to_rle(mask): + """ + Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format. + + Args: + mask (`torch.Tensor` or `numpy.array`): + A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target + segment_id or class_id. + Returns: + `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE + format. + """ + if is_torch_tensor(mask): + mask = mask.numpy() + + pixels = mask.flatten() + pixels = np.concatenate([[0], pixels, [0]]) + runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 + runs[1::2] -= runs[::2] + return list(runs) + + +# Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle +def convert_segmentation_to_rle(segmentation): + """ + Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format. + + Args: + segmentation (`torch.Tensor` or `numpy.array`): + A segmentation map of shape `(height, width)` where each value denotes a segment or class id. + Returns: + `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. + """ + segment_ids = torch.unique(segmentation) + + run_length_encodings = [] + for idx in segment_ids: + mask = torch.where(segmentation == idx, 1, 0) + rle = binary_mask_to_rle(mask) + run_length_encodings.append(rle) + + return run_length_encodings + + +# Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects +def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): + """ + Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and + `labels`. + + Args: + masks (`torch.Tensor`): + A tensor of shape `(num_queries, height, width)`. + scores (`torch.Tensor`): + A tensor of shape `(num_queries)`. + labels (`torch.Tensor`): + A tensor of shape `(num_queries)`. + object_mask_threshold (`float`): + A number between 0 and 1 used to binarize the masks. + Raises: + `ValueError`: Raised when the first dimension doesn't match in all input tensors. + Returns: + `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region + < `object_mask_threshold`. 
+ """ + if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): + raise ValueError("mask, scores and labels must have the same shape!") + + to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) + + return masks[to_keep], scores[to_keep], labels[to_keep] + + +# Copied from transformers.models.detr.image_processing_detr.check_segment_validity +def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): + # Get the mask associated with the k class + mask_k = mask_labels == k + mask_k_area = mask_k.sum() + + # Compute the area of all the stuff in query k + original_area = (mask_probs[k] >= mask_threshold).sum() + mask_exists = mask_k_area > 0 and original_area > 0 + + # Eliminate disconnected tiny segments + if mask_exists: + area_ratio = mask_k_area / original_area + if not area_ratio.item() > overlap_mask_area_threshold: + mask_exists = False + + return mask_exists, mask_k + + +# Copied from transformers.models.detr.image_processing_detr.compute_segments +def compute_segments( + mask_probs, + pred_scores, + pred_labels, + mask_threshold: float = 0.5, + overlap_mask_area_threshold: float = 0.8, + label_ids_to_fuse: Optional[Set[int]] = None, + target_size: Tuple[int, int] = None, +): + height = mask_probs.shape[1] if target_size is None else target_size[0] + width = mask_probs.shape[2] if target_size is None else target_size[1] + + segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) + segments: List[Dict] = [] + + if target_size is not None: + mask_probs = nn.functional.interpolate( + mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False + )[0] + + current_segment_id = 0 + + # Weigh each mask by its prediction score + mask_probs *= pred_scores.view(-1, 1, 1) + mask_labels = mask_probs.argmax(0) # [height, width] + + # Keep track of instances of each class + stuff_memory_list: Dict[str, int] = {} + for k in range(pred_labels.shape[0]): + pred_class = pred_labels[k].item() + should_fuse = pred_class in label_ids_to_fuse + + # Check if mask exists and large enough to be a segment + mask_exists, mask_k = check_segment_validity( + mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold + ) + + if mask_exists: + if pred_class in stuff_memory_list: + current_segment_id = stuff_memory_list[pred_class] + else: + current_segment_id += 1 + + # Add current object segment to final segmentation map + segmentation[mask_k] = current_segment_id + segment_score = round(pred_scores[k].item(), 6) + segments.append( + { + "id": current_segment_id, + "label_id": pred_class, + "was_fused": should_fuse, + "score": segment_score, + } + ) + if should_fuse: + stuff_memory_list[pred_class] = current_segment_id + + return segmentation, segments + + +# Copied from transformers.models.maskformer.image_processing_maskformer.convert_segmentation_map_to_binary_masks +def convert_segmentation_map_to_binary_masks( + segmentation_map: "np.ndarray", + instance_id_to_semantic_id: Optional[Dict[int, int]] = None, + ignore_index: Optional[int] = None, + reduce_labels: bool = False, +): + if reduce_labels and ignore_index is None: + raise ValueError("If `reduce_labels` is True, `ignore_index` must be provided.") + + if reduce_labels: + segmentation_map = np.where(segmentation_map == 0, ignore_index, segmentation_map - 1) + + # Get unique ids (class or instance ids based on input) + all_labels = np.unique(segmentation_map) + + # Drop background label if applicable + if ignore_index is not 
None: + all_labels = all_labels[all_labels != ignore_index] + + # Generate a binary mask for each object instance + binary_masks = [(segmentation_map == i) for i in all_labels] + binary_masks = np.stack(binary_masks, axis=0) # (num_labels, height, width) + + # Convert instance ids to class ids + if instance_id_to_semantic_id is not None: + labels = np.zeros(all_labels.shape[0]) + + for label in all_labels: + class_id = instance_id_to_semantic_id[label + 1 if reduce_labels else label] + labels[all_labels == label] = class_id - 1 if reduce_labels else class_id + else: + labels = all_labels + + return binary_masks.astype(np.float32), labels.astype(np.int64) + + +def get_oneformer_resize_output_image_size( + image: np.ndarray, + size: Union[int, Tuple[int, int], List[int], Tuple[int]], + max_size: Optional[int] = None, + default_to_square: bool = True, + input_data_format: Optional[Union[str, ChannelDimension]] = None, +) -> tuple: + """ + Computes the output size given the desired size. + + Args: + image (`np.ndarray`): + The input image. + size (`int` or `Tuple[int, int]` or `List[int]` or `Tuple[int]`): + The size of the output image. + max_size (`int`, *optional*): + The maximum size of the output image. + default_to_square (`bool`, *optional*, defaults to `True`): + Whether to default to square if no size is provided. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If unset, will use the inferred format from the input. + + Returns: + `Tuple[int, int]`: The output size. + """ + output_size = get_resize_output_image_size( + input_image=image, + size=size, + default_to_square=default_to_square, + max_size=max_size, + input_data_format=input_data_format, + ) + return output_size + + +def prepare_metadata(class_info): + metadata = {} + class_names = [] + thing_ids = [] + for key, info in class_info.items(): + metadata[key] = info["name"] + class_names.append(info["name"]) + if info["isthing"]: + thing_ids.append(int(key)) + metadata["thing_ids"] = thing_ids + metadata["class_names"] = class_names + return metadata + + +def load_metadata(repo_id, class_info_file): + fname = os.path.join("" if repo_id is None else repo_id, class_info_file) + + if not os.path.exists(fname) or not os.path.isfile(fname): + if repo_id is None: + raise ValueError(f"Could not file {fname} locally. repo_id must be defined if loading from the hub") + # We try downloading from a dataset by default for backward compatibility + try: + fname = hf_hub_download(repo_id, class_info_file, repo_type="dataset") + except RepositoryNotFoundError: + fname = hf_hub_download(repo_id, class_info_file) + + with open(fname, "r") as f: + class_info = json.load(f) + + return class_info + + +class OneFormerImageProcessor(BaseImageProcessor): + r""" + Constructs a OneFormer image processor. The image processor can be used to prepare image(s), task input(s) and + optional text inputs and targets for the model. + + This image processor inherits from [`BaseImageProcessor`] which contains most of the main methods. Users should + refer to this superclass for more information regarding those methods. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the input to a certain `size`. + size (`int`, *optional*, defaults to 800): + Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a + sequence like `(width, height)`, output size will be matched to this. 
If size is an int, smaller edge of + the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size * + height / width, size)`. + resample (`int`, *optional*, defaults to `Resampling.BILINEAR`): + An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`, + `PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`, + `PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set + to `True`. + do_rescale (`bool`, *optional*, defaults to `True`): + Whether to rescale the input to a certain `scale`. + rescale_factor (`float`, *optional*, defaults to `1/ 255`): + Rescale the input by the given factor. Only has an effect if `do_rescale` is set to `True`. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether or not to normalize the input with mean and standard deviation. + image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`): + The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean. + image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`): + The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the + ImageNet std. + ignore_index (`int`, *optional*): + Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels + denoted with 0 (background) will be replaced with `ignore_index`. + do_reduce_labels (`bool`, *optional*, defaults to `False`): + Whether or not to decrement all label values of segmentation maps by 1. Usually used for datasets where 0 + is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). + The background label will be replaced by `ignore_index`. + repo_path (`str`, *optional*, defaults to `"shi-labs/oneformer_demo"`): + Path to hub repo or local directory containing the JSON file with class information for the dataset. + If unset, will look for `class_info_file` in the current working directory. + class_info_file (`str`, *optional*): + JSON file containing class information for the dataset. See `shi-labs/oneformer_demo/cityscapes_panoptic.json` for an example. + num_text (`int`, *optional*): + Number of text entries in the text input list. + """ + + model_input_names = ["pixel_values", "pixel_mask", "task_inputs"] + + def __init__( + self, + do_resize: bool = True, + size: Dict[str, int] = None, + resample: PILImageResampling = PILImageResampling.BILINEAR, + do_rescale: bool = True, + rescale_factor: float = 1 / 255, + do_normalize: bool = True, + image_mean: Union[float, List[float]] = None, + image_std: Union[float, List[float]] = None, + ignore_index: Optional[int] = None, + do_reduce_labels: bool = False, + repo_path: Optional[str] = "shi-labs/oneformer_demo", + class_info_file: str = None, + num_text: Optional[int] = None, + **kwargs, + ): + if "max_size" in kwargs: + self._max_size = kwargs.pop("max_size") + else: + self._max_size = 1333 + + size = size if size is not None else {"shortest_edge": 800, "longest_edge": self._max_size} + size = get_size_dict(size, max_size=self._max_size, default_to_square=False) + + if "reduce_labels" in kwargs: + warnings.warn( + "The `reduce_labels` argument is deprecated and will be removed in v4.27. 
" + "Please use `do_reduce_labels` instead.", + FutureWarning, + ) + do_reduce_labels = kwargs.pop("reduce_labels") + + if class_info_file is None: + raise ValueError("You must provide a `class_info_file`") + + super().__init__(**kwargs) + self.do_resize = do_resize + self.size = size + self.resample = resample + self.do_rescale = do_rescale + self.rescale_factor = rescale_factor + self.do_normalize = do_normalize + self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN + self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD + self.ignore_index = ignore_index + self.do_reduce_labels = do_reduce_labels + self.class_info_file = class_info_file + self.repo_path = repo_path + self.metadata = prepare_metadata(load_metadata(repo_path, class_info_file)) + self.num_text = num_text + self._valid_processor_keys = [ + "images", + "task_inputs", + "segmentation_maps", + "instance_id_to_semantic_id", + "do_resize", + "size", + "resample", + "do_rescale", + "rescale_factor", + "do_normalize", + "image_mean", + "image_std", + "ignore_index", + "do_reduce_labels", + "return_tensors", + "data_format", + "input_data_format", + ] + + def resize( + self, + image: np.ndarray, + size: Dict[str, int], + resample: PILImageResampling = PILImageResampling.BILINEAR, + data_format=None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Resize the image to the given size. Size can be min_size (scalar) or `(height, width)` tuple. If size is an + int, smaller edge of the image will be matched to this number. + """ + if "max_size" in kwargs: + warnings.warn( + "The `max_size` parameter is deprecated and will be removed in v4.27. " + "Please specify in `size['longest_edge'] instead`.", + FutureWarning, + ) + max_size = kwargs.pop("max_size") + else: + max_size = None + size = get_size_dict(size, max_size=max_size, default_to_square=False) + if "shortest_edge" in size and "longest_edge" in size: + size, max_size = size["shortest_edge"], size["longest_edge"] + elif "height" in size and "width" in size: + size = (size["height"], size["width"]) + max_size = None + else: + raise ValueError( + "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" + f" {size.keys()}." + ) + size = get_oneformer_resize_output_image_size( + image=image, size=size, max_size=max_size, default_to_square=False, input_data_format=input_data_format + ) + image = resize( + image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format + ) + return image + + # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale + def rescale( + self, + image: np.ndarray, + rescale_factor: float, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> np.ndarray: + """ + Rescale the image by the given factor. image = image * rescale_factor. + + Args: + image (`np.ndarray`): + Image to rescale. + rescale_factor (`float`): + The value to use for rescaling. + data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format for the output image. If unset, the channel dimension format of the input + image is used. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. 
+ input_data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format for the input image. If unset, is inferred from the input image. Can be + one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + """ + return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format) + + # Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.convert_segmentation_map_to_binary_masks + def convert_segmentation_map_to_binary_masks( + self, + segmentation_map: "np.ndarray", + instance_id_to_semantic_id: Optional[Dict[int, int]] = None, + ignore_index: Optional[int] = None, + reduce_labels: bool = False, + ): + reduce_labels = reduce_labels if reduce_labels is not None else self.reduce_labels + ignore_index = ignore_index if ignore_index is not None else self.ignore_index + return convert_segmentation_map_to_binary_masks( + segmentation_map=segmentation_map, + instance_id_to_semantic_id=instance_id_to_semantic_id, + ignore_index=ignore_index, + reduce_labels=reduce_labels, + ) + + def __call__(self, images, task_inputs=None, segmentation_maps=None, **kwargs) -> BatchFeature: + return self.preprocess(images, task_inputs=task_inputs, segmentation_maps=segmentation_maps, **kwargs) + + def _preprocess( + self, + image: ImageInput, + do_resize: bool = None, + size: Dict[str, int] = None, + resample: PILImageResampling = None, + do_rescale: bool = None, + rescale_factor: float = None, + do_normalize: bool = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ): + if do_resize: + image = self.resize(image, size=size, resample=resample, input_data_format=input_data_format) + if do_rescale: + image = self.rescale(image, rescale_factor=rescale_factor, input_data_format=input_data_format) + if do_normalize: + image = self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format) + return image + + def _preprocess_image( + self, + image: ImageInput, + do_resize: bool = None, + size: Dict[str, int] = None, + resample: PILImageResampling = None, + do_rescale: bool = None, + rescale_factor: float = None, + do_normalize: bool = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> np.ndarray: + """Preprocesses a single image.""" + # All transformations expect numpy arrays. + image = to_numpy_array(image) + if is_scaled_image(image) and do_rescale: + logger.warning_once( + "It looks like you are trying to rescale already rescaled images. If the input" + " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." 
+ ) + if input_data_format is None: + input_data_format = infer_channel_dimension_format(image) + image = self._preprocess( + image=image, + do_resize=do_resize, + size=size, + resample=resample, + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + input_data_format=input_data_format, + ) + if data_format is not None: + image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) + return image + + def _preprocess_mask( + self, + segmentation_map: ImageInput, + do_resize: bool = None, + size: Dict[str, int] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> np.ndarray: + """Preprocesses a single mask.""" + segmentation_map = to_numpy_array(segmentation_map) + # Add channel dimension if missing - needed for certain transformations + if segmentation_map.ndim == 2: + added_channel_dim = True + segmentation_map = segmentation_map[None, ...] + input_data_format = ChannelDimension.FIRST + else: + added_channel_dim = False + if input_data_format is None: + input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1) + # TODO: (Amy) + # Remork segmentation map processing to include reducing labels and resizing which doesn't + # drop segment IDs > 255. + segmentation_map = self._preprocess( + image=segmentation_map, + do_resize=do_resize, + resample=PILImageResampling.NEAREST, + size=size, + do_rescale=False, + do_normalize=False, + input_data_format=input_data_format, + ) + # Remove extra channel dimension if added for processing + if added_channel_dim: + segmentation_map = segmentation_map.squeeze(0) + return segmentation_map + + def preprocess( + self, + images: ImageInput, + task_inputs: Optional[List[str]] = None, + segmentation_maps: Optional[ImageInput] = None, + instance_id_to_semantic_id: Optional[Dict[int, int]] = None, + do_resize: Optional[bool] = None, + size: Optional[Dict[str, int]] = None, + resample: PILImageResampling = None, + do_rescale: Optional[bool] = None, + rescale_factor: Optional[float] = None, + do_normalize: Optional[bool] = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + ignore_index: Optional[int] = None, + do_reduce_labels: Optional[bool] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> BatchFeature: + if "pad_and_return_pixel_mask" in kwargs: + warnings.warn( + "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in v4.27", + FutureWarning, + ) + if "reduce_labels" in kwargs: + warnings.warn( + "The `reduce_labels` argument is deprecated and will be removed in a v4.27. Please use" + " `do_reduce_labels` instead.", + FutureWarning, + ) + if do_reduce_labels is not None: + raise ValueError( + "You cannot use both `reduce_labels` and `do_reduce_labels` arguments. Please use" + " `do_reduce_labels` instead." 
+ ) + do_reduce_labels = kwargs.pop("reduce_labels") + + if task_inputs is None: + # Default value + task_inputs = ["panoptic"] + + do_resize = do_resize if do_resize is not None else self.do_resize + size = size if size is not None else self.size + size = get_size_dict(size, default_to_square=False, max_size=self._max_size) + resample = resample if resample is not None else self.resample + do_rescale = do_rescale if do_rescale is not None else self.do_rescale + rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor + do_normalize = do_normalize if do_normalize is not None else self.do_normalize + image_mean = image_mean if image_mean is not None else self.image_mean + image_std = image_std if image_std is not None else self.image_std + ignore_index = ignore_index if ignore_index is not None else self.ignore_index + do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels + + if not valid_images(images): + raise ValueError( + "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " + "torch.Tensor, tf.Tensor or jax.ndarray." + ) + + validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) + + validate_preprocess_arguments( + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + do_resize=do_resize, + size=size, + resample=resample, + ) + + if segmentation_maps is not None and not valid_images(segmentation_maps): + raise ValueError( + "Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, " + "torch.Tensor, tf.Tensor or jax.ndarray." + ) + + images = make_list_of_images(images) + if segmentation_maps is not None: + segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2) + + if segmentation_maps is not None and len(images) != len(segmentation_maps): + raise ValueError("Images and segmentation maps must have the same length.") + + images = [ + self._preprocess_image( + image, + do_resize=do_resize, + size=size, + resample=resample, + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + data_format=data_format, + input_data_format=input_data_format, + ) + for image in images + ] + + if segmentation_maps is not None: + segmentation_maps = [ + self._preprocess_mask(segmentation_map, do_resize, size, input_data_format=input_data_format) + for segmentation_map in segmentation_maps + ] + encoded_inputs = self.encode_inputs( + images, + task_inputs, + segmentation_maps, + instance_id_to_semantic_id, + ignore_index, + do_reduce_labels, + return_tensors, + input_data_format=input_data_format, + ) + return encoded_inputs + + # Copied from transformers.models.vilt.image_processing_vilt.ViltImageProcessor._pad_image + def _pad_image( + self, + image: np.ndarray, + output_size: Tuple[int, int], + constant_values: Union[float, Iterable[float]] = 0, + data_format: Optional[ChannelDimension] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> np.ndarray: + """ + Pad an image with zeros to the given size. 
+ """ + input_height, input_width = get_image_size(image, channel_dim=input_data_format) + output_height, output_width = output_size + + pad_bottom = output_height - input_height + pad_right = output_width - input_width + padding = ((0, pad_bottom), (0, pad_right)) + padded_image = pad( + image, + padding, + mode=PaddingMode.CONSTANT, + constant_values=constant_values, + data_format=data_format, + input_data_format=input_data_format, + ) + return padded_image + + # Copied from transformers.models.vilt.image_processing_vilt.ViltImageProcessor.pad + def pad( + self, + images: List[np.ndarray], + constant_values: Union[float, Iterable[float]] = 0, + return_pixel_mask: bool = True, + return_tensors: Optional[Union[str, TensorType]] = None, + data_format: Optional[ChannelDimension] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> BatchFeature: + """ + Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width + in the batch and optionally returns their corresponding pixel mask. + + Args: + image (`np.ndarray`): + Image to pad. + constant_values (`float` or `Iterable[float]`, *optional*): + The value to use for the padding if `mode` is `"constant"`. + return_pixel_mask (`bool`, *optional*, defaults to `True`): + Whether to return a pixel mask. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. + data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format of the image. If not provided, it will be the same as the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred. 
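+
+        Example (an illustrative sketch; `image_processor` is assumed to be an already-constructed
+        `OneFormerImageProcessor`):
+
+        ```python
+        import numpy as np
+
+        # two channels-first images with different spatial sizes
+        images = [np.zeros((3, 400, 600)), np.zeros((3, 500, 400))]
+        batch = image_processor.pad(images, return_pixel_mask=True)
+        # every entry of batch["pixel_values"] is padded to (3, 500, 600), the batch maximum,
+        # and batch["pixel_mask"][k] is 1 over the original image area and 0 over the padded border
+        ```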
+ """ + pad_size = get_max_height_width(images, input_data_format=input_data_format) + + padded_images = [ + self._pad_image( + image, + pad_size, + constant_values=constant_values, + data_format=data_format, + input_data_format=input_data_format, + ) + for image in images + ] + data = {"pixel_values": padded_images} + + if return_pixel_mask: + masks = [ + make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format) + for image in images + ] + data["pixel_mask"] = masks + + return BatchFeature(data=data, tensor_type=return_tensors) + + def get_semantic_annotations(self, label, num_class_obj): + annotation_classes = label["classes"] + annotation_masks = label["masks"] + + texts = ["a semantic photo"] * self.num_text + classes = [] + masks = [] + + for idx in range(len(annotation_classes)): + class_id = annotation_classes[idx] + mask = annotation_masks[idx] + if not np.all(mask is False): + if class_id not in classes: + cls_name = self.metadata[str(class_id)] + classes.append(class_id) + masks.append(mask) + num_class_obj[cls_name] += 1 + else: + idx = classes.index(class_id) + masks[idx] += mask + masks[idx] = np.clip(masks[idx], 0, 1) + + num = 0 + for i, cls_name in enumerate(self.metadata["class_names"]): + if num_class_obj[cls_name] > 0: + for _ in range(num_class_obj[cls_name]): + if num >= len(texts): + break + texts[num] = f"a photo with a {cls_name}" + num += 1 + + classes = np.array(classes) + masks = np.array(masks) + return classes, masks, texts + + def get_instance_annotations(self, label, num_class_obj): + annotation_classes = label["classes"] + annotation_masks = label["masks"] + + texts = ["an instance photo"] * self.num_text + classes = [] + masks = [] + + for idx in range(len(annotation_classes)): + class_id = annotation_classes[idx] + mask = annotation_masks[idx] + + if class_id in self.metadata["thing_ids"]: + if not np.all(mask is False): + cls_name = self.metadata[str(class_id)] + classes.append(class_id) + masks.append(mask) + num_class_obj[cls_name] += 1 + + num = 0 + for i, cls_name in enumerate(self.metadata["class_names"]): + if num_class_obj[cls_name] > 0: + for _ in range(num_class_obj[cls_name]): + if num >= len(texts): + break + texts[num] = f"a photo with a {cls_name}" + num += 1 + + classes = np.array(classes) + masks = np.array(masks) + return classes, masks, texts + + def get_panoptic_annotations(self, label, num_class_obj): + annotation_classes = label["classes"] + annotation_masks = label["masks"] + + texts = ["an panoptic photo"] * self.num_text + classes = [] + masks = [] + + for idx in range(len(annotation_classes)): + class_id = annotation_classes[idx] + mask = annotation_masks[idx].data + if not np.all(mask is False): + cls_name = self.metadata[str(class_id)] + classes.append(class_id) + masks.append(mask) + num_class_obj[cls_name] += 1 + + num = 0 + for i, cls_name in enumerate(self.metadata["class_names"]): + if num_class_obj[cls_name] > 0: + for _ in range(num_class_obj[cls_name]): + if num >= len(texts): + break + texts[num] = f"a photo with a {cls_name}" + num += 1 + + classes = np.array(classes) + masks = np.array(masks) + return classes, masks, texts + + def encode_inputs( + self, + pixel_values_list: List[ImageInput], + task_inputs: List[str], + segmentation_maps: ImageInput = None, + instance_id_to_semantic_id: Optional[Union[List[Dict[int, int]], Dict[int, int]]] = None, + ignore_index: Optional[int] = None, + reduce_labels: bool = False, + return_tensors: Optional[Union[str, TensorType]] = None, + 
input_data_format: Optional[Union[str, ChannelDimension]] = None, + ): + """ + Pad images up to the largest image in a batch and create a corresponding `pixel_mask`. + + OneFormer addresses semantic segmentation with a mask classification paradigm, thus input segmentation maps + will be converted to lists of binary masks and their respective labels. Let's see an example, assuming + `segmentation_maps = [[2,6,7,9]]`, the output will contain `mask_labels = + [[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]` (four binary masks) and `class_labels = [2,6,7,9]`, the labels for + each mask. + + Args: + pixel_values_list (`List[ImageInput]`): + List of images (pixel values) to be padded. Each image should be a tensor of shape `(channels, height, + width)`. + + task_inputs (`List[str]`): + List of task values. + + segmentation_maps (`ImageInput`, *optional*): + The corresponding semantic segmentation maps with the pixel-wise annotations. + + (`bool`, *optional*, defaults to `True`): + Whether or not to pad images up to the largest image in a batch and create a pixel mask. + + If left to the default, will return a pixel mask that is: + + - 1 for pixels that are real (i.e. **not masked**), + - 0 for pixels that are padding (i.e. **masked**). + + instance_id_to_semantic_id (`List[Dict[int, int]]` or `Dict[int, int]`, *optional*): + A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an + instance segmentation map where each pixel represents an instance id. Can be provided as a single + dictionary with a global/dataset-level mapping or as a list of dictionaries (one per image), to map + instance ids in each image separately. + + return_tensors (`str` or [`~file_utils.TensorType`], *optional*): + If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` + objects. + + input_data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred from the input + image. + + Returns: + [`BatchFeature`]: A [`BatchFeature`] with the following fields: + + - **pixel_values** -- Pixel values to be fed to a model. + - **pixel_mask** -- Pixel mask to be fed to a model (when `=True` or if `pixel_mask` is in + `self.model_input_names`). + - **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model + (when `annotations` are provided). + - **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when + `annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of + `mask_labels[i][j]` if `class_labels[i][j]`. + - **text_inputs** -- Optional list of text string entries to be fed to a model (when `annotations` are + provided). They identify the binary masks present in the image. 
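+
+        Example (an illustrative sketch only; `image_processor` is assumed to be an `OneFormerImageProcessor`
+        built with a valid `class_info_file` and `num_text`, `image` an input image and `seg_map` a per-pixel
+        class-id map):
+
+        ```python
+        inputs = image_processor.encode_inputs(
+            [image], ["semantic"], segmentation_maps=[seg_map], return_tensors="pt"
+        )
+        # inputs["mask_labels"][0]: one binary mask per class id present in `seg_map`, padded like the pixel values
+        # inputs["class_labels"][0]: the class id corresponding to each of those binary masks
+        # inputs["task_inputs"] == ["the task is semantic"] (still to be tokenized before reaching the model)
+        ```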
+ """ + ignore_index = self.ignore_index if ignore_index is None else ignore_index + reduce_labels = self.do_reduce_labels if reduce_labels is None else reduce_labels + pixel_values_list = [to_numpy_array(pixel_values) for pixel_values in pixel_values_list] + + if input_data_format is None: + input_data_format = infer_channel_dimension_format(pixel_values_list[0]) + + pad_size = get_max_height_width(pixel_values_list, input_data_format=input_data_format) + encoded_inputs = self.pad( + pixel_values_list, return_tensors=return_tensors, input_data_format=input_data_format + ) + + annotations = None + if segmentation_maps is not None: + segmentation_maps = map(np.array, segmentation_maps) + annotations = [] + for idx, segmentation_map in enumerate(segmentation_maps): + # Use instance2class_id mapping per image + if isinstance(instance_id_to_semantic_id, list): + instance_id = instance_id_to_semantic_id[idx] + else: + instance_id = instance_id_to_semantic_id + # Use instance2class_id mapping per image + masks, classes = self.convert_segmentation_map_to_binary_masks( + segmentation_map, instance_id, ignore_index=ignore_index, reduce_labels=reduce_labels + ) + annotations.append({"masks": masks, "classes": classes}) + + if annotations is not None: + mask_labels = [] + class_labels = [] + text_inputs = [] + + num_class_obj = {} + for cls_name in self.metadata["class_names"]: + num_class_obj[cls_name] = 0 + + for i, label in enumerate(annotations): + task = task_inputs[i] + if task == "semantic": + classes, masks, texts = self.get_semantic_annotations(label, num_class_obj) + elif task == "instance": + classes, masks, texts = self.get_instance_annotations(label, num_class_obj) + elif task == "panoptic": + classes, masks, texts = self.get_panoptic_annotations(label, num_class_obj) + else: + raise ValueError(f"{task} was not expected, expected `semantic`, `instance` or `panoptic`") + + # we cannot batch them since they don't share a common class size + masks = [mask[None, ...] for mask in masks] + masks = [ + self._pad_image(image=mask, output_size=pad_size, constant_values=ignore_index) for mask in masks + ] + masks = np.concatenate(masks, axis=0) + mask_labels.append(torch.from_numpy(masks)) + class_labels.append(torch.from_numpy(classes).long()) + text_inputs.append(texts) + + encoded_inputs["mask_labels"] = mask_labels + encoded_inputs["class_labels"] = class_labels + encoded_inputs["text_inputs"] = text_inputs + + # This needs to be tokenized before sending to the model. + encoded_inputs["task_inputs"] = [f"the task is {task_input}" for task_input in task_inputs] + + return encoded_inputs + + # Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.post_process_semantic_segmentation + def post_process_semantic_segmentation( + self, outputs, target_sizes: Optional[List[Tuple[int, int]]] = None + ) -> "torch.Tensor": + """ + Converts the output of [`MaskFormerForInstanceSegmentation`] into semantic segmentation maps. Only supports + PyTorch. + + Args: + outputs ([`MaskFormerForInstanceSegmentation`]): + Raw outputs of the model. + target_sizes (`List[Tuple[int, int]]`, *optional*): + List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested + final size (height, width) of each prediction. If left to None, predictions will not be resized. 
+ Returns: + `List[torch.Tensor]`: + A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) + corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each + `torch.Tensor` correspond to a semantic class id. + """ + class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] + masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] + + # Remove the null class `[..., :-1]` + masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] + masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] + + # Semantic segmentation logits of shape (batch_size, num_classes, height, width) + segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) + batch_size = class_queries_logits.shape[0] + + # Resize logits and compute semantic segmentation maps + if target_sizes is not None: + if batch_size != len(target_sizes): + raise ValueError( + "Make sure that you pass in as many target sizes as the batch dimension of the logits" + ) + + semantic_segmentation = [] + for idx in range(batch_size): + resized_logits = torch.nn.functional.interpolate( + segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False + ) + semantic_map = resized_logits[0].argmax(dim=0) + semantic_segmentation.append(semantic_map) + else: + semantic_segmentation = segmentation.argmax(dim=1) + semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] + + return semantic_segmentation + + def post_process_instance_segmentation( + self, + outputs, + task_type: str = "instance", + is_demo: bool = True, + threshold: float = 0.5, + mask_threshold: float = 0.5, + overlap_mask_area_threshold: float = 0.8, + target_sizes: Optional[List[Tuple[int, int]]] = None, + return_coco_annotation: Optional[bool] = False, + ): + """ + Converts the output of [`OneFormerForUniversalSegmentationOutput`] into image instance segmentation + predictions. Only supports PyTorch. + + Args: + outputs ([`OneFormerForUniversalSegmentationOutput`]): + The outputs from [`OneFormerForUniversalSegmentationOutput`]. + task_type (`str`, *optional)*, defaults to "instance"): + The post processing depends on the task token input. If the `task_type` is "panoptic", we need to + ignore the stuff predictions. + is_demo (`bool`, *optional)*, defaults to `True`): + Whether the model is in demo mode. If true, use threshold to predict final masks. + threshold (`float`, *optional*, defaults to 0.5): + The probability score threshold to keep predicted instance masks. + mask_threshold (`float`, *optional*, defaults to 0.5): + Threshold to use when turning the predicted masks into binary values. + overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): + The overlap mask area threshold to merge or discard small disconnected parts within each binary + instance mask. + target_sizes (`List[Tuple]`, *optional*): + List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested + final size (height, width) of each prediction in batch. If left to None, predictions will not be + resized. + return_coco_annotation (`bool`, *optional)*, defaults to `False`): + Whether to return predictions in COCO format. 
+ + Returns: + `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: + - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set + to `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized + to the corresponding `target_sizes` entry. + - **segments_info** -- A dictionary that contains additional information on each segment. + - **id** -- an integer representing the `segment_id`. + - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. + - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. + Multiple instances of the same class / label were fused and assigned a single `segment_id`. + - **score** -- Prediction score of segment with `segment_id`. + """ + class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] + masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] + + device = masks_queries_logits.device + batch_size = class_queries_logits.shape[0] + num_queries = class_queries_logits.shape[1] + num_classes = class_queries_logits.shape[-1] - 1 + + # Loop over items in batch size + results: List[Dict[str, torch.Tensor]] = [] + + for i in range(batch_size): + # [Q, K] + scores = torch.nn.functional.softmax(class_queries_logits[i], dim=-1)[:, :-1] + labels = torch.arange(num_classes, device=device).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1) + + # scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.num_queries, sorted=False) + scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False) + labels_per_image = labels[topk_indices] + + topk_indices = torch.div(topk_indices, num_classes, rounding_mode="floor") + # mask_pred = mask_pred.unsqueeze(1).repeat(1, self.sem_seg_head.num_classes, 1).flatten(0, 1) + mask_pred = masks_queries_logits[i][topk_indices] + + # Only consider scores with confidence over [threshold] for demo + if is_demo: + keep = scores_per_image > threshold + scores_per_image = scores_per_image[keep] + labels_per_image = labels_per_image[keep] + mask_pred = mask_pred[keep] + + # if this is panoptic segmentation, we only keep the "thing" classes + if task_type == "panoptic": + keep = torch.zeros_like(scores_per_image).bool() + for j, lab in enumerate(labels_per_image): + keep[j] = lab in self.metadata["thing_ids"] + + scores_per_image = scores_per_image[keep] + labels_per_image = labels_per_image[keep] + mask_pred = mask_pred[keep] + + if mask_pred.shape[0] <= 0: + height, width = target_sizes[i] if target_sizes is not None else mask_pred.shape[1:] + segmentation = torch.zeros((height, width)) - 1 + results.append({"segmentation": segmentation, "segments_info": []}) + continue + + if "ade20k" in self.class_info_file and not is_demo and "instance" in task_type: + for j in range(labels_per_image.shape[0]): + labels_per_image[j] = self.metadata["thing_ids"].index(labels_per_image[j].item()) + + # Get segmentation map and segment information of batch item + target_size = target_sizes[i] if target_sizes is not None else None + segmentation, segments = compute_segments( + mask_pred, + scores_per_image, + labels_per_image, + mask_threshold, + overlap_mask_area_threshold, + set(), + target_size, + ) + + # Return segmentation map in run-length encoding (RLE) format + if return_coco_annotation: + segmentation = convert_segmentation_to_rle(segmentation) 
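+                # With `return_coco_annotation=True` the "segmentation" entry becomes a list of
+                # run-length encodings (one per segment id) instead of a `(height, width)` tensor.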
+ + results.append({"segmentation": segmentation, "segments_info": segments}) + return results + + # Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.post_process_panoptic_segmentation + def post_process_panoptic_segmentation( + self, + outputs, + threshold: float = 0.5, + mask_threshold: float = 0.5, + overlap_mask_area_threshold: float = 0.8, + label_ids_to_fuse: Optional[Set[int]] = None, + target_sizes: Optional[List[Tuple[int, int]]] = None, + ) -> List[Dict]: + """ + Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image panoptic segmentation + predictions. Only supports PyTorch. + + Args: + outputs ([`MaskFormerForInstanceSegmentationOutput`]): + The outputs from [`MaskFormerForInstanceSegmentation`]. + threshold (`float`, *optional*, defaults to 0.5): + The probability score threshold to keep predicted instance masks. + mask_threshold (`float`, *optional*, defaults to 0.5): + Threshold to use when turning the predicted masks into binary values. + overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): + The overlap mask area threshold to merge or discard small disconnected parts within each binary + instance mask. + label_ids_to_fuse (`Set[int]`, *optional*): + The labels in this state will have all their instances be fused together. For instance we could say + there can only be one sky in an image, but several persons, so the label ID for sky would be in that + set, but not the one for person. + target_sizes (`List[Tuple]`, *optional*): + List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested + final size (height, width) of each prediction in batch. If left to None, predictions will not be + resized. + + Returns: + `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: + - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set + to `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized + to the corresponding `target_sizes` entry. + - **segments_info** -- A dictionary that contains additional information on each segment. + - **id** -- an integer representing the `segment_id`. + - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. + - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. + Multiple instances of the same class / label were fused and assigned a single `segment_id`. + - **score** -- Prediction score of segment with `segment_id`. + """ + + if label_ids_to_fuse is None: + logger.warning("`label_ids_to_fuse` unset. 
No instance will be fused.") + label_ids_to_fuse = set() + + class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] + masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] + + batch_size = class_queries_logits.shape[0] + num_labels = class_queries_logits.shape[-1] - 1 + + mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] + + # Predicted label and score of each query (batch_size, num_queries) + pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) + + # Loop over items in batch size + results: List[Dict[str, TensorType]] = [] + + for i in range(batch_size): + mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( + mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels + ) + + # No mask found + if mask_probs_item.shape[0] <= 0: + height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] + segmentation = torch.zeros((height, width)) - 1 + results.append({"segmentation": segmentation, "segments_info": []}) + continue + + # Get segmentation map and segment information of batch item + target_size = target_sizes[i] if target_sizes is not None else None + segmentation, segments = compute_segments( + mask_probs=mask_probs_item, + pred_scores=pred_scores_item, + pred_labels=pred_labels_item, + mask_threshold=mask_threshold, + overlap_mask_area_threshold=overlap_mask_area_threshold, + label_ids_to_fuse=label_ids_to_fuse, + target_size=target_size, + ) + + results.append({"segmentation": segmentation, "segments_info": segments}) + return results diff --git a/venv/lib/python3.10/site-packages/transformers/models/oneformer/modeling_oneformer.py b/venv/lib/python3.10/site-packages/transformers/models/oneformer/modeling_oneformer.py new file mode 100644 index 0000000000000000000000000000000000000000..6af4226995bfa1405425cede88b883d58218ccb5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/oneformer/modeling_oneformer.py @@ -0,0 +1,3259 @@ +# coding=utf-8 +# Copyright 2022 SHI Labs and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
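+# Minimal end-to-end usage sketch (illustrative only; assumes the hub checkpoint below is reachable
+# and that `image` is a PIL image):
+#
+#     from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation
+#
+#     processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
+#     model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
+#     inputs = processor(images=image, task_inputs=["semantic"], return_tensors="pt")
+#     outputs = model(**inputs)
+#     semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]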
+""" PyTorch OneFormer model.""" +import copy +import math +import warnings +from dataclasses import dataclass +from typing import Dict, List, Optional, Tuple + +import numpy as np +import torch +from torch import Tensor, nn +from torch.cuda.amp import autocast + +from ...activations import ACT2FN +from ...modeling_outputs import BaseModelOutput +from ...modeling_utils import PreTrainedModel +from ...utils import ( + ModelOutput, + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_accelerate_available, + is_scipy_available, + logging, + replace_return_docstrings, + requires_backends, +) +from ...utils.backbone_utils import load_backbone +from .configuration_oneformer import OneFormerConfig + + +if is_accelerate_available(): + from accelerate import PartialState + from accelerate.utils import reduce + +logger = logging.get_logger(__name__) + + +_CONFIG_FOR_DOC = "OneFormerConfig" +_CHECKPOINT_FOR_DOC = "shi-labs/oneformer_ade20k_swin_tiny" + + +from ..deprecated._archive_maps import ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +if is_scipy_available(): + from scipy.optimize import linear_sum_assignment + + +def _get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + + +# Copied from transformers.models.deformable_detr.modeling_deformable_detr.multi_scale_deformable_attention +def multi_scale_deformable_attention( + value: Tensor, value_spatial_shapes: Tensor, sampling_locations: Tensor, attention_weights: Tensor +) -> Tensor: + batch_size, _, num_heads, hidden_dim = value.shape + _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape + value_list = value.split([height.item() * width.item() for height, width in value_spatial_shapes], dim=1) + sampling_grids = 2 * sampling_locations - 1 + sampling_value_list = [] + for level_id, (height, width) in enumerate(value_spatial_shapes): + # batch_size, height*width, num_heads, hidden_dim + # -> batch_size, height*width, num_heads*hidden_dim + # -> batch_size, num_heads*hidden_dim, height*width + # -> batch_size*num_heads, hidden_dim, height, width + value_l_ = ( + value_list[level_id].flatten(2).transpose(1, 2).reshape(batch_size * num_heads, hidden_dim, height, width) + ) + # batch_size, num_queries, num_heads, num_points, 2 + # -> batch_size, num_heads, num_queries, num_points, 2 + # -> batch_size*num_heads, num_queries, num_points, 2 + sampling_grid_l_ = sampling_grids[:, :, :, level_id].transpose(1, 2).flatten(0, 1) + # batch_size*num_heads, hidden_dim, num_queries, num_points + sampling_value_l_ = nn.functional.grid_sample( + value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False + ) + sampling_value_list.append(sampling_value_l_) + # (batch_size, num_queries, num_heads, num_levels, num_points) + # -> (batch_size, num_heads, num_queries, num_levels, num_points) + # -> (batch_size, num_heads, 1, num_queries, num_levels*num_points) + attention_weights = attention_weights.transpose(1, 2).reshape( + batch_size * num_heads, 1, num_queries, num_levels * num_points + ) + output = ( + (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights) + .sum(-1) + .view(batch_size, num_heads * hidden_dim, num_queries) + ) + return output.transpose(1, 2).contiguous() + + +# Copied from transformers.models.maskformer.modeling_maskformer.dice_loss +def dice_loss(inputs: Tensor, labels: Tensor, num_masks: int) -> Tensor: + r""" + Compute the DICE loss, similar to generalized IOU for masks as follows: + + $$ 
\mathcal{L}_{\text{dice}(x, y) = 1 - \frac{2 * x \cap y }{x \cup y + 1}} $$ + + In practice, since `labels` is a binary mask, (only 0s and 1s), dice can be computed as follow + + $$ \mathcal{L}_{\text{dice}(x, y) = 1 - \frac{2 * x * y }{x + y + 1}} $$ + + Args: + inputs (`torch.Tensor`): + A tensor representing a mask. + labels (`torch.Tensor`): + A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs + (0 for the negative class and 1 for the positive class). + num_masks (`int`): + The number of masks present in the current batch, used for normalization. + + Returns: + `torch.Tensor`: The computed loss. + """ + probs = inputs.sigmoid().flatten(1) + numerator = 2 * (probs * labels).sum(-1) + denominator = probs.sum(-1) + labels.sum(-1) + loss = 1 - (numerator + 1) / (denominator + 1) + loss = loss.sum() / num_masks + return loss + + +# Copied from transformers.models.mask2former.modeling_mask2former.sigmoid_cross_entropy_loss +def sigmoid_cross_entropy_loss(inputs: torch.Tensor, labels: torch.Tensor, num_masks: int) -> torch.Tensor: + r""" + Args: + inputs (`torch.Tensor`): + A float tensor of arbitrary shape. + labels (`torch.Tensor`): + A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs + (0 for the negative class and 1 for the positive class). + + Returns: + loss (`torch.Tensor`): The computed loss. + """ + criterion = nn.BCEWithLogitsLoss(reduction="none") + cross_entropy_loss = criterion(inputs, labels) + + loss = cross_entropy_loss.mean(1).sum() / num_masks + return loss + + +# Copied from transformers.models.maskformer.modeling_maskformer.pair_wise_dice_loss +def pair_wise_dice_loss(inputs: Tensor, labels: Tensor) -> Tensor: + """ + A pair wise version of the dice loss, see `dice_loss` for usage. + + Args: + inputs (`torch.Tensor`): + A tensor representing a mask + labels (`torch.Tensor`): + A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs + (0 for the negative class and 1 for the positive class). + + Returns: + `torch.Tensor`: The computed loss between each pairs. + """ + inputs = inputs.sigmoid().flatten(1) + numerator = 2 * torch.matmul(inputs, labels.T) + # using broadcasting to get a [num_queries, NUM_CLASSES] matrix + denominator = inputs.sum(-1)[:, None] + labels.sum(-1)[None, :] + loss = 1 - (numerator + 1) / (denominator + 1) + return loss + + +# Copied from transformers.models.mask2former.modeling_mask2former.pair_wise_sigmoid_cross_entropy_loss +def pair_wise_sigmoid_cross_entropy_loss(inputs: torch.Tensor, labels: torch.Tensor) -> torch.Tensor: + r""" + A pair wise version of the cross entropy loss, see `sigmoid_cross_entropy_loss` for usage. + + Args: + inputs (`torch.Tensor`): + A tensor representing a mask. + labels (`torch.Tensor`): + A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs + (0 for the negative class and 1 for the positive class). + + Returns: + loss (`torch.Tensor`): The computed loss between each pairs. 
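+
+    Since `BCE(x, y) = y * BCE(x, 1) + (1 - y) * BCE(x, 0)` for binary labels, dividing by
+    `height_and_width` and summing the two matmul terms below reproduces the mean BCE of every
+    (prediction, label) pair. A quick numerical check (illustrative only):
+
+    ```python
+    import torch
+    from torch.nn.functional import binary_cross_entropy_with_logits as bce
+
+    preds = torch.randn(3, 16)                     # 3 predicted masks, 16 sampled points each
+    labels = torch.randint(0, 2, (2, 16)).float()  # 2 ground-truth binary masks
+
+    direct = torch.stack([torch.stack([bce(p, l) for l in labels]) for p in preds])  # (3, 2)
+    paired = pair_wise_sigmoid_cross_entropy_loss(preds, labels)                     # (3, 2)
+    torch.testing.assert_close(direct, paired)
+    ```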
+ """ + + height_and_width = inputs.shape[1] + + criterion = nn.BCEWithLogitsLoss(reduction="none") + cross_entropy_loss_pos = criterion(inputs, torch.ones_like(inputs)) + cross_entropy_loss_neg = criterion(inputs, torch.zeros_like(inputs)) + + loss_pos = torch.matmul(cross_entropy_loss_pos / height_and_width, labels.T) + loss_neg = torch.matmul(cross_entropy_loss_neg / height_and_width, (1 - labels).T) + loss = loss_pos + loss_neg + return loss + + +# Copied from transformers.models.mask2former.modeling_mask2former.sample_point +def sample_point( + input_features: torch.Tensor, point_coordinates: torch.Tensor, add_dim=False, **kwargs +) -> torch.Tensor: + """ + A wrapper around `torch.nn.functional.grid_sample` to support 3D point_coordinates tensors. + + Args: + input_features (`torch.Tensor` of shape (batch_size, channels, height, width)): + A tensor that contains features map on a height * width grid + point_coordinates (`torch.Tensor` of shape (batch_size, num_points, 2) or (batch_size, grid_height, grid_width,: + 2)): + A tensor that contains [0, 1] * [0, 1] normalized point coordinates + add_dim (`bool`): + boolean value to keep track of added dimension + + Returns: + point_features (`torch.Tensor` of shape (batch_size, channels, num_points) or (batch_size, channels, + height_grid, width_grid): + A tensor that contains features for points in `point_coordinates`. + """ + if point_coordinates.dim() == 3: + add_dim = True + point_coordinates = point_coordinates.unsqueeze(2) + + # use nn.function.grid_sample to get features for points in `point_coordinates` via bilinear interpolation + point_features = torch.nn.functional.grid_sample(input_features, 2.0 * point_coordinates - 1.0, **kwargs) + if add_dim: + point_features = point_features.squeeze(3) + + return point_features + + +# Refactored from https://github.com/SHI-Labs/OneFormer/blob/33ebb56ed34f970a30ae103e786c0cb64c653d9a/oneformer/modeling/matcher.py#L93 +class OneFormerHungarianMatcher(nn.Module): + def __init__( + self, cost_class: float = 1.0, cost_mask: float = 1.0, cost_dice: float = 1.0, num_points: int = 12544 + ): + """This class computes an assignment between the labels and the predictions of the network. + + For efficiency reasons, the labels don't include the no_object. Because of this, in general, there are more + predictions than labels. In this case, we do a 1-to-1 matching of the best predictions, while the others are + un-matched (and thus treated as non-objects). + + Params: + cost_class (float, *optional*, defaults to 1.0): + This is the relative weight of the classification error in the matching cost. + cost_mask (float, *optional*, defaults to 1.0): + This is the relative weight of the sigmoid ce loss of the binary mask in the matching cost. + cost_dice (float, *optional*, defaults to 1.0): + This is the relative weight of the dice loss of the binary mask in the matching cost + num_points (int, *optional*, defaults to 12544): + Number of points to be sampled for dice and mask loss matching cost. 
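+
+        Example (illustrative construction; the weight values are assumptions):
+
+        ```python
+        matcher = OneFormerHungarianMatcher(cost_class=2.0, cost_mask=5.0, cost_dice=5.0, num_points=12544)
+        # For every image the matcher builds the cost matrix
+        #     C = cost_mask * CE(pred_points, gt_points)
+        #         + cost_class * (-prob[gt_class])
+        #         + cost_dice * Dice(pred_points, gt_points)
+        # of shape (num_queries, num_targets) and solves it with scipy's `linear_sum_assignment`.
+        ```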
+ """ + super().__init__() + if cost_class == 0 and cost_mask == 0 and cost_dice == 0: + raise ValueError("All costs cant be 0") + self.cost_class = cost_class + self.cost_mask = cost_mask + self.cost_dice = cost_dice + self.num_points = num_points + + @torch.no_grad() + def forward(self, masks_queries_logits, class_queries_logits, mask_labels, class_labels) -> List[Tuple[Tensor]]: + """Performs the matching + + Params: + masks_queries_logits (`torch.Tensor`): + A tensor` of dim `batch_size, num_queries, num_labels` with the + classification logits. + class_queries_logits (`torch.Tensor`): + A tensor` of dim `batch_size, num_queries, height, width` with the + predicted masks. + + class_labels (`torch.Tensor`): + A tensor` of dim `num_target_boxes` (where num_target_boxes is the number + of ground-truth objects in the target) containing the class labels. + mask_labels (`torch.Tensor`): + A tensor` of dim `num_target_boxes, height, width` containing the target + masks. + + Returns: + `List[Tuple[Tensor]]`: A list of size batch_size, containing tuples of (index_i, index_j) where: + - index_i is the indices of the selected predictions (in order) + - index_j is the indices of the corresponding selected labels (in order) + For each batch element, it holds: + len(index_i) = len(index_j) = min(num_queries, num_targets). + """ + indices: List[Tuple[np.array]] = [] + + num_queries = class_queries_logits.shape[1] + + preds_masks = masks_queries_logits + preds_probs = class_queries_logits + # iterate through batch size + for pred_probs, pred_mask, target_mask, labels in zip(preds_probs, preds_masks, mask_labels, class_labels): + pred_probs = pred_probs.softmax(-1) + # Compute the classification cost. Contrary to the loss, we don't use the NLL, + # but approximate it in 1 - proba[target class]. + # The 1 is a constant that doesn't change the matching, it can be ommitted. + cost_class = -pred_probs[:, labels] + + pred_mask = pred_mask[:, None] + target_mask = target_mask[:, None].to(pred_mask.device) + + # all masks share the same set of points for efficient matching! 
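+            # The same `self.num_points` random locations are drawn once per image and reused for every
+            # (prediction, target) pair, so the mask and dice matching costs are computed on identical
+            # point samples instead of on the full-resolution masks.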
+ point_coords = torch.rand(1, self.num_points, 2, device=pred_mask.device) + + # get ground truth labels + target_mask = sample_point( + target_mask, + point_coords.repeat(target_mask.shape[0], 1, 1), + align_corners=False, + ).squeeze(1) + + pred_mask = sample_point( + pred_mask, + point_coords.repeat(pred_mask.shape[0], 1, 1), + align_corners=False, + ).squeeze(1) + + with autocast(enabled=False): + pred_mask = pred_mask.float() + target_mask = target_mask.float() + + # compute the sigmoid ce loss + cost_mask = pair_wise_sigmoid_cross_entropy_loss(pred_mask, target_mask) + # Compute the dice loss + cost_dice = pair_wise_dice_loss(pred_mask, target_mask) + # final cost matrix + cost_matrix = self.cost_mask * cost_mask + self.cost_class * cost_class + self.cost_dice * cost_dice + cost_matrix = cost_matrix.reshape(num_queries, -1).cpu() + # do the assigmented using the hungarian algorithm in scipy + assigned_indices: Tuple[np.array] = linear_sum_assignment(cost_matrix.cpu()) + indices.append(assigned_indices) + + # It could be stacked in one tensor + matched_indices = [ + (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices + ] + return matched_indices + + +class OneFormerLoss(nn.Module): + def __init__( + self, + num_classes: int, + matcher: OneFormerHungarianMatcher, + weight_dict: Dict[str, float], + eos_coef: float, + num_points: int, + oversample_ratio: float, + importance_sample_ratio: float, + contrastive_temperature: float = None, + ): + """ + This class computes the losses using the class predictions, mask predictions and the contrastive queries. + + Oneformer calculates the classification CE loss on the class predictions. Mask predictions are used for + calculating the binary CE loss and dice loss. The contrastive queries are used for calculating the contrastive + loss. + + Args: + num_labels (`int`): + The number of classes. + matcher (`OneFormerHungarianMatcher`): + A torch module that computes the assigments between the predictions and labels. + weight_dict (`Dict[str, float]`): + A dictionary of weights to be applied to the different losses. + eos_coef (`float`): + Weight to apply to the null class. + num_points (`int`): + Number of points to be sampled for dice and mask loss calculations. + oversample_ratio (`float`): + Required for pointwise loss calculation. + importance_sample_ratio (`float`): + Required for pointwise loss calculation. + contrastive_temperature (`float`): + Temperature for scaling the contrastive logits. 
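+
+        Example (illustrative construction; all numbers below are assumed values, and the keys of
+        `weight_dict` follow the loss names produced by this module):
+
+        ```python
+        criterion = OneFormerLoss(
+            num_classes=133,
+            matcher=OneFormerHungarianMatcher(cost_class=2.0, cost_mask=5.0, cost_dice=5.0),
+            weight_dict={"loss_cross_entropy": 2.0, "loss_mask": 5.0, "loss_dice": 5.0, "loss_contrastive": 0.5},
+            eos_coef=0.1,
+            num_points=12544,
+            oversample_ratio=3.0,
+            importance_sample_ratio=0.75,
+            contrastive_temperature=0.07,
+        )
+        ```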
+ """ + requires_backends(self, ["scipy"]) + super().__init__() + self.num_classes = num_classes + self.matcher = matcher + self.weight_dict = weight_dict + self.eos_coef = eos_coef + empty_weight = torch.ones(self.num_classes + 1) + empty_weight[-1] = self.eos_coef + self.register_buffer("empty_weight", empty_weight) + + # pointwise mask loss parameters + self.num_points = num_points + self.oversample_ratio = oversample_ratio + self.importance_sample_ratio = importance_sample_ratio + self.contrastive_temperature = contrastive_temperature + if self.contrastive_temperature is not None: + self.logit_scale = nn.Parameter(torch.tensor(np.log(1 / contrastive_temperature))) + + def _max_by_axis(self, the_list: List[List[int]]) -> List[int]: + maxes = the_list[0] + for sublist in the_list[1:]: + for index, item in enumerate(sublist): + maxes[index] = max(maxes[index], item) + return maxes + + def _pad_images_to_max_in_batch(self, tensors: List[Tensor]) -> Tuple[Tensor, Tensor]: + # get the maximum size in the batch + max_size = self._max_by_axis([list(tensor.shape) for tensor in tensors]) + batch_size = len(tensors) + # compute finel size + batch_shape = [batch_size] + max_size + b, _, h, w = batch_shape + # get metadata + dtype = tensors[0].dtype + device = tensors[0].device + padded_tensors = torch.zeros(batch_shape, dtype=dtype, device=device) + padding_masks = torch.ones((b, h, w), dtype=torch.bool, device=device) + # pad the tensors to the size of the biggest one + for tensor, padded_tensor, padding_mask in zip(tensors, padded_tensors, padding_masks): + padded_tensor[: tensor.shape[0], : tensor.shape[1], : tensor.shape[2]].copy_(tensor) + padding_mask[: tensor.shape[1], : tensor.shape[2]] = False + + return padded_tensors, padding_masks + + def loss_contrastive(self, contrastive_queries_logits: Tensor, text_queries: Tensor): + """Compute the query-text contrastive loss. + + Args: + contrastive_queries_logits (`torch.Tensor`): + A tensor of shape `batch_size, num_queries, hidden_dim` + text_queries (`torch.Tensor`): + A tensor of shape `batch_size, num_queries, hidden_dim` + Returns: + `Dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key: + - **loss_contrastive** -- The query-text contrastive loss computed using task-guided queries + and text queries derived from input text list. + """ + + image_queries = contrastive_queries_logits.float() + + # [batch_size, hidden_dim] + image_queries = nn.functional.normalize(image_queries.flatten(1), dim=-1) + text_queries = nn.functional.normalize(text_queries.flatten(1), dim=-1) + + logit_scale = torch.clamp(self.logit_scale.exp(), max=100) + + logits_per_text = torch.matmul(text_queries, image_queries.t()) * logit_scale + logits_per_img = logits_per_text.t() + + loss_img = nn.functional.cross_entropy( + logits_per_img, torch.arange(len(logits_per_img), device=logits_per_text.device) + ) + loss_text = nn.functional.cross_entropy( + logits_per_text, torch.arange(len(logits_per_text), device=logits_per_text.device) + ) + + loss_contrastive = loss_img + loss_text + + losses = {"loss_contrastive": loss_contrastive} + return losses + + def loss_labels( + self, class_queries_logits: Tensor, class_labels: List[Tensor], indices: Tuple[np.array] + ) -> Dict[str, Tensor]: + """Compute the losses related to the labels using cross entropy. + + Args: + class_queries_logits (`torch.Tensor`): + A tensor of shape `batch_size, num_queries, num_labels` + class_labels (`List[torch.Tensor]`): + List of class labels of shape `(labels)`. 
+ indices (`Tuple[np.array])`: + The indices computed by the Hungarian matcher. + + Returns: + `Dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key: + - **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels. + """ + pred_logits = class_queries_logits + batch_size, num_queries, _ = pred_logits.shape + criterion = nn.CrossEntropyLoss(weight=self.empty_weight) + idx = self._get_predictions_permutation_indices(indices) + + # shape = (batch_size, num_queries) + target_classes_o = torch.cat([target[j] for target, (_, j) in zip(class_labels, indices)]) + # shape = (batch_size, num_queries) + target_classes = torch.full( + (batch_size, num_queries), fill_value=self.num_classes, dtype=torch.int64, device=pred_logits.device + ) + target_classes[idx] = target_classes_o + # permute pred_logits (batch_size, num_queries, num_labels) -> (batch_size, num_labels, num_queries) + pred_logits_transposed = pred_logits.transpose(1, 2) + loss_ce = criterion(pred_logits_transposed, target_classes) + losses = {"loss_cross_entropy": loss_ce} + return losses + + def loss_masks( + self, masks_queries_logits: Tensor, mask_labels: List[Tensor], indices: Tuple[np.array], num_masks: int + ) -> Dict[str, Tensor]: + """Compute the losses related to the masks using focal and dice loss. + + Args: + masks_queries_logits (`torch.Tensor`): + A tensor of shape `batch_size, num_queries, height, width` + mask_labels (`torch.Tensor`): + List of mask labels of shape `(labels, height, width)`. + indices (`Tuple[np.array])`: + The indices computed by the Hungarian matcher. + num_masks (`int)`: + The number of masks, used for normalization. + + Returns: + `Dict[str, Tensor]`: A dict of `torch.Tensor` containing two keys: + - **loss_mask** -- The loss computed using sigmoid ce loss on the predicted and ground truth masks. + - **loss_dice** -- The loss computed using dice loss on the predicted on the predicted and ground truth + masks. 
+ """ + src_idx = self._get_predictions_permutation_indices(indices) + tgt_idx = self._get_targets_permutation_indices(indices) + # shape (batch_size * num_queries, height, width) + pred_masks = masks_queries_logits[src_idx] + # shape (batch_size, num_queries, height, width) + # pad all and stack the targets to the num_labels dimension + # upsample predictions to the target size, we have to add one dim to use interpolate + target_masks, _ = self._pad_images_to_max_in_batch(mask_labels) + target_masks = target_masks[tgt_idx] + + pred_masks = pred_masks[:, None] + target_masks = target_masks[:, None] + + with torch.no_grad(): + # sample point_coords + point_coords = self.sample_points_using_uncertainty( + pred_masks, + self.calculate_uncertainty, + self.num_points, + self.oversample_ratio, + self.importance_sample_ratio, + ) + # get ground-truth labels + point_labels = sample_point(target_masks, point_coords, align_corners=False).squeeze(1) + + point_logits = sample_point(pred_masks, point_coords, align_corners=False).squeeze(1) + + losses = { + "loss_mask": sigmoid_cross_entropy_loss(point_logits, point_labels, num_masks), + "loss_dice": dice_loss(point_logits, point_labels, num_masks), + } + + del pred_masks + del target_masks + return losses + + # Copied from transformers.models.mask2former.modeling_mask2former.Mask2FormerLoss.calculate_uncertainty + def calculate_uncertainty(self, logits: torch.Tensor) -> torch.Tensor: + """ + In Mask2Former paper, uncertainty is estimated as L1 distance between 0.0 and the logit prediction in 'logits' + for the foreground class in `classes`. + + Args: + logits (`torch.Tensor`): + A tensor of shape (R, 1, ...) for class-specific or class-agnostic, where R is the total number of predicted masks in all images and C is: + the number of foreground classes. The values are logits. + + Returns: + scores (`torch.Tensor`): A tensor of shape (R, 1, ...) that contains uncertainty scores with the most + uncertain locations having the highest uncertainty score. + """ + uncertainty_scores = -(torch.abs(logits)) + return uncertainty_scores + + # Copied from transformers.models.mask2former.modeling_mask2former.Mask2FormerLoss.sample_points_using_uncertainty + def sample_points_using_uncertainty( + self, + logits: torch.Tensor, + uncertainty_function, + num_points: int, + oversample_ratio: int, + importance_sample_ratio: float, + ) -> torch.Tensor: + """ + This function is meant for sampling points in [0, 1] * [0, 1] coordinate space based on their uncertainty. The + uncertainty is calculated for each point using the passed `uncertainty function` that takes points logit + prediction as input. + + Args: + logits (`float`): + Logit predictions for P points. + uncertainty_function: + A function that takes logit predictions for P points and returns their uncertainties. + num_points (`int`): + The number of points P to sample. + oversample_ratio (`int`): + Oversampling parameter. + importance_sample_ratio (`float`): + Ratio of points that are sampled via importance sampling. + + Returns: + point_coordinates (`torch.Tensor`): + Coordinates for P sampled points. 
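+
+        Example of the sampling budget (assumed values, shown only to make the arithmetic concrete):
+
+        ```python
+        num_points, oversample_ratio, importance_sample_ratio = 12544, 3.0, 0.75
+        num_sampled = int(num_points * oversample_ratio)            # 37632 uniform candidate points
+        num_uncertain = int(importance_sample_ratio * num_points)   # 9408 kept by the uncertainty top-k
+        num_random = num_points - num_uncertain                     # 3136 freshly drawn uniform points
+        ```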
+ """ + + num_boxes = logits.shape[0] + num_points_sampled = int(num_points * oversample_ratio) + + # Get random point coordinates + point_coordinates = torch.rand(num_boxes, num_points_sampled, 2, device=logits.device) + # Get sampled prediction value for the point coordinates + point_logits = sample_point(logits, point_coordinates, align_corners=False) + # Calculate the uncertainties based on the sampled prediction values of the points + point_uncertainties = uncertainty_function(point_logits) + + num_uncertain_points = int(importance_sample_ratio * num_points) + num_random_points = num_points - num_uncertain_points + + idx = torch.topk(point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1] + shift = num_points_sampled * torch.arange(num_boxes, dtype=torch.long, device=logits.device) + idx += shift[:, None] + point_coordinates = point_coordinates.view(-1, 2)[idx.view(-1), :].view(num_boxes, num_uncertain_points, 2) + + if num_random_points > 0: + point_coordinates = torch.cat( + [point_coordinates, torch.rand(num_boxes, num_random_points, 2, device=logits.device)], + dim=1, + ) + return point_coordinates + + def _get_predictions_permutation_indices(self, indices): + # permute predictions following indices + batch_indices = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) + predictions_indices = torch.cat([src for (src, _) in indices]) + return batch_indices, predictions_indices + + def _get_targets_permutation_indices(self, indices): + # permute labels following indices + batch_indices = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) + target_indices = torch.cat([tgt for (_, tgt) in indices]) + return batch_indices, target_indices + + def forward( + self, + masks_queries_logits: Tensor, + class_queries_logits: Tensor, + contrastive_queries_logits: Tensor, + mask_labels: List[Tensor], + class_labels: List[Tensor], + text_queries: Tensor, + auxiliary_predictions: Optional[Dict[str, Tensor]] = None, + calculate_contrastive_loss: bool = True, + ) -> Dict[str, Tensor]: + """ + This performs the loss computation. + + Args: + masks_queries_logits (`torch.Tensor`): + A tensor of shape `batch_size, num_queries, height, width` + class_queries_logits (`torch.Tensor`): + A tensor of shape `batch_size, num_queries, num_labels` + contrastive_queries_logits (`torch.Tensor`): + A tensor of shape `batch_size, num_queries, hidden_dim` + mask_labels (`torch.Tensor`): + List of mask labels of shape `(labels, height, width)`. + class_labels (`List[torch.Tensor]`): + List of class labels of shape `(labels)`. + text_queries (`torch.Tensor`): + A tensor of shape `batch_size, num_queries, hidden_dim` + auxiliary_predictions (`Dict[str, torch.Tensor]`, *optional*): + if `use_auxiliary_loss` was set to `true` in [`OneFormerConfig`], then it contains the logits from the + inner layers of the Detr's Decoder. + calculate_contrastive_loss (`bool`, *optional*, defaults to `True`): + Whether or not to calculate the contrastive loss. + + Returns: + `Dict[str, Tensor]`: A dict of `torch.Tensor` containing two keys: + - **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels. + - **loss_mask** -- The loss computed using sigmoid ce loss on the predicted and ground truth masks. + - **loss_dice** -- The loss computed using dice loss on the predicted on the predicted and ground truth + masks. + - **loss_contrastive** -- The query-text contrstive loss computed using object and text queries. 
+ if `use_auxiliary_loss` was set to `true` in [`OneFormerConfig`], the dictionary contains additional losses + for each auxiliary prediction. + """ + + # retrieve the matching between the outputs of the last layer and the labels + indices = self.matcher(masks_queries_logits, class_queries_logits, mask_labels, class_labels) + # compute the average number of target masks for normalization purposes + num_masks = self.get_num_masks(class_labels, device=class_labels[0].device) + # get all the losses + losses: Dict[str, Tensor] = { + **self.loss_masks(masks_queries_logits, mask_labels, indices, num_masks), + **self.loss_labels(class_queries_logits, class_labels, indices), + } + if calculate_contrastive_loss: + losses = {**losses, **self.loss_contrastive(contrastive_queries_logits, text_queries)} + + # in case of auxiliary losses, we repeat this process with the output of each intermediate layer. + if auxiliary_predictions is not None: + for idx, aux_outputs in enumerate(auxiliary_predictions): + masks_queries_logits = aux_outputs["masks_queries_logits"] + class_queries_logits = aux_outputs["class_queries_logits"] + loss_dict = self.forward( + masks_queries_logits, + class_queries_logits, + None, + mask_labels, + class_labels, + None, + calculate_contrastive_loss=False, + ) + loss_dict = {f"{key}_{idx}": value for key, value in loss_dict.items()} + losses.update(loss_dict) + + return losses + + def get_num_masks(self, class_labels: torch.Tensor, device: torch.device) -> torch.Tensor: + """ + Computes the average number of target masks across the batch, for normalization purposes. + """ + num_masks = sum([len(classes) for classes in class_labels]) + num_masks = torch.as_tensor([num_masks], dtype=torch.float, device=device) + world_size = 1 + if is_accelerate_available(): + if PartialState._shared_state != {}: + num_masks = reduce(num_masks) + world_size = PartialState().num_processes + + num_masks = torch.clamp(num_masks / world_size, min=1) + return num_masks + + +@dataclass +class OneFormerTransformerDecoderOutput(BaseModelOutput): + """ + Base class for outputs of the Transformer decoder. This class adds attributes for class predictions, mask + predictions and contrastive logits to BaseModelOutput. + + Args: + object_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`): + Queries representation for the region proposals. + contrastive_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`): + Queries representation for the contrastive loss. + prediction_masks (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`): + Mask predictions from the last layer of the transformer decoder. + prediction_class (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes+1)`): + Class predictions from the last layer of the transformer decoder. + auxiliary_predictions (Tuple of Dict of `str, torch.FloatTensor`, *optional*): + Tuple of class and mask predictions from each layer of the transformer decoder.
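+
+    Example (illustrative sketch added for this documentation pass, not part of the original file; `decoder_output`
+    stands for an instance of this class returned by the transformer decoder):
+
+        masks = decoder_output.prediction_masks    # (batch_size, num_queries, height, width)
+        classes = decoder_output.prediction_class  # (batch_size, num_queries, num_classes + 1)
+        if decoder_output.auxiliary_predictions is not None:
+            # one dict per intermediate decoder layer, each with the keys
+            # "class_queries_logits" and "masks_queries_logits"
+            first_aux = decoder_output.auxiliary_predictions[0]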
+ """ + + object_queries: torch.FloatTensor = None + contrastive_logits: Optional[torch.FloatTensor] = None + prediction_masks: torch.FloatTensor = None + prediction_class: torch.FloatTensor = None + auxiliary_predictions: Optional[Tuple[Dict[str, torch.FloatTensor]]] = None + + +@dataclass +# Copied from transformers.models.mask2former.modeling_mask2former.Mask2FormerPixelDecoderOutput with Mask2->One +class OneFormerPixelDecoderOutput(ModelOutput): + """ + OneFormer's pixel decoder module output, practically a Multi-Scale Deformable Attention based decoder. It returns + the mask features and the multiscale features. + + Args: + multi_scale_features (`tuple(torch.FloatTensor)`): + Tuple of multi-scale features of scales [1/8, 1/16, 1/32] and shape `(batch_size, num_channels, height, + width)`from the Multi-Scale Deformable Attenntion based Pixel Decoder. + mask_features (`torch.FloatTensor`): + Tensor of shape `(batch_size, num_channels, height, width)`, 1/4 scale features from the last Pixel Decoder + Layer. + attentions (`tuple(torch.FloatTensor)`, *optional*): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights from pixel decoder. Returned when `output_attentions=True` is passed + or when `config.output_attentions=True` + """ + + multi_scale_features: Tuple[torch.FloatTensor] = None + mask_features: torch.FloatTensor = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class OneFormerPixelLevelModuleOutput(ModelOutput): + """ + OneFormer's pixel level module output. It returns both the last and (optionally) the hidden states from the + `encoder` and `decoder`. By default, the `encoder` is a Swin/Dinat Backbone and the `decoder` is a Multi-Scale + Deformable Attention based decoder. + + Args: + encoder_features (List of `(torch.FloatTensor)`): + List of `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`. Hidden-states (also + called feature maps) of the model at the output of each stage. + decoder_features (List of `(torch.FloatTensor)`): + List of `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`. Hidden-states (also + called feature maps) of the model at the output of each stage. + decoder_last_feature (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)): + 1/4 scale features from the last Pixel Decoder Layer. + """ + + encoder_features: List[torch.FloatTensor] = None + decoder_features: List[torch.FloatTensor] = None + decoder_last_feature: torch.FloatTensor = None + + +@dataclass +class OneFormerModelOutput(ModelOutput): + """ + Class for outputs of [`OneFormerModel`]. This class returns all the needed hidden states to compute the logits. + + Args: + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of + shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder + model at the output of each stage. + pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of + shape `(batch_size, num_channels, height, width)`. 
Hidden-states (also called feature maps) of the pixel + decoder model at the output of each stage. + transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of + shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the + transformer decoder at the output of each stage. + transformer_decoder_object_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`) + Output object queries from the last layer in the transformer decoder. + transformer_decoder_contrastive_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`) + Contrastive queries from the transformer decoder. + transformer_decoder_mask_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`) + Mask Predictions from the last layer in the transformer decoder. + transformer_decoder_class_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes+1)`): + Class Predictions from the last layer in the transformer decoder. + transformer_decoder_auxiliary_predictions (Tuple of Dict of `str, torch.FloatTensor`, *optional*): + Tuple of class and mask predictions from each layer of the transformer decoder. + text_queries (`torch.FloatTensor`, *optional* of shape `(batch_size, num_queries, hidden_dim)`) + Text queries derived from the input text list used for calculating contrastive loss during training. + task_token (`torch.FloatTensor` of shape `(batch_size, hidden_dim)`) + 1D task token to condition the queries. + attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `tuple(torch.FloatTensor)` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Self and Cross Attentions weights from transformer decoder. + """ + + encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + pixel_decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + transformer_decoder_hidden_states: Optional[torch.FloatTensor] = None + transformer_decoder_object_queries: torch.FloatTensor = None + transformer_decoder_contrastive_queries: Optional[torch.FloatTensor] = None + transformer_decoder_mask_predictions: torch.FloatTensor = None + transformer_decoder_class_predictions: torch.FloatTensor = None + transformer_decoder_auxiliary_predictions: Optional[Tuple[Dict[str, torch.FloatTensor]]] = None + text_queries: Optional[torch.FloatTensor] = None + task_token: torch.FloatTensor = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class OneFormerForUniversalSegmentationOutput(ModelOutput): + """ + Class for outputs of [`OneFormerForUniversalSegmentationOutput`]. + + This output can be directly passed to [`~OneFormerImageProcessor.post_process_semantic_segmentation`] or + [`~OneFormerImageProcessor.post_process_instance_segmentation`] or + [`~OneFormerImageProcessor.post_process_panoptic_segmentation`] depending on the task. Please, see + [`~OneFormerImageProcessor] for details regarding usage. + + Args: + loss (`torch.Tensor`, *optional*): + The computed loss, returned when labels are present. 
+ class_queries_logits (`torch.FloatTensor`): + A tensor of shape `(batch_size, num_queries, num_labels + 1)` representing the proposed classes for each + query. Note the `+ 1` is needed because we incorporate the null class. + masks_queries_logits (`torch.FloatTensor`): + A tensor of shape `(batch_size, num_queries, height, width)` representing the proposed masks for each + query. + auxiliary_predictions (List of Dict of `str, torch.FloatTensor`, *optional*): + List of class and mask predictions from each layer of the transformer decoder. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of + shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder + model at the output of each stage. + pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of + shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel + decoder model at the output of each stage. + transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of + shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the + transformer decoder at the output of each stage. + transformer_decoder_object_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`) + Output object queries from the last layer in the transformer decoder. + transformer_decoder_contrastive_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`) + Contrastive queries from the transformer decoder. + transformer_decoder_mask_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`) + Mask Predictions from the last layer in the transformer decoder. + transformer_decoder_class_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes+1)`): + Class Predictions from the last layer in the transformer decoder. + transformer_decoder_auxiliary_predictions (List of Dict of `str, torch.FloatTensor`, *optional*): + List of class and mask predictions from each layer of the transformer decoder. + text_queries (`torch.FloatTensor`, *optional* of shape `(batch_size, num_queries, hidden_dim)`) + Text queries derived from the input text list used for calculating contrastive loss during training. + task_token (`torch.FloatTensor` of shape `(batch_size, hidden_dim)`) + 1D task token to condition the queries. + attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `tuple(torch.FloatTensor)` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Self and Cross Attentions weights from transformer decoder. 
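+
+    Example (illustrative sketch added for this documentation pass, not part of the original file; `outputs` stands
+    for an instance of this class and `processor` for the `OneFormerImageProcessor` that prepared the inputs; the
+    target size is made up):
+
+        class_logits = outputs.class_queries_logits  # (batch_size, num_queries, num_labels + 1)
+        mask_logits = outputs.masks_queries_logits   # (batch_size, num_queries, height, width)
+        # merge the per-query predictions into one semantic map per image, resized to the requested sizes
+        semantic_maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[(512, 512)])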
+ """ + + loss: Optional[torch.FloatTensor] = None + class_queries_logits: torch.FloatTensor = None + masks_queries_logits: torch.FloatTensor = None + auxiliary_predictions: List[Dict[str, torch.FloatTensor]] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + pixel_decoder_hidden_states: Optional[List[torch.FloatTensor]] = None + transformer_decoder_hidden_states: Optional[torch.FloatTensor] = None + transformer_decoder_object_queries: torch.FloatTensor = None + transformer_decoder_contrastive_queries: Optional[torch.FloatTensor] = None + transformer_decoder_mask_predictions: torch.FloatTensor = None + transformer_decoder_class_predictions: torch.FloatTensor = None + transformer_decoder_auxiliary_predictions: Optional[List[Dict[str, torch.FloatTensor]]] = None + text_queries: Optional[torch.FloatTensor] = None + task_token: torch.FloatTensor = None + attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + + +# Modified from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrFrozenBatchNorm2d with DeformableDetr->OneFormerPixelDecoder +class OneFormerPixelDecoderFrozenBatchNorm2d(nn.Module): + """ + BatchNorm2d where the batch statistics and the affine parameters are fixed. + + Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than + torchvision.models.resnet[18,34,50,101] produce nans. + """ + + def __init__(self, n): + super().__init__() + self.register_buffer("weight", torch.ones(n)) + self.register_buffer("bias", torch.zeros(n)) + self.register_buffer("running_mean", torch.zeros(n)) + self.register_buffer("running_var", torch.ones(n)) + + def _load_from_state_dict( + self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs + ): + num_batches_tracked_key = prefix + "num_batches_tracked" + if num_batches_tracked_key in state_dict: + del state_dict[num_batches_tracked_key] + + super()._load_from_state_dict( + state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs + ) + + def forward(self, x): + weight = self.weight.reshape(1, -1, 1, 1) + bias = self.bias.reshape(1, -1, 1, 1) + running_var = self.running_var.reshape(1, -1, 1, 1) + running_mean = self.running_mean.reshape(1, -1, 1, 1) + epsilon = 1e-5 + scale = weight * (running_var + epsilon).rsqrt() + bias = bias - running_mean * scale + return x * scale + bias + + +# Modified from transformers.models.detr.modeling_deformable_detr.DeformableDetrMultiscaleDeformableAttention with DeformableDetr->OneFormerPixelDecoderEncoder +class OneFormerPixelDecoderEncoderMultiscaleDeformableAttention(nn.Module): + """ + Multiscale deformable attention as proposed in Deformable DETR. + """ + + def __init__(self, embed_dim: int, num_heads: int, n_levels: int, n_points: int): + super().__init__() + if embed_dim % num_heads != 0: + raise ValueError( + f"embed_dim (d_model) must be divisible by num_heads, but got {embed_dim} and {num_heads}" + ) + dim_per_head = embed_dim // num_heads + # check if dim_per_head is power of 2 + if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0): + warnings.warn( + "You'd better set embed_dim (d_model) in DeformableDetrMultiscaleDeformableAttention to make the" + " dimension of each attention head a power of 2 which is more efficient in the authors' CUDA" + " implementation." 
+ ) + + self.im2col_step = 128 + + self.d_model = embed_dim + self.n_levels = n_levels + self.n_heads = num_heads + self.n_points = n_points + + self.sampling_offsets = nn.Linear(embed_dim, num_heads * n_levels * n_points * 2) + self.attention_weights = nn.Linear(embed_dim, num_heads * n_levels * n_points) + self.value_proj = nn.Linear(embed_dim, embed_dim) + self.output_proj = nn.Linear(embed_dim, embed_dim) + + def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): + return tensor if position_embeddings is None else tensor + position_embeddings + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states=None, + encoder_attention_mask=None, + position_embeddings: Optional[torch.Tensor] = None, + reference_points=None, + spatial_shapes=None, + level_start_index=None, + output_attentions: bool = False, + ): + # add position embeddings to the hidden states before projecting to queries and keys + if position_embeddings is not None: + hidden_states = self.with_pos_embed(hidden_states, position_embeddings) + + batch_size, num_queries, _ = hidden_states.shape + batch_size, sequence_length, _ = encoder_hidden_states.shape + if (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length: + raise ValueError( + "Make sure to align the spatial shapes with the sequence length of the encoder hidden states" + ) + + value = self.value_proj(encoder_hidden_states) + if attention_mask is not None: + # we invert the attention_mask + value = value.masked_fill(attention_mask[..., None], float(0)) + value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads) + sampling_offsets = self.sampling_offsets(hidden_states).view( + batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2 + ) + attention_weights = self.attention_weights(hidden_states).view( + batch_size, num_queries, self.n_heads, self.n_levels * self.n_points + ) + attention_weights = nn.functional.softmax(attention_weights, -1).view( + batch_size, num_queries, self.n_heads, self.n_levels, self.n_points + ) + # batch_size, num_queries, n_heads, n_levels, n_points, 2 + if reference_points.shape[-1] == 2: + offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) + sampling_locations = ( + reference_points[:, :, None, :, None, :] + + sampling_offsets / offset_normalizer[None, None, None, :, None, :] + ) + elif reference_points.shape[-1] == 4: + sampling_locations = ( + reference_points[:, :, None, :, None, :2] + + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5 + ) + else: + raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}") + # PyTorch implementation + output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights) + output = self.output_proj(output) + + return output, attention_weights + + +class OneFormerPixelDecoderEncoderLayer(nn.Module): + def __init__(self, config: OneFormerConfig): + super().__init__() + self.embed_dim = config.conv_dim + self.self_attn = OneFormerPixelDecoderEncoderMultiscaleDeformableAttention( + embed_dim=self.embed_dim, + num_heads=config.num_attention_heads, + n_levels=3, + n_points=4, + ) + + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + self.dropout = config.dropout + self.activation_fn = nn.functional.relu + self.activation_dropout = config.dropout + self.fc1 = 
nn.Linear(self.embed_dim, config.encoder_feedforward_dim) + self.fc2 = nn.Linear(config.encoder_feedforward_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) + + self.is_training = config.is_training + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + position_embeddings: torch.Tensor = None, + reference_points=None, + spatial_shapes=None, + level_start_index=None, + output_attentions: bool = False, + ): + """ + Args: + hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Input to the layer. + attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): + Attention mask. + position_embeddings (`torch.FloatTensor`, *optional*): + Position embeddings, to be added to `hidden_states`. + reference_points (`torch.FloatTensor`, *optional*): + Reference points. + spatial_shapes (`torch.LongTensor`, *optional*): + Spatial shapes of the backbone feature maps. + level_start_index (`torch.LongTensor`, *optional*): + Level start index. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + + # Apply Multi-scale Deformable Attention Module on the multi-scale feature maps. + hidden_states, attn_weights = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=hidden_states, + encoder_attention_mask=attention_mask, + position_embeddings=position_embeddings, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + output_attentions=output_attentions, + ) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.is_training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.is_training) + + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.is_training) + + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + if self.is_training: + if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): + clamp_value = torch.finfo(hidden_states.dtype).max - 1000 + hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +# Modified from from transformers.models.detr.modeling_deformable_detr.DeformableDetrEncoder with DeformableDetrEncoder->OneFormerPixelDecoderEncoderOnly +class OneFormerPixelDecoderEncoderOnly(nn.Module): + """ + Transformer encoder consisting of *config.encoder_layers* deformable attention layers. Each layer is a + [`OneFormerPixelDecoderEncoderLayer`]. + + The encoder updates the flattened multi-scale feature maps through multiple deformable attention layers. 
+ + Args: + config: OneFormerConfig + """ + + def __init__(self, config: OneFormerConfig): + super().__init__() + + self.config = config + self.dropout = config.dropout + self.layers = nn.ModuleList([OneFormerPixelDecoderEncoderLayer(config) for _ in range(config.encoder_layers)]) + + @staticmethod + def get_reference_points(spatial_shapes, valid_ratios, device): + """ + Get reference points for each feature map. Used in decoder. + + Args: + spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): + Spatial shapes of each feature map. + valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): + Valid ratios of each feature map. + device (`torch.device`): + Device on which to create the tensors. + Returns: + `torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)` + """ + reference_points_list = [] + for lvl, (height, width) in enumerate(spatial_shapes): + ref_y, ref_x = torch.meshgrid( + torch.linspace(0.5, height - 0.5, height, dtype=valid_ratios.dtype, device=device), + torch.linspace(0.5, width - 0.5, width, dtype=valid_ratios.dtype, device=device), + ) + ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * height) + ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * width) + ref = torch.stack((ref_x, ref_y), -1) + reference_points_list.append(ref) + reference_points = torch.cat(reference_points_list, 1) + reference_points = reference_points[:, :, None] * valid_ratios[:, None] + return reference_points + + def forward( + self, + inputs_embeds=None, + attention_mask=None, + position_embeddings=None, + spatial_shapes=None, + level_start_index=None, + valid_ratios=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Args: + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: + - 1 for pixel features that are real (i.e. **not masked**), + - 0 for pixel features that are padding (i.e. **masked**). + [What are attention masks?](../glossary#attention-mask) + position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Position embeddings that are added to the queries and keys in each self-attention layer. + spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): + Spatial shapes of each feature map. + level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`): + Starting index of each feature map. + valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): + Ratio of valid area in each feature level. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. 
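+
+            Example (illustrative sketch added for this documentation pass, not part of the original file; the sizes
+            are made up and only show how `spatial_shapes`, the flattened sequence length and `level_start_index`
+            relate):
+
+                # three feature levels of spatial sizes 32x32, 16x16 and 8x8
+                spatial_shapes = torch.tensor([[32, 32], [16, 16], [8, 8]])
+                # the flattened sequence length is the sum of height * width over all levels
+                sequence_length = int((spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum())  # 1344
+                # starting offset of each level inside the flattened sequence
+                level_start_index = torch.cat(
+                    (spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])
+                )  # tensor([0, 1024, 1280])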
+ """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + hidden_states = inputs_embeds + reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=inputs_embeds.device) + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + for i, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + position_embeddings=position_embeddings, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +# Modified from from transformers.models.mask2former.modeling_mask2former.Mask2FormerPixelDecoder with Mask2->One +class OneFormerPixelDecoder(nn.Module): + def __init__(self, config: OneFormerConfig, feature_channels): + super().__init__() + + self.config = config + + # positional encoding + self.position_embedding = OneFormerSinePositionEmbedding(num_pos_feats=config.conv_dim // 2, normalize=True) + self.num_feature_levels = 3 + transformer_in_channels = feature_channels[-self.num_feature_levels :] + self.transformer_feature_strides = config.strides[-self.num_feature_levels :] + self.feature_channels = feature_channels + self.level_embed = nn.Parameter(torch.Tensor(self.num_feature_levels, config.conv_dim)) + + # Create input projection layers + if self.num_feature_levels > 1: + input_projections_list = [] + for in_channels in transformer_in_channels[::-1]: + input_projections_list.append( + nn.Sequential( + nn.Conv2d(in_channels, config.conv_dim, kernel_size=1), + nn.GroupNorm(32, config.conv_dim), + ) + ) + self.input_projections = nn.ModuleList(input_projections_list) + else: + self.input_projections = nn.ModuleList( + [ + nn.Sequential( + nn.Conv2d(transformer_in_channels[-1], config.conv_dim, kernel_size=1), + nn.GroupNorm(32, config.conv_dim), + ) + ] + ) + + self.encoder = OneFormerPixelDecoderEncoderOnly(config) + + self.mask_projection = nn.Conv2d( + config.conv_dim, + config.mask_dim, + kernel_size=1, + stride=1, + padding=0, + ) + + self.common_stride = config.common_stride + + # extra fpn levels + stride = min(self.transformer_feature_strides) + self.num_fpn_levels = int(np.log2(stride) - np.log2(self.common_stride)) + + lateral_convs = [] + output_convs = [] + + for idx, in_channels in enumerate(self.feature_channels[: self.num_fpn_levels]): + lateral_conv = nn.Sequential( + nn.Conv2d( + in_channels, + config.conv_dim, + kernel_size=1, + bias=False, + ), + nn.GroupNorm(32, config.conv_dim), + ) + output_conv = nn.Sequential( + nn.Conv2d( + config.conv_dim, + config.conv_dim, + kernel_size=3, + stride=1, + padding=1, + bias=False, + ), + nn.GroupNorm(32, config.conv_dim), + nn.ReLU(), + ) + self.add_module("adapter_{}".format(idx + 1), lateral_conv) + self.add_module("layer_{}".format(idx + 1), 
output_conv) + + lateral_convs.append(lateral_conv) + output_convs.append(output_conv) + # Place convs into top-down order (from low to high resolution) + # to make the top-down computation in forward clearer. + self.lateral_convs = lateral_convs[::-1] + self.output_convs = output_convs[::-1] + + def get_valid_ratio(self, mask, dtype=torch.float32): + """Get the valid ratio of all feature maps.""" + + _, height, width = mask.shape + valid_height = torch.sum(~mask[:, :, 0], 1) + valid_width = torch.sum(~mask[:, 0, :], 1) + valid_ratio_heigth = valid_height.to(dtype) / height + valid_ratio_width = valid_width.to(dtype) / width + valid_ratio = torch.stack([valid_ratio_width, valid_ratio_heigth], -1) + return valid_ratio + + def forward( + self, + features, + encoder_outputs=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + + # Then, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default) + sources = [] + position_embeddings_list = [] + for level, source in enumerate(features[::-1][: self.num_feature_levels]): + sources.append(self.input_projections[level](source)) + position_embeddings_list.append(self.position_embedding(source)) + + masks = [torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) for x in sources] + + # Prepare encoder inputs (by flattening) + source_flatten = [] + mask_flatten = [] + lvl_pos_embed_flatten = [] + spatial_shapes = [] + for level, (source, mask, pos_embed) in enumerate(zip(sources, masks, position_embeddings_list)): + batch_size, num_channels, height, width = source.shape + spatial_shape = (height, width) + spatial_shapes.append(spatial_shape) + source = source.flatten(2).transpose(1, 2) + mask = mask.flatten(1) + pos_embed = pos_embed.flatten(2).transpose(1, 2) + lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1) + lvl_pos_embed_flatten.append(lvl_pos_embed) + source_flatten.append(source) + mask_flatten.append(mask) + source_flatten = torch.cat(source_flatten, 1) + mask_flatten = torch.cat(mask_flatten, 1) + lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) + spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=source_flatten.device) + level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) + valid_ratios = torch.stack([self.get_valid_ratio(m, dtype=source_flatten.dtype) for m in masks], 1) + + # Fourth, sent source_flatten + mask_flatten + lvl_pos_embed_flatten (backbone + proj layer output) through encoder + # Also provide spatial_shapes, level_start_index and valid_ratios + if encoder_outputs is None: + encoder_outputs = self.encoder( + inputs_embeds=source_flatten, + attention_mask=mask_flatten, + position_embeddings=lvl_pos_embed_flatten, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + y = encoder_outputs.last_hidden_state + bs = y.shape[0] + + split_size_or_sections = [None] * self.num_feature_levels + for i in range(self.num_feature_levels): + if i < self.num_feature_levels - 1: + split_size_or_sections[i] = level_start_index[i + 1] - level_start_index[i] + else: + 
split_size_or_sections[i] = y.shape[1] - level_start_index[i] + y = torch.split(y, split_size_or_sections, dim=1) + + out = [] + multi_scale_features = [] + num_cur_levels = 0 + for i, z in enumerate(y): + out.append(z.transpose(1, 2).view(bs, -1, spatial_shapes[i][0], spatial_shapes[i][1])) + + # append `out` with extra FPN levels + # Reverse feature maps into top-down order (from low to high resolution) + for idx, feats in enumerate(features[: self.num_fpn_levels][::-1]): + lateral_conv = self.lateral_convs[idx] + output_conv = self.output_convs[idx] + cur_fpn = lateral_conv(feats) + # Following FPN implementation, we use nearest upsampling here + y = cur_fpn + nn.functional.interpolate( + out[-1], size=cur_fpn.shape[-2:], mode="bilinear", align_corners=False + ) + y = output_conv(y) + out.append(y) + + for o in out: + if num_cur_levels < self.num_feature_levels: + multi_scale_features.append(o) + num_cur_levels += 1 + + return OneFormerPixelDecoderOutput( + mask_features=self.mask_projection(out[-1]), + multi_scale_features=multi_scale_features, + attentions=encoder_outputs.attentions, + ) + + +# Modified from from transformers.models.mask2former.modeling_mask2former.Mask2FormerPixelLevelModule with Mask2->One +class OneFormerPixelLevelModule(nn.Module): + def __init__(self, config: OneFormerConfig): + """ + Pixel Level Module proposed in [Masked-attention Mask Transformer for Universal Image + Segmentation](https://arxiv.org/abs/2112.01527). It runs the input image through a backbone and a pixel + decoder, generating multi-scale feature maps and pixel embeddings. + + Args: + config ([`OneFormerConfig`]): + The configuration used to instantiate this model. + """ + super().__init__() + self.encoder = load_backbone(config) + self.decoder = OneFormerPixelDecoder(config, feature_channels=self.encoder.channels) + + def forward(self, pixel_values: Tensor, output_hidden_states: bool = False) -> OneFormerPixelLevelModuleOutput: + features: List[Tensor] = self.encoder(pixel_values).feature_maps + decoder_output: OneFormerPixelDecoderOutput = self.decoder(features, output_hidden_states=output_hidden_states) + return OneFormerPixelLevelModuleOutput( + encoder_features=tuple(features), + decoder_features=decoder_output.multi_scale_features, + decoder_last_feature=decoder_output.mask_features, + ) + + +# Modified from transformers.models.detr.modeling_detr.DetrAttention with Detr->OneFormer +class OneFormerAttention(nn.Module): + """ + Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and + keys (as explained in the DETR paper). + """ + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + is_decoder: bool = False, + bias: bool = True, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + if self.head_dim * num_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" + f" {num_heads})." 
+ ) + self.scaling = self.head_dim**-0.5 + + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): + return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): + return tensor if position_embeddings is None else tensor + position_embeddings + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_embeddings: Optional[torch.Tensor] = None, + key_value_states: Optional[torch.Tensor] = None, + key_value_position_embeddings: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + hidden_states = hidden_states.permute(1, 0, 2) if hidden_states is not None else None + position_embeddings = position_embeddings.permute(1, 0, 2) if position_embeddings is not None else None + key_value_states = key_value_states.permute(1, 0, 2) if key_value_states is not None else None + key_value_position_embeddings = ( + key_value_position_embeddings.permute(1, 0, 2) if key_value_position_embeddings is not None else None + ) + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + batch_size, target_len, embed_dim = hidden_states.size() + + # add position embeddings to the hidden states before projecting to queries and keys + if position_embeddings is not None: + hidden_states_original = hidden_states + hidden_states = self.with_pos_embed(hidden_states, position_embeddings) + + # add key-value position embeddings to the key value states + if key_value_position_embeddings is not None: + key_value_states_original = key_value_states + key_value_states = self.with_pos_embed(key_value_states, key_value_position_embeddings) + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + # get key, value proj + if is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, batch_size) + value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, batch_size) + value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size) + + proj_shape = (batch_size * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_states = value_states.view(*proj_shape) + + source_len = key_states.size(1) + + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): + raise ValueError( + f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (batch_size * self.num_heads, target_len, source_len): + raise ValueError( + f"Attention mask should be of size {(target_len, batch_size * self.num_heads, source_len)}, but is" + f" 
{attention_mask.size()}" + ) + attn_weights += attention_mask + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. + # In order to do so, attn_weights have to reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + attn_output = attn_output.reshape(batch_size, target_len, embed_dim) + + attn_output = self.out_proj(attn_output).permute(1, 0, 2) + + return attn_output, attn_weights_reshaped + + +class OneFormerTransformerDecoderSelfAttentionLayer(nn.Module): + def __init__( + self, embed_dim, num_heads, dropout=0.0, activation="relu", normalize_before=False, layer_norm_eps=1e-05 + ): + super().__init__() + self.self_attn = OneFormerAttention(embed_dim=embed_dim, num_heads=num_heads, dropout=dropout, is_decoder=True) + + self.norm = nn.LayerNorm(embed_dim, eps=layer_norm_eps) + self.dropout = nn.Dropout(dropout) + + self.activation = ACT2FN[activation] + self.normalize_before = normalize_before + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post( + self, + output, + output_mask: Optional[Tensor] = None, + output_key_padding_mask: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + ): + output2, attention_weights = self.self_attn( + hidden_states=output, position_embeddings=query_pos, attention_mask=output_mask, output_attentions=True + ) + output = output + self.dropout(output2) + output = self.norm(output) + + return output, attention_weights + + def forward_pre( + self, + output, + output_mask: Optional[Tensor] = None, + output_key_padding_mask: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + ): + output2 = self.norm(output) + output2, attention_weights = self.self_attn( + hidden_states=output2, position_embeddings=query_pos, attention_mask=output_mask, output_attentions=True + ) + output = output + self.dropout(output2) + + return output, attention_weights + + def forward( + self, + output, + output_mask: Optional[Tensor] = None, + output_key_padding_mask: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + ): + if self.normalize_before: + return self.forward_pre(output, output_mask, output_key_padding_mask, query_pos) + return self.forward_post(output, output_mask, output_key_padding_mask, query_pos) + + +class OneFormerTransformerDecoderCrossAttentionLayer(nn.Module): + def __init__( + self, embed_dim, num_heads, dropout=0.0, activation="relu", normalize_before=False, layer_norm_eps=1e-05 + ): + super().__init__() + self.multihead_attn = nn.MultiheadAttention(embed_dim, num_heads, dropout=dropout) + + self.norm = nn.LayerNorm(embed_dim, eps=layer_norm_eps) + self.dropout = 
nn.Dropout(dropout) + + self.activation = ACT2FN[activation] + self.normalize_before = normalize_before + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post( + self, + output, + memory, + memory_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + ): + output2, attention_weights = self.multihead_attn( + query=self.with_pos_embed(output, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask, + ) + output = output + self.dropout(output2) + output = self.norm(output) + + return output, attention_weights + + def forward_pre( + self, + output, + memory, + memory_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + ): + output2 = self.norm(output) + output2, attention_weights = self.multihead_attn( + query=self.with_pos_embed(output2, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask, + ) + output = output + self.dropout(output2) + + return output, attention_weights + + def forward( + self, + output, + memory, + memory_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + ): + if self.normalize_before: + return self.forward_pre(output, memory, memory_mask, memory_key_padding_mask, pos, query_pos) + return self.forward_post(output, memory, memory_mask, memory_key_padding_mask, pos, query_pos) + + +class OneFormerTransformerDecoderFFNLayer(nn.Module): + def __init__( + self, + d_model, + dim_feedforward=2048, + dropout=0.0, + activation="relu", + normalize_before=False, + layer_norm_eps=1e-05, + ): + super().__init__() + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm = nn.LayerNorm(d_model, eps=layer_norm_eps) + + self.activation = ACT2FN[activation] + self.normalize_before = normalize_before + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post(self, output): + output2 = self.linear2(self.dropout(self.activation(self.linear1(output)))) + output = output + self.dropout(output2) + output = self.norm(output) + return output + + def forward_pre(self, output): + output2 = self.norm(output) + output2 = self.linear2(self.dropout(self.activation(self.linear1(output2)))) + output = output + self.dropout(output2) + return output + + def forward(self, output): + if self.normalize_before: + return self.forward_pre(output) + return self.forward_post(output) + + +class OneFormerMLPPredictionHead(nn.Module): + def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int = 3): + """ + A classic Multi Layer Perceptron (MLP). + + Args: + input_dim (`int`): + The input dimensions. + hidden_dim (`int`): + The hidden dimensions. + output_dim (`int`): + The output dimensions. + num_layers (int, *optional*, defaults to 3): + The number of layers. 
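+
+        Example (illustrative sketch added for this documentation pass, not part of the original file; the shapes are
+        made up and assume the prediction blocks act on the last dimension):
+
+            # maps 256-dim query features to 128-dim mask embeddings with 3 linear layers
+            mask_embedder = OneFormerMLPPredictionHead(input_dim=256, hidden_dim=256, output_dim=128)
+            queries = torch.randn(2, 100, 256)        # (batch_size, num_queries, hidden_dim)
+            mask_embeddings = mask_embedder(queries)  # (2, 100, 128)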
+ """ + super().__init__() + in_dims = [input_dim] + [hidden_dim] * (num_layers - 1) + out_dims = [hidden_dim] * (num_layers - 1) + [output_dim] + + layers = [] + for i, (in_dim, out_dim) in enumerate(zip(in_dims, out_dims)): + layers.append( + PredictionBlock(in_dim, out_dim, activation=nn.ReLU() if i < num_layers - 1 else nn.Identity()) + ) + + self.layers = nn.Sequential(*layers) + + def forward(self, input: Tensor) -> Tensor: + return self.layers(input) + + +# refactored from original implementation +class OneFormerTransformerDecoderLayer(nn.Module): + def __init__(self, config: OneFormerConfig): + super().__init__() + self.embed_dim = config.hidden_dim + self.num_feature_levels = 3 + + self.cross_attn = OneFormerTransformerDecoderCrossAttentionLayer( + embed_dim=self.embed_dim, + num_heads=config.num_attention_heads, + dropout=0.0, + normalize_before=config.pre_norm, + layer_norm_eps=config.layer_norm_eps, + ) + + self.self_attn = OneFormerTransformerDecoderSelfAttentionLayer( + embed_dim=self.embed_dim, + num_heads=config.num_attention_heads, + dropout=0.0, + normalize_before=config.pre_norm, + layer_norm_eps=config.layer_norm_eps, + ) + + self.ffn = OneFormerTransformerDecoderFFNLayer( + d_model=self.embed_dim, + dim_feedforward=config.dim_feedforward, + dropout=0.0, + normalize_before=config.pre_norm, + layer_norm_eps=config.layer_norm_eps, + ) + + def forward( + self, + index: int, + output: torch.Tensor, + multi_stage_features: List[torch.Tensor], + multi_stage_positional_embeddings: List[torch.Tensor], + attention_mask: Optional[torch.Tensor] = None, + query_embeddings: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = False, + ): + """ + Args: + index (`int`): index of the layer in the Transformer decoder. + output (`torch.FloatTensor`): the object queries of shape `(N, batch, hidden_dim)` + multi_stage_features (`List[torch.Tensor]`): the multi-scale features from the pixel decoder. + multi_stage_positional_embeddings (`List[torch.Tensor]`): + positional embeddings for the multi_stage_features + attention_mask (`torch.FloatTensor`): attention mask for the masked cross attention layer + query_embeddings (`torch.FloatTensor`, *optional*): + position embeddings that are added to the queries and keys in the self-attention layer. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. 
+ """ + + level_index = index % self.num_feature_levels + attention_mask[torch.where(attention_mask.sum(-1) == attention_mask.shape[-1])] = False + + # Masked Cross Attention + output, cross_attn_weights = self.cross_attn( + output, + multi_stage_features[level_index], + memory_mask=attention_mask, + memory_key_padding_mask=None, # here we do not apply masking on padded region + pos=multi_stage_positional_embeddings[level_index], + query_pos=query_embeddings, + ) + + # Self Attention + output, self_attn_weights = self.self_attn( + output, + output_mask=None, + output_key_padding_mask=None, + query_pos=query_embeddings, + ) + + # Fully Connected + output = self.ffn(output) + + outputs = (output,) + + if output_attentions: + outputs += (self_attn_weights, cross_attn_weights) + + return outputs + + +class OneFormerTransformerDecoderQueryTransformerDecoder(nn.Module): + def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False): + super().__init__() + self.layers = _get_clones(decoder_layer, num_layers) + self.num_layers = num_layers + self.norm = norm + self.return_intermediate = return_intermediate + + def forward( + self, + output, + memory, + output_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + output_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + ): + intermediate = [] + + for layer in self.layers: + output = layer( + output, + memory, + output_mask=output_mask, + memory_mask=memory_mask, + output_key_padding_mask=output_key_padding_mask, + memory_key_padding_mask=memory_key_padding_mask, + pos=pos, + query_pos=query_pos, + ) + if self.return_intermediate: + intermediate.append(self.norm(output)) + + if self.norm is not None: + output = self.norm(output) + if self.return_intermediate: + intermediate.pop() + intermediate.append(output) + + if self.return_intermediate: + return torch.stack(intermediate) + + return output.unsqueeze(0) + + +class OneFormerTransformerDecoderQueryTransformerDecoderLayer(nn.Module): + def __init__( + self, + d_model, + nhead, + dim_feedforward=2048, + dropout=0.1, + activation="relu", + normalize_before=False, + layer_norm_eps=1e-05, + ): + super().__init__() + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps) + self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps) + self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.dropout3 = nn.Dropout(dropout) + + self.activation = ACT2FN[activation] + self.normalize_before = normalize_before + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post( + self, + output, + memory, + output_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + output_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + ): + q = k = self.with_pos_embed(output, query_pos) + output2 = self.self_attn(q, k, value=output, attn_mask=output_mask, 
key_padding_mask=output_key_padding_mask) + output2 = output2[0] + output = output + self.dropout1(output2) + output = self.norm1(output) + output2 = self.multihead_attn( + query=self.with_pos_embed(output, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask, + ) + output2 = output2[0] + output = output + self.dropout2(output2) + output = self.norm2(output) + output2 = self.linear2(self.dropout(self.activation(self.linear1(output)))) + output = output + self.dropout3(output2) + output = self.norm3(output) + return output + + def forward_pre( + self, + output, + memory, + output_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + output_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + ): + output2 = self.norm1(output) + q = k = self.with_pos_embed(output2, query_pos) + output2 = self.self_attn(q, k, value=output2, attn_mask=output_mask, key_padding_mask=output_key_padding_mask) + output2 = output2[0] + output = output + self.dropout1(output2) + output2 = self.norm2(output) + output2 = self.multihead_attn( + query=self.with_pos_embed(output2, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask, + ) + output2 = output2[0] + output = output + self.dropout2(output2) + output2 = self.norm3(output) + output2 = self.linear2(self.dropout(self.activation(self.linear1(output2)))) + output = output + self.dropout3(output2) + return output + + def forward( + self, + output, + memory, + output_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + output_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + ): + if self.normalize_before: + return self.forward_pre( + output, + memory, + output_mask, + memory_mask, + output_key_padding_mask, + memory_key_padding_mask, + pos, + query_pos, + ) + return self.forward_post( + output, + memory, + output_mask, + memory_mask, + output_key_padding_mask, + memory_key_padding_mask, + pos, + query_pos, + ) + + +class OneFormerTransformerDecoderQueryTransformer(nn.Module): + def __init__( + self, + d_model=512, + nhead=8, + num_decoder_layers=6, + dim_feedforward=2048, + dropout=0.1, + activation="relu", + normalize_before=False, + return_intermediate_dec=False, + layer_norm_eps=1e-05, + ): + super().__init__() + + decoder_layer = OneFormerTransformerDecoderQueryTransformerDecoderLayer( + d_model, nhead, dim_feedforward, dropout, activation, normalize_before, layer_norm_eps + ) + decoder_norm = nn.LayerNorm(d_model, eps=layer_norm_eps) + self.decoder = OneFormerTransformerDecoderQueryTransformerDecoder( + decoder_layer, + num_decoder_layers, + decoder_norm, + return_intermediate=return_intermediate_dec, + ) + + self.d_model = d_model + self.nhead = nhead + + def forward(self, src, mask, query_embed, pos_embed, task_token=None): + batch_size = src.shape[0] + src = src.flatten(2).permute(2, 0, 1) + pos_embed = pos_embed.flatten(2).permute(2, 0, 1) + query_embed = query_embed.unsqueeze(1).repeat(1, batch_size, 1) + if mask is not None: + mask = mask.flatten(1) + + if task_token is None: + queries = torch.zeros_like(query_embed) + else: + queries = task_token.repeat(query_embed.shape[0], 1, 1) + + queries = self.decoder(queries, src, 
memory_key_padding_mask=mask, pos=pos_embed, query_pos=query_embed) + return queries.transpose(1, 2) + + +class OneFormerTransformerDecoder(nn.Module): + """ + Transformer decoder + """ + + def __init__(self, in_channels: int, config: OneFormerConfig): + super().__init__() + self.config = config + + self.dropout = config.dropout + self.num_heads = config.num_attention_heads + self.is_training = config.is_training + self.use_task_norm = config.use_task_norm + self.use_auxiliary_loss = config.use_auxiliary_loss + + self.query_transformer = OneFormerTransformerDecoderQueryTransformer( + d_model=config.hidden_dim, + dropout=config.dropout, + nhead=config.num_attention_heads, + dim_feedforward=config.dim_feedforward, + num_decoder_layers=config.query_dec_layers, + normalize_before=config.pre_norm, + return_intermediate_dec=False, + layer_norm_eps=config.layer_norm_eps, + ) + + self.decoder_norm = nn.LayerNorm(config.hidden_dim, eps=config.layer_norm_eps) + + self.num_feature_levels = 3 + + self.layers = nn.ModuleList( + [OneFormerTransformerDecoderLayer(config) for _ in range(config.decoder_layers - 1)] + ) + + self.query_input_projection = nn.Conv2d(in_channels, config.hidden_dim, kernel_size=1) + + self.class_embed = nn.Linear(config.hidden_dim, config.num_labels + 1) + self.mask_embed = OneFormerMLPPredictionHead( + config.hidden_dim, + config.hidden_dim, + config.mask_dim, + 3, + ) + + def forward( + self, + task_token=None, + multi_stage_features=None, + multi_stage_positional_embeddings=None, + mask_features=None, + query_features=None, + query_embeddings=None, + query_embedder=None, + size_list=None, + output_attentions=None, + ): + if self.use_task_norm: + task_token = self.decoder_norm(task_token) + + object_queries = self.query_transformer( + query_features, + None, + query_embedder.weight[:-1], + self.query_input_projection(mask_features), + task_token if self.use_task_norm else None, + ) + + object_queries = object_queries[0].permute(1, 0, 2) + + queries = torch.cat([object_queries, task_token], dim=0) + + output = queries.clone() + + intermediate_class_predictions = [] + intermediate_mask_predictions = [] + + # prediction heads on learnable query features + outputs_class, outputs_mask, attention_mask = self.forward_prediction_heads( + output, mask_features, attention_mask_target_size=size_list[0] + ) + intermediate_class_predictions.append(outputs_class) + intermediate_mask_predictions.append(outputs_mask) + + attentions = () + + for index, layer in enumerate(self.layers): + layer_outputs = layer( + index=index, + output=output, + multi_stage_features=multi_stage_features, + multi_stage_positional_embeddings=multi_stage_positional_embeddings, + attention_mask=attention_mask, + query_embeddings=query_embeddings, + output_attentions=output_attentions, + ) + + output = layer_outputs[0] + attentions += (layer_outputs[1:],) + + outputs_class, outputs_mask, attention_mask = self.forward_prediction_heads( + output, mask_features, attention_mask_target_size=size_list[(index + 1) % self.num_feature_levels] + ) + intermediate_class_predictions.append(outputs_class) + intermediate_mask_predictions.append(outputs_mask) + + if not len(intermediate_mask_predictions) == len(self.layers) + 1: + raise ValueError( + "Intermediate predictions in the transformer decoder must have the same number of elements as number" + " of layers" + ) + + object_queries = layer_outputs[0].permute(1, 0, 2) + + contrastive_logits = queries.permute(1, 0, 2) + + return OneFormerTransformerDecoderOutput( + 
object_queries=object_queries, + contrastive_logits=contrastive_logits, + prediction_masks=intermediate_mask_predictions[-1], + prediction_class=intermediate_class_predictions[-1], + auxiliary_predictions=self._get_aux_predictions( + intermediate_class_predictions, intermediate_mask_predictions + ) + if self.use_auxiliary_loss + else None, + attentions=attentions, + ) + + def forward_prediction_heads(self, output, mask_features, attention_mask_target_size): + decoder_output = self.decoder_norm(output) + decoder_output = decoder_output.transpose(0, 1) + outputs_class = self.class_embed(decoder_output) + mask_embed = self.mask_embed(decoder_output) + outputs_mask = torch.einsum("bqc,bchw->bqhw", mask_embed, mask_features) + + attention_mask = nn.functional.interpolate( + outputs_mask, size=attention_mask_target_size, mode="bilinear", align_corners=False + ) + + # must use bool type + # If a BoolTensor is provided, positions with ``True`` are not allowed to attend while ``False`` values will be unchanged. + attention_mask = ( + attention_mask.sigmoid().flatten(2).unsqueeze(1).repeat(1, self.num_heads, 1, 1).flatten(0, 1) < 0.5 + ).bool() + attention_mask = attention_mask.detach() + + return outputs_class, outputs_mask, attention_mask + + @torch.jit.unused + def _get_aux_predictions(self, outputs_class, outputs_seg_masks): + # this is a workaround to make torchscript happy, as torchscript + # doesn't support dictionary with non-homogeneous values, such + # as a dict having both a Tensor and a list. + aux_list = [ + {"class_queries_logits": a, "masks_queries_logits": b} + for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1]) + ] + return tuple(aux_list) + + +class OneFormerTransformerModule(nn.Module): + """ + The OneFormer's transformer module. + """ + + def __init__(self, in_features: int, config: OneFormerConfig): + super().__init__() + hidden_dim = config.hidden_dim + self.num_feature_levels = 3 + self.position_embedder = OneFormerSinePositionEmbedding(num_pos_feats=hidden_dim // 2, normalize=True) + self.queries_embedder = nn.Embedding(config.num_queries, hidden_dim) + self.input_projections = [] + + for _ in range(self.num_feature_levels): + if in_features != hidden_dim or config.enforce_input_proj: + self.input_projections.append(nn.Conv2d(in_features, hidden_dim, kernel_size=1)) + else: + self.input_projections.append(nn.Sequential()) + + self.decoder = OneFormerTransformerDecoder(in_channels=in_features, config=config) + self.level_embed = nn.Embedding(self.num_feature_levels, hidden_dim) + + def forward( + self, + multi_scale_features: List[Tensor], + mask_features: Tensor, + task_token: Tensor, + output_attentions: bool = False, + ) -> OneFormerTransformerDecoderOutput: + if not len(multi_scale_features) == self.num_feature_levels: + raise ValueError( + f"Number of elements in multi_scale_features ({len(multi_scale_features)}) and num_feature_levels" + f" ({self.num_feature_levels}) do not match!" 
+ ) + multi_stage_features = [] + multi_stage_positional_embeddings = [] + size_list = [] + + for i in range(self.num_feature_levels): + size_list.append(multi_scale_features[i].shape[-2:]) + multi_stage_positional_embeddings.append(self.position_embedder(multi_scale_features[i], None).flatten(2)) + multi_stage_features.append( + self.input_projections[i](multi_scale_features[i]).flatten(2) + + self.level_embed.weight[i][None, :, None] + ) + + # flatten NxCxHxW to HWxNxC + multi_stage_positional_embeddings[-1] = multi_stage_positional_embeddings[-1].permute(2, 0, 1) + multi_stage_features[-1] = multi_stage_features[-1].permute(2, 0, 1) + + _, batch_size, _ = multi_stage_features[0].shape + + # QxNxC + query_embeddings = self.queries_embedder.weight.unsqueeze(1).repeat(1, batch_size, 1) + task_token = task_token.unsqueeze(0) + + query_features = self.position_embedder(mask_features, None) + + return self.decoder( + task_token=task_token, + multi_stage_features=multi_stage_features, + multi_stage_positional_embeddings=multi_stage_positional_embeddings, + mask_features=mask_features, + query_features=query_features, + query_embeddings=query_embeddings, + query_embedder=self.queries_embedder, + size_list=size_list, + output_attentions=output_attentions, + ) + + +# Copied from transformers.models.maskformer.modeling_maskformer.MaskFormerSinePositionEmbedding with Mask->One +class OneFormerSinePositionEmbedding(nn.Module): + """ + This is a more standard version of the position embedding, very similar to the one used by the Attention is all you + need paper, generalized to work on images. + """ + + def __init__( + self, num_pos_feats: int = 64, temperature: int = 10000, normalize: bool = False, scale: Optional[float] = None + ): + super().__init__() + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + self.num_pos_feats = num_pos_feats + self.temperature = temperature + self.normalize = normalize + self.scale = 2 * math.pi if scale is None else scale + + def forward(self, x: Tensor, mask: Optional[Tensor] = None) -> Tensor: + if mask is None: + mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) + not_mask = (~mask).to(x.dtype) + y_embed = not_mask.cumsum(1) + x_embed = not_mask.cumsum(2) + if self.normalize: + eps = 1e-6 + y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale + + dim_t = torch.arange(self.num_pos_feats, dtype=torch.int64, device=x.device).type_as(x) + dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.num_pos_feats) + + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + return pos + + +# Copied from transformers.models.maskformer.modeling_maskformer.PredictionBlock +class PredictionBlock(nn.Module): + def __init__(self, in_dim: int, out_dim: int, activation: nn.Module) -> None: + super().__init__() + self.layers = [nn.Linear(in_dim, out_dim), activation] + # Maintain submodule indexing as if part of a Sequential block + for i, layer in enumerate(self.layers): + self.add_module(str(i), layer) + + def forward(self, input: Tensor) -> Tensor: + hidden_state = input + for layer in self.layers: + 
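+            # each iteration applies one registered submodule in order: the linear projection, then its activation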
hidden_state = layer(hidden_state) + return hidden_state + + +class OneFormerTextMapperAttention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = qk_scale or head_dim**-0.5 + + self.q_proj = nn.Linear(dim, dim, bias=qkv_bias) + self.k_proj = nn.Linear(dim, dim, bias=qkv_bias) + self.v_proj = nn.Linear(dim, dim, bias=qkv_bias) + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, q, k, v): + batch_size, q_sequence_length, num_channels = q.shape + if not k.shape == v.shape: + raise ValueError(f"keys ({list(k.shape)}) and values ({list(v.shape)}) have different shapes!") + batch_size, k_sequence_length, num_channels = k.shape + q = self.q_proj(q).reshape(batch_size, q_sequence_length, self.num_heads, num_channels // self.num_heads) + k = self.k_proj(k).reshape(batch_size, k_sequence_length, self.num_heads, num_channels // self.num_heads) + v = self.v_proj(v).reshape(batch_size, k_sequence_length, self.num_heads, num_channels // self.num_heads) + + attn = torch.einsum("bnkc,bmkc->bknm", q, k) * self.scale + + attn = attn.softmax(dim=-1) + + output = torch.einsum("bknm,bmkc->bnkc", attn, v).reshape(batch_size, q_sequence_length, num_channels) + + output = self.proj(output) + output = self.proj_drop(output) + return output + + +class OneFormerTextTransformerDecoderLayer(nn.Module): + def __init__( + self, + d_model, + nhead, + dropout=0.1, + layer_norm_eps=1e-05, + ): + super().__init__() + self.self_attn = OneFormerTextMapperAttention(d_model, nhead, proj_drop=dropout) + self.cross_attn = OneFormerTextMapperAttention(d_model, nhead, proj_drop=dropout) + + self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps) + self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps) + self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps) + self.dropout = nn.Dropout(dropout) + + self.mlp = nn.Sequential( + nn.Linear(d_model, d_model * 4), nn.GELU(), nn.Dropout(dropout), nn.Linear(d_model * 4, d_model) + ) + + def forward(self, hidden_state, mem): + q = k = v = self.norm1(hidden_state) + hidden_state = hidden_state + self.self_attn(q, k, v) + q = self.norm2(hidden_state) + hidden_state = hidden_state + self.cross_attn(q, mem, mem) + hidden_state = hidden_state + self.dropout(self.mlp(self.norm3(hidden_state))) + return hidden_state + + +class OneFormerTextContextDecoder(nn.Module): + def __init__( + self, + transformer_width=256, + transformer_heads=4, + transformer_layers=6, + visual_dim=1024, + dropout=0.1, + layer_norm_eps=1e-05, + **kwargs, + ): + super().__init__() + + self.memory_proj = nn.Sequential( + nn.LayerNorm(visual_dim, eps=layer_norm_eps), + nn.Linear(visual_dim, transformer_width), + nn.LayerNorm(transformer_width, eps=layer_norm_eps), + ) + + self.text_proj = nn.Sequential( + nn.LayerNorm(visual_dim, eps=layer_norm_eps), + nn.Linear(visual_dim, transformer_width), + ) + + self.decoder = nn.ModuleList( + [ + OneFormerTextTransformerDecoderLayer(transformer_width, transformer_heads, dropout, layer_norm_eps) + for _ in range(transformer_layers) + ] + ) + + self.out_proj = nn.Sequential( + nn.LayerNorm(transformer_width, eps=layer_norm_eps), nn.Linear(transformer_width, visual_dim) + ) + + def forward(self, text, visual): + visual = self.memory_proj(visual) 
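+        # project the text embeddings to the same transformer width so they can cross-attend
+        # over the projected visual memory in the decoder layers below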
+ hidden_state = self.text_proj(text) + + for layer in self.decoder: + hidden_state = layer(hidden_state, visual) + + return self.out_proj(hidden_state) + + +class OneFormerTextMLP(nn.Module): + def __init__( + self, + hidden_size: Optional[int] = None, + intermediate_size: Optional[int] = None, + output_size: Optional[int] = None, + ): + super().__init__() + self.activation_fn = ACT2FN["quick_gelu"] + hidden_size = hidden_size + intermediate_size = intermediate_size + output_size = output_size + self.fc1 = nn.Linear(hidden_size, intermediate_size) + self.fc2 = nn.Linear(intermediate_size, output_size) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.fc1(hidden_states) + hidden_states = self.activation_fn(hidden_states) + hidden_states = self.fc2(hidden_states) + return hidden_states + + +class OneFormerTextTransformerLayer(nn.Module): + def __init__(self, width: int, heads: int, attn_mask: torch.Tensor, layer_norm_eps=1e-05): + super().__init__() + self.self_attn = nn.MultiheadAttention(width, heads) + self.layer_norm1 = nn.LayerNorm(width, eps=layer_norm_eps) + self.mlp = OneFormerTextMLP(width, width * 4, width) + self.layer_norm2 = nn.LayerNorm(width, eps=layer_norm_eps) + self.attn_mask = attn_mask + + def forward( + self, + hidden_states: torch.Tensor, + key_padding_mask: Optional[torch.Tensor] = None, + ) -> torch.FloatTensor: + residual = hidden_states + + hidden_states = self.layer_norm1(hidden_states) + hidden_states = self.self_attn( + hidden_states, + hidden_states, + hidden_states, + need_weights=False, + key_padding_mask=key_padding_mask, + )[0] + hidden_states = residual + hidden_states + + residual = hidden_states + hidden_states = self.layer_norm2(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + return hidden_states + + +class OneFormerTextTransformer(nn.Module): + def __init__( + self, + width: int, + layers: int, + heads: int, + attn_mask: torch.Tensor = None, + use_checkpoint=False, + layer_norm_eps=1e-05, + ): + super().__init__() + self.width = width + self.num_layers = layers + self.layers = nn.Sequential( + *[OneFormerTextTransformerLayer(width, heads, attn_mask, layer_norm_eps) for _ in range(layers)] + ) + self.use_checkpoint = use_checkpoint + + def forward(self, hidden_states: torch.Tensor): + for layer in self.layers: + if self.use_checkpoint: + hidden_states = self._gradient_checkpointing_func(layer, hidden_states) + else: + hidden_states = layer(hidden_states) + return hidden_states + + +class OneFormerTextEncoder(nn.Module): + def __init__( + self, + context_length: int, + width: int, + layers: int, + vocab_size, + use_checkpoint=False, + layer_norm_eps=1e-05, + ): + super().__init__() + heads = width // 64 + self.context_length = context_length + self.width = width + self.transformer = OneFormerTextTransformer( + width=width, + layers=layers, + heads=heads, + attn_mask=self.build_attention_mask(), + use_checkpoint=use_checkpoint, + layer_norm_eps=layer_norm_eps, + ) + + self.positional_embedding = nn.Parameter(torch.empty(self.context_length, width)) + self.ln_final = nn.LayerNorm(width, eps=layer_norm_eps) + self.token_embedding = nn.Embedding(vocab_size, width) + + def build_attention_mask(self): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(self.context_length, self.context_length) + mask.fill_(float("-inf")) + mask.triu_(1) # zero out the 
lower diagonal + return mask + + def forward(self, text): + hidden_state = self.token_embedding(text) + hidden_state = hidden_state + self.positional_embedding + hidden_state = hidden_state.permute(1, 0, 2) + hidden_state = self.transformer(hidden_state) + hidden_state = hidden_state.permute(1, 0, 2) + hidden_state = self.ln_final(hidden_state) + hidden_state = hidden_state[torch.arange(hidden_state.shape[0]), text.argmax(dim=-1)] + + return hidden_state + + +class OneFormerTextMapper(nn.Module): + def __init__(self, config: OneFormerConfig): + super().__init__() + self.text_encoder = OneFormerTextEncoder( + context_length=config.text_encoder_context_length, + width=config.text_encoder_width, + layers=config.text_encoder_num_layers, + vocab_size=config.text_encoder_vocab_size, + layer_norm_eps=config.layer_norm_eps, + ) + + self.text_projector = OneFormerMLPPredictionHead( + config.text_encoder_width, + config.hidden_dim, + config.hidden_dim, + config.text_encoder_proj_layers, + ) + if config.text_encoder_n_ctx > 0: + self.prompt_ctx = nn.Embedding( + config.text_encoder_n_ctx, + config.text_encoder_width, + ) + else: + self.prompt_ctx = None + + def forward( + self, + inputs: Tensor, + ) -> Tensor: + text_queries = self.encode_text(inputs) + + return text_queries + + def encode_text(self, text): + if text.ndim is None: + raise ValueError("text must not be NoneType") + if text.ndim not in [2, 3]: + raise ValueError("Number of dimensions in text must be 2 or 3") + squeeze_dim = False + num_text = 1 + if text.ndim == 3: + num_text = text.shape[1] + batch_size, num_text, hidden_dim = text.shape + text = text.reshape(batch_size * num_text, hidden_dim) + squeeze_dim = True + + # [batch_size, num_channels] + encoded_text = self.text_encoder(text) + + text_queries = self.text_projector(encoded_text) + + if squeeze_dim: + _, hidden_dim = text_queries.shape + text_queries = text_queries.reshape(batch_size, num_text, hidden_dim) + if self.prompt_ctx is not None: + text_queries_ctx = self.prompt_ctx.weight.unsqueeze(0).repeat(text_queries.shape[0], 1, 1) + text_queries = torch.cat([text_queries, text_queries_ctx], dim=1) + + return text_queries + + +class OneFormerTaskModel(nn.Module): + def __init__(self, config: OneFormerConfig): + super().__init__() + self.task_mlp = OneFormerMLPPredictionHead( + config.task_seq_len, + config.hidden_dim, + config.hidden_dim, + 2, + ) + + def forward(self, inputs: Tensor) -> Tensor: + task_tokens = self.task_mlp(inputs) + return task_tokens + + +ONEFORMER_START_DOCSTRING = r""" + This model is a PyTorch [nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a + regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. + + Parameters: + config ([`OneFormerConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +ONEFORMER_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`OneFormerProcessor`]. See + [`OneFormerProcessor.__call__`] for details. + task_inputs (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): + Task inputs. Task inputs can be obtained using [`AutoImageProcessor`]. 
See [`OneFormerProcessor.__call__`] + for details. + pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): + Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: + + - 1 for pixels that are real (i.e. **not masked**), + - 0 for pixels that are padding (i.e. **masked**). + + [What are attention masks?](../glossary#attention-mask) + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of Detr's decoder attention layers. + return_dict (`bool`, *optional*): + Whether or not to return a [`~OneFormerModelOutput`] instead of a plain tuple. +""" + + +class OneFormerPreTrainedModel(PreTrainedModel): + config_class = OneFormerConfig + base_model_prefix = "model" + main_input_name = "pixel_values" + + def _init_weights(self, module: nn.Module): + xavier_std = self.config.init_xavier_std + std = self.config.init_std + if isinstance(module, OneFormerTransformerModule): + if module.input_projections is not None: + for input_projection in module.input_projections: + if not isinstance(input_projection, nn.Sequential): + nn.init.xavier_uniform_(input_projection.weight, gain=xavier_std) + nn.init.constant_(input_projection.bias, 0) + elif isinstance(module, OneFormerTransformerDecoder): + nn.init.xavier_uniform_(module.query_input_projection.weight, gain=xavier_std) + nn.init.constant_(module.query_input_projection.bias, 0) + module.query_input_projection._is_hf_initialized = True + elif isinstance(module, OneFormerPixelDecoderEncoderMultiscaleDeformableAttention): + nn.init.constant_(module.sampling_offsets.weight.data, 0.0) + thetas = torch.arange(module.n_heads, dtype=torch.int64).float() * (2.0 * math.pi / module.n_heads) + grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) + grid_init = ( + (grid_init / grid_init.abs().max(-1, keepdim=True)[0]) + .view(module.n_heads, 1, 1, 2) + .repeat(1, module.n_levels, module.n_points, 1) + ) + for i in range(module.n_points): + grid_init[:, :, i, :] *= i + 1 + with torch.no_grad(): + module.sampling_offsets.bias = nn.Parameter(grid_init.view(-1)) + nn.init.constant_(module.attention_weights.weight.data, 0.0) + nn.init.constant_(module.attention_weights.bias.data, 0.0) + nn.init.xavier_uniform_(module.value_proj.weight.data) + nn.init.constant_(module.value_proj.bias.data, 0.0) + nn.init.xavier_uniform_(module.output_proj.weight.data) + nn.init.constant_(module.output_proj.bias.data, 0.0) + elif isinstance(module, OneFormerPixelDecoderEncoderOnly): + for p in module.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + elif isinstance(module, OneFormerPixelDecoder): + for p in module.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + nn.init.normal_(module.level_embed, std=0) + elif isinstance(module, OneFormerTransformerDecoderSelfAttentionLayer): + for p in module.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p, gain=xavier_std) + elif isinstance(module, OneFormerTransformerDecoderCrossAttentionLayer): + for p in module.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p, gain=xavier_std) + elif isinstance(module, OneFormerTransformerDecoderFFNLayer): + for p in module.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p, gain=xavier_std) + elif isinstance(module, OneFormerTransformerDecoderQueryTransformer): + for p in 
module.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p, gain=xavier_std) + elif isinstance(module, OneFormerPixelLevelModule): + for submodule in module.modules(): + if isinstance(submodule, (nn.Conv2d, nn.Linear)): + submodule.weight.data.normal_(mean=0.0, std=std) + if submodule.bias is not None: + submodule.bias.data.zero_() + elif isinstance(module, OneFormerTextContextDecoder): + for submodule in module.modules(): + if isinstance(submodule, nn.Linear): + nn.init.trunc_normal_(submodule.weight, std=0.02) + if isinstance(submodule, nn.Linear) and submodule.bias is not None: + nn.init.constant_(submodule.bias, 0) + elif isinstance(submodule, nn.LayerNorm): + nn.init.constant_(submodule.bias, 0) + nn.init.constant_(submodule.weight, 1.0) + elif isinstance(module, OneFormerTextTransformer): + proj_std = (module.width**-0.5) * ((2 * module.num_layers) ** -0.5) + attn_std = module.width**-0.5 + fc_std = (2 * module.width) ** -0.5 + for layer in module.layers: + nn.init.normal_(layer.self_attn.in_proj_weight, std=attn_std) + nn.init.normal_(layer.self_attn.out_proj.weight, std=proj_std) + nn.init.normal_(layer.mlp.fc1.weight, std=fc_std) + nn.init.normal_(layer.mlp.fc2.weight, std=proj_std) + elif isinstance(module, OneFormerTextEncoder): + nn.init.normal_(module.token_embedding.weight, std=0.02) + nn.init.normal_(module.positional_embedding, std=0.01) + if hasattr(module, "reference_points"): + nn.init.xavier_uniform_(module.reference_points.weight.data, gain=1.0) + nn.init.constant_(module.reference_points.bias.data, 0.0) + elif isinstance(module, OneFormerTaskModel): + for submodule in module.modules(): + if isinstance(module, OneFormerMLPPredictionHead): + for submodule in module.modules(): + if isinstance(submodule, nn.Linear): + nn.init.xavier_uniform_(submodule.weight, gain=xavier_std) + nn.init.constant_(submodule.bias, 0) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + elif isinstance(module, nn.MultiheadAttention): + module.in_proj_weight.data.normal_(mean=0.0, std=std) + module.in_proj_bias.data.zero_() + elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + +@add_start_docstrings( + "The bare OneFormer Model outputting raw hidden-states without any specific head on top.", + ONEFORMER_START_DOCSTRING, +) +class OneFormerModel(OneFormerPreTrainedModel): + main_input_name = ["pixel_values", "task_inputs"] + + def __init__(self, config: OneFormerConfig): + super().__init__(config) + self.pixel_level_module = OneFormerPixelLevelModule(config) + self.transformer_module = OneFormerTransformerModule(in_features=config.conv_dim, config=config) + self.task_encoder = OneFormerTaskModel(config) + self.is_training = config.is_training + + if self.is_training: + self.text_mapper = OneFormerTextMapper(config) + else: + self.text_mapper = None + + self.post_init() + + @add_start_docstrings_to_model_forward(ONEFORMER_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=OneFormerModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: Tensor, + task_inputs: Tensor, + text_inputs: Optional[Tensor] = None, + pixel_mask: Optional[Tensor] = None, + output_hidden_states: Optional[bool] = None, + output_attentions: 
Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> OneFormerModelOutput: + r""" + Returns: + `OneFormerModelOutput` + Example: + + ```python + >>> import torch + >>> from PIL import Image + >>> import requests + >>> from transformers import OneFormerProcessor, OneFormerModel + + >>> # download texting image + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> # load processor for preprocessing the inputs + >>> processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny") + >>> model = OneFormerModel.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny") + >>> inputs = processor(image, ["semantic"], return_tensors="pt") + + >>> with torch.no_grad(): + ... outputs = model(**inputs) + + >>> mask_predictions = outputs.transformer_decoder_mask_predictions + >>> class_predictions = outputs.transformer_decoder_class_predictions + + >>> f"👉 Mask Predictions Shape: {list(mask_predictions.shape)}, Class Predictions Shape: {list(class_predictions.shape)}" + '👉 Mask Predictions Shape: [1, 150, 128, 171], Class Predictions Shape: [1, 150, 151]' + ```""" + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + batch_size, _, height, width = pixel_values.shape + + if pixel_mask is None: + pixel_mask = torch.ones((batch_size, height, width), device=pixel_values.device) + + pixel_level_module_output = self.pixel_level_module(pixel_values, output_hidden_states) + + multi_scale_features = pixel_level_module_output.decoder_features + mask_features = pixel_level_module_output.decoder_last_feature + + task_token = self.task_encoder(task_inputs.to(self.dtype)) + + if self.is_training: + text_queries = self.text_mapper(text_inputs) + else: + text_queries = None + + transformer_module_output = self.transformer_module( + multi_scale_features=multi_scale_features, + mask_features=mask_features, + task_token=task_token, + output_attentions=output_attentions, + ) + + queries = transformer_module_output.object_queries + + encoder_hidden_states = None + pixel_decoder_hidden_states = None + transformer_decoder_hidden_states = None + + if output_hidden_states: + encoder_hidden_states = pixel_level_module_output.encoder_features + pixel_decoder_hidden_states = (pixel_level_module_output.decoder_last_feature,) + for f in pixel_level_module_output.decoder_features: + pixel_decoder_hidden_states += (f,) + transformer_decoder_hidden_states = transformer_module_output.auxiliary_predictions + + output = OneFormerModelOutput( + encoder_hidden_states=encoder_hidden_states, + pixel_decoder_hidden_states=pixel_decoder_hidden_states, + transformer_decoder_hidden_states=transformer_decoder_hidden_states, + transformer_decoder_object_queries=queries, + transformer_decoder_contrastive_queries=transformer_module_output.contrastive_logits, + transformer_decoder_mask_predictions=transformer_module_output.prediction_masks, + transformer_decoder_class_predictions=transformer_module_output.prediction_class, + transformer_decoder_auxiliary_predictions=transformer_module_output.auxiliary_predictions, + text_queries=text_queries, + task_token=task_token, + 
attentions=transformer_module_output.attentions, + ) + + if not return_dict: + output = tuple(v for v in output.values()) + + return output + + +@add_start_docstrings( + "OneFormer Model for instance, semantic and panoptic image segmentation.", + ONEFORMER_START_DOCSTRING, +) +class OneFormerForUniversalSegmentation(OneFormerPreTrainedModel): + main_input_name = ["pixel_values", "task_inputs"] + + def __init__(self, config: OneFormerConfig): + super().__init__(config) + self.model = OneFormerModel(config) + + self.matcher = OneFormerHungarianMatcher( + cost_class=config.class_weight, + cost_dice=config.dice_weight, + cost_mask=config.mask_weight, + num_points=config.train_num_points, + ) + + self.weight_dict: Dict[str, float] = { + "loss_cross_entropy": config.class_weight, + "loss_mask": config.mask_weight, + "loss_dice": config.dice_weight, + "loss_contrastive": config.contrastive_weight, + } + + self.criterion = OneFormerLoss( + num_classes=config.num_labels, + matcher=self.matcher, + weight_dict=self.weight_dict, + eos_coef=config.no_object_weight, + num_points=config.train_num_points, + oversample_ratio=config.oversample_ratio, + importance_sample_ratio=config.importance_sample_ratio, + contrastive_temperature=config.contrastive_temperature, + ) + + self.post_init() + + def get_loss_dict( + self, + masks_queries_logits: Tensor, + class_queries_logits: Tensor, + contrastive_queries_logits: Tensor, + mask_labels: Tensor, + class_labels: Tensor, + text_queries: Tensor, + auxiliary_predictions: Dict[str, Tensor], + calculate_contrastive_loss: bool, + ) -> Dict[str, Tensor]: + loss_dict: Dict[str, Tensor] = self.criterion( + masks_queries_logits=masks_queries_logits, + class_queries_logits=class_queries_logits, + contrastive_queries_logits=contrastive_queries_logits, + mask_labels=mask_labels, + class_labels=class_labels, + text_queries=text_queries, + auxiliary_predictions=auxiliary_predictions, + calculate_contrastive_loss=calculate_contrastive_loss, + ) + + # weight each loss by `self.weight_dict[]` including auxiliary losses + for key, weight in self.weight_dict.items(): + for loss_key, loss in loss_dict.items(): + if key in loss_key: + loss *= weight + + return loss_dict + + def get_loss(self, loss_dict: Dict[str, Tensor]) -> Tensor: + return sum(loss_dict.values()) + + @add_start_docstrings_to_model_forward(ONEFORMER_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=OneFormerForUniversalSegmentationOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: Tensor, + task_inputs: Tensor, + text_inputs: Optional[Tensor] = None, + mask_labels: Optional[List[Tensor]] = None, + class_labels: Optional[List[Tensor]] = None, + pixel_mask: Optional[Tensor] = None, + output_auxiliary_logits: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> OneFormerForUniversalSegmentationOutput: + r""" + text_inputs (`List[torch.Tensor]`, *optional*): + Tensor fof shape `(num_queries, sequence_length)` to be fed to a model + mask_labels (`List[torch.Tensor]`, *optional*): + List of mask labels of shape `(num_labels, height, width)` to be fed to a model + class_labels (`List[torch.LongTensor]`, *optional*): + list of target class labels of shape `(num_labels, height, width)` to be fed to a model. They identify the + labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` if `class_labels[i][j]`. 
+ + Returns: + `OneFormerUniversalSegmentationOutput` + Example: + + Universal segmentation example: + + ```python + >>> from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation + >>> from PIL import Image + >>> import requests + >>> import torch + + >>> # load OneFormer fine-tuned on ADE20k for universal segmentation + >>> processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny") + >>> model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny") + + >>> url = ( + ... "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg" + ... ) + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> # Semantic Segmentation + >>> inputs = processor(image, ["semantic"], return_tensors="pt") + + >>> with torch.no_grad(): + ... outputs = model(**inputs) + >>> # model predicts class_queries_logits of shape `(batch_size, num_queries)` + >>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)` + >>> class_queries_logits = outputs.class_queries_logits + >>> masks_queries_logits = outputs.masks_queries_logits + + >>> # you can pass them to processor for semantic postprocessing + >>> predicted_semantic_map = processor.post_process_semantic_segmentation( + ... outputs, target_sizes=[image.size[::-1]] + ... )[0] + >>> f"👉 Semantic Predictions Shape: {list(predicted_semantic_map.shape)}" + '👉 Semantic Predictions Shape: [512, 683]' + + >>> # Instance Segmentation + >>> inputs = processor(image, ["instance"], return_tensors="pt") + + >>> with torch.no_grad(): + ... outputs = model(**inputs) + >>> # model predicts class_queries_logits of shape `(batch_size, num_queries)` + >>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)` + >>> class_queries_logits = outputs.class_queries_logits + >>> masks_queries_logits = outputs.masks_queries_logits + + >>> # you can pass them to processor for instance postprocessing + >>> predicted_instance_map = processor.post_process_instance_segmentation( + ... outputs, target_sizes=[image.size[::-1]] + ... )[0]["segmentation"] + >>> f"👉 Instance Predictions Shape: {list(predicted_instance_map.shape)}" + '👉 Instance Predictions Shape: [512, 683]' + + >>> # Panoptic Segmentation + >>> inputs = processor(image, ["panoptic"], return_tensors="pt") + + >>> with torch.no_grad(): + ... outputs = model(**inputs) + >>> # model predicts class_queries_logits of shape `(batch_size, num_queries)` + >>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)` + >>> class_queries_logits = outputs.class_queries_logits + >>> masks_queries_logits = outputs.masks_queries_logits + + >>> # you can pass them to processor for panoptic postprocessing + >>> predicted_panoptic_map = processor.post_process_panoptic_segmentation( + ... outputs, target_sizes=[image.size[::-1]] + ... 
)[0]["segmentation"] + >>> f"👉 Panoptic Predictions Shape: {list(predicted_panoptic_map.shape)}" + '👉 Panoptic Predictions Shape: [512, 683]' + ``` + """ + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.model( + pixel_values=pixel_values, + task_inputs=task_inputs, + text_inputs=text_inputs, + pixel_mask=pixel_mask, + output_hidden_states=output_hidden_states or self.config.use_auxiliary_loss, + output_attentions=output_attentions, + return_dict=True, + ) + + loss, loss_dict, auxiliary_predictions = None, None, None + + class_queries_logits = outputs.transformer_decoder_class_predictions + masks_queries_logits = outputs.transformer_decoder_mask_predictions + contrastive_queries_logits = outputs.transformer_decoder_contrastive_queries + auxiliary_predictions = outputs.transformer_decoder_auxiliary_predictions + text_queries = outputs.text_queries + + if mask_labels is not None and class_labels is not None: + loss_dict: Dict[str, Tensor] = self.get_loss_dict( + masks_queries_logits=masks_queries_logits, + class_queries_logits=class_queries_logits, + contrastive_queries_logits=contrastive_queries_logits, + mask_labels=mask_labels, + class_labels=class_labels, + text_queries=text_queries, + auxiliary_predictions=auxiliary_predictions, + calculate_contrastive_loss=self.config.contrastive_temperature is not None, + ) + loss = self.get_loss(loss_dict) + + output_auxiliary_logits = ( + self.config.output_auxiliary_logits if output_auxiliary_logits is None else output_auxiliary_logits + ) + if not output_auxiliary_logits: + auxiliary_predictions = None + + output = OneFormerForUniversalSegmentationOutput( + class_queries_logits=class_queries_logits, + masks_queries_logits=masks_queries_logits, + auxiliary_predictions=auxiliary_predictions, + loss=loss, + **outputs, + ) + + if not return_dict: + output = tuple(v for v in output.values()) + if loss is not None: + output = (loss) + output + return output diff --git a/venv/lib/python3.10/site-packages/transformers/models/oneformer/processing_oneformer.py b/venv/lib/python3.10/site-packages/transformers/models/oneformer/processing_oneformer.py new file mode 100644 index 0000000000000000000000000000000000000000..9e55be5d6731c57b32bb4b4b2d11646d9842e921 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/oneformer/processing_oneformer.py @@ -0,0 +1,204 @@ +# coding=utf-8 +# Copyright 2022 SHI Labs and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
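+# The loss weighting in `OneFormerForUniversalSegmentation.get_loss_dict` above scales every entry of the
+# criterion's loss dict whose key contains one of the configured weight names, which also covers auxiliary
+# losses such as "loss_mask_0". A minimal sketch of that matching, with placeholder weights and
+# tensor-valued losses (the in-place `*=` on each tensor is what updates the dict entries):
+#
+#     weight_dict = {"loss_mask": 20.0, "loss_dice": 1.0}
+#     loss_dict = {"loss_mask": mask_loss, "loss_mask_0": aux_mask_loss, "loss_dice": dice_loss}
+#     for key, weight in weight_dict.items():
+#         for loss_key, loss in loss_dict.items():
+#             if key in loss_key:
+#                 loss *= weight  # scales "loss_mask" and "loss_mask_0" by 20.0, "loss_dice" by 1.0
+#
+# The final training loss is then `sum(loss_dict.values())`, as implemented in `get_loss`.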
+""" +Image/Text processor class for OneFormer +""" + +from typing import List + +from ...processing_utils import ProcessorMixin +from ...utils import is_torch_available + + +if is_torch_available(): + import torch + + +class OneFormerProcessor(ProcessorMixin): + r""" + Constructs an OneFormer processor which wraps [`OneFormerImageProcessor`] and + [`CLIPTokenizer`]/[`CLIPTokenizerFast`] into a single processor that inherits both the image processor and + tokenizer functionalities. + + Args: + image_processor ([`OneFormerImageProcessor`]): + The image processor is a required input. + tokenizer ([`CLIPTokenizer`, `CLIPTokenizerFast`]): + The tokenizer is a required input. + max_seq_len (`int`, *optional*, defaults to 77)): + Sequence length for input text list. + task_seq_len (`int`, *optional*, defaults to 77): + Sequence length for input task token. + """ + + attributes = ["image_processor", "tokenizer"] + image_processor_class = "OneFormerImageProcessor" + tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast") + + def __init__( + self, image_processor=None, tokenizer=None, max_seq_length: int = 77, task_seq_length: int = 77, **kwargs + ): + if image_processor is None: + raise ValueError("You need to specify an `image_processor`.") + if tokenizer is None: + raise ValueError("You need to specify a `tokenizer`.") + + self.max_seq_length = max_seq_length + self.task_seq_length = task_seq_length + + super().__init__(image_processor, tokenizer) + + def _preprocess_text(self, text_list=None, max_length=77): + if text_list is None: + raise ValueError("tokens cannot be None.") + + tokens = self.tokenizer(text_list, padding="max_length", max_length=max_length, truncation=True) + + attention_masks, input_ids = tokens["attention_mask"], tokens["input_ids"] + + token_inputs = [] + for attn_mask, input_id in zip(attention_masks, input_ids): + token = torch.tensor(attn_mask) * torch.tensor(input_id) + token_inputs.append(token.unsqueeze(0)) + + token_inputs = torch.cat(token_inputs, dim=0) + return token_inputs + + def __call__(self, images=None, task_inputs=None, segmentation_maps=None, **kwargs): + """ + Main method to prepare for the model one or several task input(s) and image(s). This method forwards the + `task_inputs` and `kwargs` arguments to CLIPTokenizer's [`~CLIPTokenizer.__call__`] if `task_inputs` is not + `None` to encode. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to + OneFormerImageProcessor's [`~OneFormerImageProcessor.__call__`] if `images` is not `None`. Please refer to the + doctsring of the above two methods for more information. + + Args: + task_inputs (`str`, `List[str]`): + The sequence or batch of task_inputs sequences to be encoded. Each sequence can be a string or a list + of strings of the template "the task is {task}". + images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, + `List[torch.Tensor]`): + The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch + tensor. Both channels-first and channels-last formats are supported. + segmentation_maps (`ImageInput`, *optional*): + The corresponding semantic segmentation maps with the pixel-wise annotations. + + (`bool`, *optional*, defaults to `True`): + Whether or not to pad images up to the largest image in a batch and create a pixel mask. + + If left to the default, will return a pixel mask that is: + + - 1 for pixels that are real (i.e. **not masked**), + - 0 for pixels that are padding (i.e. 
**masked**). + Returns: + [`BatchFeature`]: A [`BatchFeature`] with the following fields: + - **task_inputs** -- List of token ids to be fed to a model. Returned when `text` is not `None`. + - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. + """ + + if task_inputs is None: + raise ValueError("You have to specify the task_input. Found None.") + elif images is None: + raise ValueError("You have to specify the image. Found None.") + + if not all(task in ["semantic", "instance", "panoptic"] for task in task_inputs): + raise ValueError("task_inputs must be semantic, instance, or panoptic.") + + encoded_inputs = self.image_processor(images, task_inputs, segmentation_maps, **kwargs) + + if isinstance(task_inputs, str): + task_inputs = [task_inputs] + + if isinstance(task_inputs, List) and all(isinstance(task_input, str) for task_input in task_inputs): + task_token_inputs = [] + for task in task_inputs: + task_input = f"the task is {task}" + task_token_inputs.append(task_input) + encoded_inputs["task_inputs"] = self._preprocess_text(task_token_inputs, max_length=self.task_seq_length) + else: + raise TypeError("Task Inputs should be a string or a list of strings.") + + if hasattr(encoded_inputs, "text_inputs"): + texts_list = encoded_inputs.text_inputs + + text_inputs = [] + for texts in texts_list: + text_input_list = self._preprocess_text(texts, max_length=self.max_seq_length) + text_inputs.append(text_input_list.unsqueeze(0)) + + encoded_inputs["text_inputs"] = torch.cat(text_inputs, dim=0) + + return encoded_inputs + + def encode_inputs(self, images=None, task_inputs=None, segmentation_maps=None, **kwargs): + """ + This method forwards all its arguments to [`OneFormerImageProcessor.encode_inputs`] and then tokenizes the + task_inputs. Please refer to the docstring of this method for more information. + """ + + if task_inputs is None: + raise ValueError("You have to specify the task_input. Found None.") + elif images is None: + raise ValueError("You have to specify the image. Found None.") + + if not all(task in ["semantic", "instance", "panoptic"] for task in task_inputs): + raise ValueError("task_inputs must be semantic, instance, or panoptic.") + + encoded_inputs = self.image_processor.encode_inputs(images, task_inputs, segmentation_maps, **kwargs) + + if isinstance(task_inputs, str): + task_inputs = [task_inputs] + + if isinstance(task_inputs, List) and all(isinstance(task_input, str) for task_input in task_inputs): + task_token_inputs = [] + for task in task_inputs: + task_input = f"the task is {task}" + task_token_inputs.append(task_input) + encoded_inputs["task_inputs"] = self._preprocess_text(task_token_inputs, max_length=self.task_seq_length) + else: + raise TypeError("Task Inputs should be a string or a list of strings.") + + if hasattr(encoded_inputs, "text_inputs"): + texts_list = encoded_inputs.text_inputs + + text_inputs = [] + for texts in texts_list: + text_input_list = self._preprocess_text(texts, max_length=self.max_seq_length) + text_inputs.append(text_input_list.unsqueeze(0)) + + encoded_inputs["text_inputs"] = torch.cat(text_inputs, dim=0) + + return encoded_inputs + + def post_process_semantic_segmentation(self, *args, **kwargs): + """ + This method forwards all its arguments to [`OneFormerImageProcessor.post_process_semantic_segmentation`]. + Please refer to the docstring of this method for more information. 
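+
+        Example:
+
+        A minimal sketch, assuming `outputs` come from [`OneFormerForUniversalSegmentation`] and `processor`
+        was created as in the model examples above:
+
+        ```python
+        semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[(512, 683)])[0]
+        # semantic_map is a (512, 683) tensor of per-pixel class ids
+        ```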
+ """ + return self.image_processor.post_process_semantic_segmentation(*args, **kwargs) + + def post_process_instance_segmentation(self, *args, **kwargs): + """ + This method forwards all its arguments to [`OneFormerImageProcessor.post_process_instance_segmentation`]. + Please refer to the docstring of this method for more information. + """ + return self.image_processor.post_process_instance_segmentation(*args, **kwargs) + + def post_process_panoptic_segmentation(self, *args, **kwargs): + """ + This method forwards all its arguments to [`OneFormerImageProcessor.post_process_panoptic_segmentation`]. + Please refer to the docstring of this method for more information. + """ + return self.image_processor.post_process_panoptic_segmentation(*args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf64b9f78724dc5795a92b8238efee45aeaa0867 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/image_processing_pvt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/image_processing_pvt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..248d678a892aeb93b435a013935b0061a044d4d6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/image_processing_pvt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/modeling_pvt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/modeling_pvt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e506f7c71126e91052f5ff0bf19d7e2b057f8ce2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/pvt/__pycache__/modeling_pvt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/qdqbert/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/qdqbert/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3d161192d81b0da3d5841da50cfedc4d75394b50 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/qdqbert/__init__.py @@ -0,0 +1,71 @@ +# Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
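+# `OneFormerProcessor.__call__` above templates each task as "the task is {task}", tokenizes it up to
+# `task_seq_length`, and zeroes padded positions by multiplying the input ids with the attention mask.
+# A minimal sketch of the resulting tensors, assuming `image` is a PIL image and the
+# "shi-labs/oneformer_ade20k_swin_tiny" checkpoint used in the model docstrings:
+#
+#     processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
+#     inputs = processor(images=image, task_inputs=["panoptic"], return_tensors="pt")
+#     inputs["task_inputs"].shape  # (1, 77): one templated task string, padded/truncated to task_seq_length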
+from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available + + +_import_structure = {"configuration_qdqbert": ["QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "QDQBertConfig"]} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_qdqbert"] = [ + "QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "QDQBertForMaskedLM", + "QDQBertForMultipleChoice", + "QDQBertForNextSentencePrediction", + "QDQBertForQuestionAnswering", + "QDQBertForSequenceClassification", + "QDQBertForTokenClassification", + "QDQBertLayer", + "QDQBertLMHeadModel", + "QDQBertModel", + "QDQBertPreTrainedModel", + "load_tf_weights_in_qdqbert", + ] + + +if TYPE_CHECKING: + from .configuration_qdqbert import QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, QDQBertConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_qdqbert import ( + QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + QDQBertForMaskedLM, + QDQBertForMultipleChoice, + QDQBertForNextSentencePrediction, + QDQBertForQuestionAnswering, + QDQBertForSequenceClassification, + QDQBertForTokenClassification, + QDQBertLayer, + QDQBertLMHeadModel, + QDQBertModel, + QDQBertPreTrainedModel, + load_tf_weights_in_qdqbert, + ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/qdqbert/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/qdqbert/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35b423b5ee7ccb5e2734287dcdf096d79264201e Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/qdqbert/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/qdqbert/__pycache__/configuration_qdqbert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/qdqbert/__pycache__/configuration_qdqbert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0c9ae1401f668d9ca4096ee9ce8463e7d0a65a3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/qdqbert/__pycache__/configuration_qdqbert.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/qdqbert/__pycache__/modeling_qdqbert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/qdqbert/__pycache__/modeling_qdqbert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0aca7b767967daf700757862762bfa3910e9e7ba Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/qdqbert/__pycache__/modeling_qdqbert.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/qdqbert/configuration_qdqbert.py b/venv/lib/python3.10/site-packages/transformers/models/qdqbert/configuration_qdqbert.py new file mode 100644 index 0000000000000000000000000000000000000000..9a48424cc063c106004c12001bad5c179a0be8f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/qdqbert/configuration_qdqbert.py @@ -0,0 +1,123 @@ +# coding=utf-8 +# Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" QDQBERT model configuration""" + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class QDQBertConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`QDQBertModel`]. It is used to instantiate an + QDQBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration + with the defaults will yield a similar configuration to that of the BERT + [google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 30522): + Vocabulary size of the QDQBERT model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`QDQBertModel`]. + hidden_size (`int`, *optional*, defaults to 768): + Dimension of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + max_position_embeddings (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + type_vocab_size (`int`, *optional*, defaults to 2): + The vocabulary size of the `token_type_ids` passed when calling [`QDQBertModel`]. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + is_decoder (`bool`, *optional*, defaults to `False`): + Whether the model is used as a decoder or not. If `False`, the model is used as an encoder. 
+ use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + + Examples: + + ```python + >>> from transformers import QDQBertModel, QDQBertConfig + + >>> # Initializing a QDQBERT google-bert/bert-base-uncased style configuration + >>> configuration = QDQBertConfig() + + >>> # Initializing a model from the google-bert/bert-base-uncased style configuration + >>> model = QDQBertModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "qdqbert" + + def __init__( + self, + vocab_size=30522, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=2, + initializer_range=0.02, + layer_norm_eps=1e-12, + use_cache=True, + pad_token_id=1, + bos_token_id=0, + eos_token_id=2, + **kwargs, + ): + super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.initializer_range = initializer_range + self.type_vocab_size = type_vocab_size + self.layer_norm_eps = layer_norm_eps + self.use_cache = use_cache diff --git a/venv/lib/python3.10/site-packages/transformers/models/qdqbert/modeling_qdqbert.py b/venv/lib/python3.10/site-packages/transformers/models/qdqbert/modeling_qdqbert.py new file mode 100644 index 0000000000000000000000000000000000000000..c5e9af7025842b0a4679b20d6be9225e44a1f42b --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/qdqbert/modeling_qdqbert.py @@ -0,0 +1,1737 @@ +# coding=utf-8 +# Copyright 2021 NVIDIA Corporation and The HuggingFace Team. +# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
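+# QDQBERT keeps the standard BERT layout but wraps the Linear projections and the attention/residual-add matmul +# inputs with `pytorch_quantization` fake-quantization ("quantize-dequantize", hence QDQ) nodes, so that a calibrated +# model can be exported with explicit Q/DQ ops, typically for INT8 inference with TensorRT.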
+""" PyTorch QDQBERT model.""" + + +import math +import os +import warnings +from typing import Dict, List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + MaskedLMOutput, + MultipleChoiceModelOutput, + NextSentencePredictorOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer +from ...utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_pytorch_quantization_available, + logging, + replace_return_docstrings, + requires_backends, +) +from .configuration_qdqbert import QDQBertConfig + + +logger = logging.get_logger(__name__) + +# soft dependency +if is_pytorch_quantization_available(): + try: + from pytorch_quantization import nn as quant_nn + from pytorch_quantization.nn.modules.tensor_quantizer import TensorQuantizer + except OSError: + logger.error( + "QDQBERT model are not usable since `pytorch_quantization` can't be loaded. Please try to reinstall it" + " following the instructions here:" + " https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization." + ) + +_CHECKPOINT_FOR_DOC = "google-bert/bert-base-uncased" +_CONFIG_FOR_DOC = "QDQBertConfig" + + +from ..deprecated._archive_maps import QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +def load_tf_weights_in_qdqbert(model, tf_checkpoint_path): + """Load tf checkpoints in a pytorch model.""" + try: + import re + + import numpy as np + import tensorflow as tf + except ImportError: + logger.error( + "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " + "https://www.tensorflow.org/install/ for installation instructions." 
+ ) + raise + tf_path = os.path.abspath(tf_checkpoint_path) + logger.info(f"Converting TensorFlow checkpoint from {tf_path}") + # Load weights from TF model + init_vars = tf.train.list_variables(tf_path) + names = [] + arrays = [] + for name, shape in init_vars: + logger.info(f"Loading TF weight {name} with shape {shape}") + array = tf.train.load_variable(tf_path, name) + names.append(name) + arrays.append(array) + + for name, array in zip(names, arrays): + name = name.split("/") + # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v + # which are not required for using pretrained model + if any( + n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] + for n in name + ): + logger.info(f"Skipping {'/'.join(name)}") + continue + pointer = model + for m_name in name: + if re.fullmatch(r"[A-Za-z]+_\d+", m_name): + scope_names = re.split(r"_(\d+)", m_name) + else: + scope_names = [m_name] + if scope_names[0] == "kernel" or scope_names[0] == "gamma": + pointer = getattr(pointer, "weight") + elif scope_names[0] == "output_bias" or scope_names[0] == "beta": + pointer = getattr(pointer, "bias") + elif scope_names[0] == "output_weights": + pointer = getattr(pointer, "weight") + elif scope_names[0] == "squad": + pointer = getattr(pointer, "classifier") + else: + try: + pointer = getattr(pointer, scope_names[0]) + except AttributeError: + logger.info(f"Skipping {'/'.join(name)}") + continue + if len(scope_names) >= 2: + num = int(scope_names[1]) + pointer = pointer[num] + if m_name[-11:] == "_embeddings": + pointer = getattr(pointer, "weight") + elif m_name == "kernel": + array = np.transpose(array) + try: + if pointer.shape != array.shape: + raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched") + except AssertionError as e: + e.args += (pointer.shape, array.shape) + raise + logger.info(f"Initialize PyTorch weight {name}") + pointer.data = torch.from_numpy(array) + return model + + +# Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert -> QDQBert +class QDQBertEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + self.register_buffer( + "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False + ) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) + + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + past_key_values_length: int = 0, + ) -> 
torch.Tensor: + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] + + # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs + # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves + # issue #5664 + if token_type_ids is None: + if hasattr(self, "token_type_ids"): + buffered_token_type_ids = self.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + token_type_embeddings + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class QDQBertSelfAttention(nn.Module): + def __init__(self, config): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = quant_nn.QuantLinear(config.hidden_size, self.all_head_size) + self.key = quant_nn.QuantLinear(config.hidden_size, self.all_head_size) + self.value = quant_nn.QuantLinear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + + self.is_decoder = config.is_decoder + + self.matmul_q_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) + self.matmul_k_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) + self.matmul_v_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) + self.matmul_a_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come 
from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. + is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_layer = past_key_value[0] + value_layer = past_key_value[1] + attention_mask = encoder_attention_mask + elif is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul( + self.matmul_q_input_quantizer(query_layer), self.matmul_k_input_quantizer(key_layer.transpose(-1, -2)) + ) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + seq_length = hidden_states.size()[1] + position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in QDQBertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. 
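+ # (attention_mask is additive: masked positions carry large negative values, so they receive ~zero weight after the softmax below.)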
+ attention_probs = nn.Softmax(dim=-1)(attention_scores) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul( + self.matmul_a_input_quantizer(attention_probs), self.matmul_v_input_quantizer(value_layer) + ) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + if self.is_decoder: + outputs = outputs + (past_key_value,) + return outputs + + +class QDQBertSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + # Quantize Linear layer + self.dense = quant_nn.QuantLinear(config.hidden_size, config.hidden_size) + + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # Quantize the inputs to the residual add + self.add_local_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) + self.add_residual_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + # Quantize the inputs to the residual add + add_local = self.add_local_input_quantizer(hidden_states) + add_residual = self.add_residual_input_quantizer(input_tensor) + hidden_states = self.LayerNorm(add_local + add_residual) + return hidden_states + + +# Based on transformers.models.bert.modeling_bert.BertAttention with Bert -> QDQBert +class QDQBertAttention(nn.Module): + def __init__(self, config): + super().__init__() + self.self = QDQBertSelfAttention(config) + self.output = QDQBertSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +class QDQBertIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + # Quantize Linear layer + 
self.dense = quant_nn.QuantLinear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class QDQBertOutput(nn.Module): + def __init__(self, config): + super().__init__() + # Quantize Linear layer + self.dense = quant_nn.QuantLinear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # Quantize the inputs to the residual add + self.add_local_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) + self.add_residual_input_quantizer = TensorQuantizer(quant_nn.QuantLinear.default_quant_desc_input) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + # Quantize the inputs to the residual add + add_local = self.add_local_input_quantizer(hidden_states) + add_residual = self.add_residual_input_quantizer(input_tensor) + hidden_states = self.LayerNorm(add_local + add_residual) + return hidden_states + + +# Based on transformers.models.bert.modeling_bert.BertLayer with Bert -> QDQBert +class QDQBertLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.seq_len_dim = 1 + self.attention = QDQBertAttention(config) + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + if self.add_cross_attention: + if not self.is_decoder: + raise ValueError(f"{self} should be used as a decoder model if cross attention is added") + self.crossattention = QDQBertAttention(config) + self.intermediate = QDQBertIntermediate(config) + self.output = QDQBertOutput(config) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + # if decoder, the last output is tuple of self-attn cache + if self.is_decoder: + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + else: + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + cross_attn_present_key_value = None + if self.is_decoder and encoder_hidden_states is not None: + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" + " by setting `config.add_cross_attention=True`" + ) + + # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + cross_attn_past_key_value, + output_attentions, + ) + 
attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + + # add cross-attn cache to positions 3,4 of present_key_value tuple + cross_attn_present_key_value = cross_attention_outputs[-1] + present_key_value = present_key_value + cross_attn_present_key_value + + layer_output = self.feed_forward_chunk(attention_output) + outputs = (layer_output,) + outputs + + # if decoder, return the attn key/values as the last output + if self.is_decoder: + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +# Based on transformers.models.bert.modeling_bert.BertEncoder with Bert -> QDQBert +class QDQBertEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([QDQBertLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + ): + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + + next_decoder_cache = () if use_cache else None + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + layer_outputs = self._gradient_checkpointing_func( + layer_module.__call__, + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert -> QDQBert +class QDQBertPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert -> QDQBert +class QDQBertPredictionHeadTransform(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +# Based on transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert -> QDQBert +class QDQBertLMPredictionHead(nn.Module): + def __init__(self, config): + super().__init__() + self.transform = QDQBertPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. 
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +# Based on transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert -> QDQBert +class QDQBertOnlyMLMHead(nn.Module): + def __init__(self, config): + super().__init__() + self.predictions = QDQBertLMPredictionHead(config) + + def forward(self, sequence_output): + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +# Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert -> QDQBert +class QDQBertOnlyNSPHead(nn.Module): + def __init__(self, config): + super().__init__() + self.seq_relationship = nn.Linear(config.hidden_size, 2) + + def forward(self, pooled_output): + seq_relationship_score = self.seq_relationship(pooled_output) + return seq_relationship_score + + +# Based on transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert -> QDQBert +class QDQBertPreTrainingHeads(nn.Module): + def __init__(self, config): + super().__init__() + self.predictions = QDQBertLMPredictionHead(config) + self.seq_relationship = nn.Linear(config.hidden_size, 2) + + def forward(self, sequence_output, pooled_output): + prediction_scores = self.predictions(sequence_output) + seq_relationship_score = self.seq_relationship(pooled_output) + return prediction_scores, seq_relationship_score + + +# Based on transformers.models.bert.modeling_bert.BertPreTrainedModel with Bert -> QDQBert +class QDQBertPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = QDQBertConfig + load_tf_weights = load_tf_weights_in_qdqbert + base_model_prefix = "bert" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, nn.Linear): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + +QDQBERT_START_DOCSTRING = r""" + + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`QDQBertConfig`]): Model configuration class with all the parameters of the model. 
+ Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +QDQBERT_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare QDQBERT Model transformer outputting raw hidden-states without any specific head on top.", + QDQBERT_START_DOCSTRING, +) +class QDQBertModel(QDQBertPreTrainedModel): + """ + + The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of + cross-attention is added between the self-attention layers, following the architecture described in [Attention is + all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, + Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. + + To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set + to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and + `add_cross_attention` set to `True`; an `encoder_hidden_states` input is then expected by the forward pass. 
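+ + A minimal decoder-style construction is sketched below (illustrative only; it assumes the `pytorch_quantization` + backend is installed and builds a randomly initialized model): + + ```python + >>> from transformers import QDQBertConfig, QDQBertModel + + >>> config = QDQBertConfig(is_decoder=True, add_cross_attention=True) + >>> model = QDQBertModel(config) + ```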
+ """ + + def __init__(self, config, add_pooling_layer: bool = True): + requires_backends(self, "pytorch_quantization") + super().__init__(config) + self.config = config + + self.embeddings = QDQBertEmbeddings(config) + self.encoder = QDQBertEncoder(config) + + self.pooler = QDQBertPooler(config) if add_pooling_layer else None + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune: Dict[int, List[int]]): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPoolingAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). 
+ """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if self.config.is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) + input_shape = input_ids.size() + batch_size, seq_length = input_shape + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + batch_size, seq_length = input_shape + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + device = input_ids.device if input_ids is not None else inputs_embeds.device + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) + + if token_type_ids is None: + if hasattr(self.embeddings, "token_type_ids"): + buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.config.is_decoder and encoder_hidden_states is not None: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + if encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +@add_start_docstrings( + """QDQBERT Model with a `language modeling` head on top for CLM fine-tuning.""", QDQBERT_START_DOCSTRING +) +class QDQBertLMHeadModel(QDQBertPreTrainedModel): + _tied_weights_keys = ["predictions.decoder.weight", "predictions.decoder.bias"] + + def __init__(self, config): + super().__init__(config) + + if not config.is_decoder: + logger.warning("If you want to use `QDQBertLMHeadModel` as a standalone, add `is_decoder=True.`") + + self.bert = QDQBertModel(config, add_pooling_layer=False) + self.cls = QDQBertOnlyMLMHead(config) + + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + 
inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.LongTensor]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in + `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are + ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). 
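+ Note that when `labels` are provided, the model internally sets `use_cache=False`: cached key/value states are + only needed for incremental decoding, not for computing the training loss.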
+ + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, QDQBertLMHeadModel, QDQBertConfig + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased") + >>> config = QDQBertConfig.from_pretrained("google-bert/bert-base-cased") + >>> config.is_decoder = True + >>> model = QDQBertLMHeadModel.from_pretrained("google-bert/bert-base-cased", config=config) + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + + >>> prediction_logits = outputs.logits + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if labels is not None: + use_cache = False + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss() + lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((lm_loss,) + output) if lm_loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation( + self, + input_ids: Optional[torch.LongTensor], + past_key_values=None, + attention_mask: Optional[torch.Tensor] = None, + **model_kwargs, + ): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + # cut decoder_input_ids if past_key_values is used + if past_key_values is not None: + past_length = past_key_values[0][0].shape[2] + + # Some generation methods already pass only the last input ID + if input_ids.shape[1] > past_length: + remove_prefix_length = past_length + else: + # Default to old behavior: keep only final ID + remove_prefix_length = input_ids.shape[1] - 1 + + input_ids = input_ids[:, remove_prefix_length:] + + return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values} + + def _reorder_cache(self, past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past + + +@add_start_docstrings("""QDQBERT Model with a `language modeling` head on top.""", QDQBERT_START_DOCSTRING) +class QDQBertForMaskedLM(QDQBertPreTrainedModel): + _tied_weights_keys = ["predictions.decoder.weight", "predictions.decoder.bias"] + + def __init__(self, config): + super().__init__(config) + + if config.is_decoder: + 
logger.warning( + "If you want to use `QDQBertForMaskedLM` make sure `config.is_decoder=False` for " + "bi-directional self-attention." + ) + + self.bert = QDQBertModel(config, add_pooling_layer=False) + self.cls = QDQBertOnlyMLMHead(config) + + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MaskedLMOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, MaskedLMOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the + loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + """ + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() # -100 index = padding token + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + + return MaskedLMOutput( + loss=masked_lm_loss, + logits=prediction_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids: torch.LongTensor, attention_mask: Optional[torch.FloatTensor] = None, **model_kwargs + ): + input_shape = input_ids.shape + effective_batch_size = input_shape[0] + + # add a dummy token + if self.config.pad_token_id is None: + raise ValueError("The PAD token should be defined for generation") + + attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1) + dummy_token = torch.full( + (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device + ) + input_ids = torch.cat([input_ids, dummy_token], dim=1) + + return {"input_ids": input_ids, 
"attention_mask": attention_mask} + + +@add_start_docstrings( + """Bert Model with a `next sentence prediction (classification)` head on top.""", + QDQBERT_START_DOCSTRING, +) +class QDQBertForNextSentencePrediction(QDQBertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.bert = QDQBertModel(config) + self.cls = QDQBertOnlyNSPHead(config) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **kwargs, + ) -> Union[Tuple, NextSentencePredictorOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair + (see `input_ids` docstring). Indices should be in `[0, 1]`: + + - 0 indicates sequence B is a continuation of sequence A, + - 1 indicates sequence B is a random sequence. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, QDQBertForNextSentencePrediction + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased") + >>> model = QDQBertForNextSentencePrediction.from_pretrained("google-bert/bert-base-uncased") + + >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." + >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light." 
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt") + + >>> outputs = model(**encoding, labels=torch.LongTensor([1])) + >>> logits = outputs.logits + >>> assert logits[0, 0] < logits[0, 1] # next sentence was random + ```""" + + if "next_sentence_label" in kwargs: + warnings.warn( + "The `next_sentence_label` argument is deprecated and will be removed in a future version, use" + " `labels` instead.", + FutureWarning, + ) + labels = kwargs.pop("next_sentence_label") + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + seq_relationship_scores = self.cls(pooled_output) + + next_sentence_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1)) + + if not return_dict: + output = (seq_relationship_scores,) + outputs[2:] + return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output + + return NextSentencePredictorOutput( + loss=next_sentence_loss, + logits=seq_relationship_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled + output) e.g. for GLUE tasks. + """, + QDQBERT_START_DOCSTRING, +) +class QDQBertForSequenceClassification(QDQBertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.config = config + + self.bert = QDQBertModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=SequenceClassifierOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
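+ If `config.problem_type` is not set, it is inferred on the first forward call: "regression" when `num_labels == 1`, + "single_label_classification" when `num_labels > 1` and the labels are integers, and "multi_label_classification" + otherwise.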
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a + softmax) e.g. for RocStories/SWAG tasks. + """, + QDQBERT_START_DOCSTRING, +) +class QDQBertForMultipleChoice(QDQBertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.bert = QDQBertModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, 1) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MultipleChoiceModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, MultipleChoiceModelOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., + num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See + `input_ids` above) + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] + + input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None + attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None + token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None + position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None + inputs_embeds = ( + inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) + if inputs_embeds is not None + else None + ) + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + reshaped_logits = logits.view(-1, num_choices) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(reshaped_logits, labels) + + if not return_dict: + output = (reshaped_logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return MultipleChoiceModelOutput( + loss=loss, + logits=reshaped_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + QDQBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for + Named-Entity-Recognition (NER) tasks. + """, + QDQBERT_START_DOCSTRING, +) +class QDQBertForTokenClassification(QDQBertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.bert = QDQBertModel(config, add_pooling_layer=False) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TokenClassifierOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, TokenClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
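+
+        Example (an illustrative sketch, not an official QDQBERT sample: the checkpoint, `num_labels=5`
+        and the `-100` masking of unlabeled positions are assumptions):
+
+        ```python
+        >>> import torch
+        >>> from transformers import AutoTokenizer, QDQBertForTokenClassification
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
+        >>> model = QDQBertForTokenClassification.from_pretrained("google-bert/bert-base-uncased", num_labels=5)
+
+        >>> inputs = tokenizer("Alice lives in Paris", return_tensors="pt")
+        >>> # one label per token; positions set to -100 are ignored by the default CrossEntropyLoss
+        >>> labels = torch.full(inputs["input_ids"].shape, -100)
+        >>> labels[0, 1] = 3
+        >>> outputs = model(**inputs, labels=labels)
+        >>> loss, logits = outputs.loss, outputs.logits
+        ```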
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + QDQBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear + layers on top of the hidden-states output to compute `span start logits` and `span end logits`). + """, + QDQBERT_START_DOCSTRING, +) +class QDQBertForQuestionAnswering(QDQBertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.bert = QDQBertModel(config, add_pooling_layer=False) + self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(QDQBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=QuestionAnsweringModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + start_positions: Optional[torch.LongTensor] = None, + end_positions: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, QuestionAnsweringModelOutput]: + r""" + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1).contiguous() + end_logits = end_logits.squeeze(-1).contiguous() + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions = start_positions.clamp(0, ignored_index) + end_positions = end_positions.clamp(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/tvp/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/tvp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..63c0bd2717447137661a4c01596ba0f581f3504d --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/tvp/__init__.py @@ -0,0 +1,80 @@ +# coding=utf-8 +# Copyright 2023 The Intel AIA Team Authors, and HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License=, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing=, software +# distributed under the License is distributed on an "AS IS" BASIS=, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND=, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available + + +_import_structure = { + "configuration_tvp": [ + "TVP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "TvpConfig", + ], + "processing_tvp": ["TvpProcessor"], +} + +try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["image_processing_tvp"] = ["TvpImageProcessor"] + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tvp"] = [ + "TVP_PRETRAINED_MODEL_ARCHIVE_LIST", + "TvpModel", + "TvpPreTrainedModel", + "TvpForVideoGrounding", + ] + +if TYPE_CHECKING: + from .configuration_tvp import ( + TVP_PRETRAINED_CONFIG_ARCHIVE_MAP, + TvpConfig, + ) + from .processing_tvp import TvpProcessor + + try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .image_processing_tvp import TvpImageProcessor + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tvp import ( + TVP_PRETRAINED_MODEL_ARCHIVE_LIST, + TvpForVideoGrounding, + TvpModel, + TvpPreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5822c16f649ef57d30579be1b66b705933efcf87 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/configuration_tvp.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/configuration_tvp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28441e80ef0b2e9c4ab9681a546973fc45e27403 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/configuration_tvp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/image_processing_tvp.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/image_processing_tvp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..562fed6813d92f13402ebd1fa3134b0e82e25bfe Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/image_processing_tvp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/modeling_tvp.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/modeling_tvp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ccf5316e55703556e584d7ccf89553f2fa65402d Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/modeling_tvp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/processing_tvp.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/processing_tvp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9341adcf2d09355b3ba7c88fbde5a27615adcdb Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/tvp/__pycache__/processing_tvp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/tvp/configuration_tvp.py b/venv/lib/python3.10/site-packages/transformers/models/tvp/configuration_tvp.py new file mode 100644 index 0000000000000000000000000000000000000000..85b7ac6a41cbccccbfb1fb62becbcac2f4f2b63e --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/tvp/configuration_tvp.py @@ -0,0 +1,201 @@ +# coding=utf-8 +# Copyright 2023 The Intel AIA Team Authors, and HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License=, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing=, software +# distributed under the License is distributed on an "AS IS" BASIS=, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND=, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" TVP model configuration""" + +import copy + +from ...configuration_utils import PretrainedConfig +from ...utils import logging +from ..auto import CONFIG_MAPPING + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import TVP_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class TvpConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`TvpModel`]. It is used to instantiate an Tvp + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the Tvp + [Intel/tvp-base](https://huggingface.co/Intel/tvp-base) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + backbone_config (`PretrainedConfig` or `dict`, *optional*): + The configuration of the backbone model. + backbone (`str`, *optional*): + Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this + will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` + is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. + use_pretrained_backbone (`bool`, *optional*, defaults to `False`): + Whether to use pretrained weights for the backbone. + use_timm_backbone (`bool`, *optional*, defaults to `False`): + Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers + library. + backbone_kwargs (`dict`, *optional*): + Keyword arguments to be passed to AutoBackbone when loading from a checkpoint + e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. + distance_loss_weight (`float`, *optional*, defaults to 1.0): + The weight of distance loss. + duration_loss_weight (`float`, *optional*, defaults to 0.1): + The weight of duration loss. 
+ visual_prompter_type (`str`, *optional*, defaults to `"framepad"`): + Visual prompt type. The type of padding. Framepad means padding on each frame. Should be one of "framepad" + or "framedownpad" + visual_prompter_apply (`str`, *optional*, defaults to `"replace"`): + The way of applying visual prompt. Replace means use the value of prompt to change the original value in + visual inputs. Should be one of "replace", or "add", or "remove". + visual_prompt_size (`int`, *optional*, defaults to 96): + The size of visual prompt. + max_img_size (`int`, *optional*, defaults to 448): + The maximum size of frame. + num_frames (`int`, *optional*, defaults to 48): + The number of frames extracted from a video. + vocab_size (`int`, *optional*, defaults to 30522): + Vocabulary size of the Tvp text model. Defines the number of different tokens that can be represented by + the `inputs_ids` passed when calling [`TvpModel`]. + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + max_position_embeddings (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + max_grid_col_position_embeddings (`int`, *optional*, defaults to 100): + The largest number of horizontal patches from a video frame. + max_grid_row_position_embeddings (`int`, *optional*, defaults to 100): + The largest number of vertical patches from a video frame. + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability of hidden layers. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported. + layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability of attention layers. 
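+
+    Example (an illustrative sketch based on the constructor and `from_backbone_config` defined below;
+    `ResNetConfig` is assumed to be importable from the top-level `transformers` package):
+
+    ```python
+    >>> from transformers import ResNetConfig, TvpConfig
+
+    >>> # Default configuration; with no backbone given, `__init__` falls back to a ResNet "stage4" backbone
+    >>> configuration = TvpConfig()
+
+    >>> # Building a configuration from an explicit backbone configuration
+    >>> backbone_config = ResNetConfig(out_features=["stage4"])
+    >>> configuration = TvpConfig.from_backbone_config(backbone_config)
+    ```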
+ """ + + model_type = "tvp" + + def __init__( + self, + backbone_config=None, + backbone=None, + use_pretrained_backbone=False, + use_timm_backbone=False, + backbone_kwargs=None, + distance_loss_weight=1.0, + duration_loss_weight=0.1, + visual_prompter_type="framepad", + visual_prompter_apply="replace", + visual_prompt_size=96, + max_img_size=448, + num_frames=48, + vocab_size=30522, + hidden_size=768, + intermediate_size=3072, + num_hidden_layers=12, + num_attention_heads=12, + max_position_embeddings=512, + max_grid_col_position_embeddings=100, + max_grid_row_position_embeddings=100, + hidden_dropout_prob=0.1, + hidden_act="gelu", + layer_norm_eps=1e-12, + initializer_range=0.02, + attention_probs_dropout_prob=0.1, + **kwargs, + ): + super().__init__(**kwargs) + if use_pretrained_backbone: + raise ValueError("Pretrained backbones are not supported yet.") + + if backbone_config is not None and backbone is not None: + raise ValueError("You can't specify both `backbone` and `backbone_config`.") + + if backbone_config is None and backbone is None: + logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.") + backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"]) + elif isinstance(backbone_config, dict): + backbone_model_type = backbone_config.get("model_type") + config_class = CONFIG_MAPPING[backbone_model_type] + backbone_config = config_class.from_dict(backbone_config) + + if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None: + raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.") + + self.backbone_config = backbone_config + self.backbone = backbone + self.use_pretrained_backbone = use_pretrained_backbone + self.use_timm_backbone = use_timm_backbone + self.backbone_kwargs = backbone_kwargs + self.distance_loss_weight = distance_loss_weight + self.duration_loss_weight = duration_loss_weight + self.visual_prompter_type = visual_prompter_type + self.visual_prompter_apply = visual_prompter_apply + self.visual_prompt_size = visual_prompt_size + self.max_img_size = max_img_size + self.num_frames = num_frames + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.max_position_embeddings = max_position_embeddings + self.max_grid_col_position_embeddings = max_grid_col_position_embeddings + self.max_grid_row_position_embeddings = max_grid_row_position_embeddings + self.layer_norm_eps = layer_norm_eps + self.hidden_dropout_prob = hidden_dropout_prob + self.hidden_act = hidden_act + self.initializer_range = initializer_range + self.attention_probs_dropout_prob = attention_probs_dropout_prob + + @classmethod + def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs): + """Instantiate a [`TvpConfig`] (or a derived class) from a pre-trained backbone model configuration. + + Args: + backbone_config ([`PretrainedConfig`]): + The backbone configuration. + Returns: + [`TvpConfig`]: An instance of a configuration object + """ + return cls(backbone_config=backbone_config, **kwargs) + + def to_dict(self): + """ + Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`]. 
+ + Returns: + `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, + """ + output = copy.deepcopy(self.__dict__) + if output["backbone_config"] is not None: + output["backbone_config"] = self.backbone_config.to_dict() + output["model_type"] = self.__class__.model_type + return output diff --git a/venv/lib/python3.10/site-packages/transformers/models/tvp/image_processing_tvp.py b/venv/lib/python3.10/site-packages/transformers/models/tvp/image_processing_tvp.py new file mode 100644 index 0000000000000000000000000000000000000000..18600ee5fbe7f3c029c60c4b15df1defe43cf6aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/tvp/image_processing_tvp.py @@ -0,0 +1,502 @@ +# coding=utf-8 +# Copyright 2023 The Intel AIA Team Authors, and HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License=, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing=, software +# distributed under the License is distributed on an "AS IS" BASIS=, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND=, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Image processor class for TVP.""" + +from typing import Dict, Iterable, List, Optional, Tuple, Union + +import numpy as np + +from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict +from ...image_transforms import ( + PaddingMode, + flip_channel_order, + pad, + resize, + to_channel_dimension_format, +) +from ...image_utils import ( + IMAGENET_STANDARD_MEAN, + IMAGENET_STANDARD_STD, + ChannelDimension, + ImageInput, + PILImageResampling, + get_image_size, + is_valid_image, + to_numpy_array, + valid_images, + validate_kwargs, + validate_preprocess_arguments, +) +from ...utils import TensorType, is_vision_available, logging + + +if is_vision_available(): + import PIL + + +logger = logging.get_logger(__name__) + + +# Copied from transformers.models.vivit.image_processing_vivit.make_batched +def make_batched(videos) -> List[List[ImageInput]]: + if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]): + return videos + + elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]): + return [videos] + + elif is_valid_image(videos): + return [[videos]] + + raise ValueError(f"Could not make batched video from {videos}") + + +def get_resize_output_image_size( + input_image: np.ndarray, + max_size: int = 448, + input_data_format: Optional[Union[str, ChannelDimension]] = None, +) -> Tuple[int, int]: + height, width = get_image_size(input_image, input_data_format) + if height >= width: + ratio = width * 1.0 / height + new_height = max_size + new_width = new_height * ratio + else: + ratio = height * 1.0 / width + new_width = max_size + new_height = new_width * ratio + size = (int(new_height), int(new_width)) + + return size + + +class TvpImageProcessor(BaseImageProcessor): + r""" + Constructs a Tvp image processor. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the + `do_resize` parameter in the `preprocess` method. 
+ size (`Dict[str, int]` *optional*, defaults to `{"longest_edge": 448}`): + Size of the output image after resizing. The longest edge of the image will be resized to + `size["longest_edge"]` while maintaining the aspect ratio of the original image. Can be overriden by + `size` in the `preprocess` method. + resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): + Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the + `preprocess` method. + do_center_crop (`bool`, *optional*, defaults to `True`): + Whether to center crop the image to the specified `crop_size`. Can be overridden by the `do_center_crop` + parameter in the `preprocess` method. + crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 448, "width": 448}`): + Size of the image after applying the center crop. Can be overridden by the `crop_size` parameter in the + `preprocess` method. + do_rescale (`bool`, *optional*, defaults to `True`): + Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` + parameter in the `preprocess` method. + rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): + Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter + in the `preprocess` method. + do_pad (`bool`, *optional*, defaults to `True`): + Whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess` method. + pad_size (`Dict[str, int]`, *optional*, defaults to `{"height": 448, "width": 448}`): + Size of the image after applying the padding. Can be overridden by the `pad_size` parameter in the + `preprocess` method. + constant_values (`Union[float, Iterable[float]]`, *optional*, defaults to 0): + The fill value to use when padding the image. + pad_mode (`PaddingMode`, *optional*, defaults to `PaddingMode.CONSTANT`): + Use what kind of mode in padding. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` + method. + do_flip_channel_order (`bool`, *optional*, defaults to `True`): + Whether to flip the color channels from RGB to BGR. Can be overridden by the `do_flip_channel_order` + parameter in the `preprocess` method. + image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): + Mean to use if normalizing the image. This is a float or list of floats the length of the number of + channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. + image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): + Standard deviation to use if normalizing the image. This is a float or list of floats the length of the + number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. 
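+
+    Example (an illustrative sketch; the dummy frames, frame count and the resulting shape are
+    assumptions rather than a tested sample):
+
+    ```python
+    >>> import numpy as np
+    >>> from transformers import TvpImageProcessor
+
+    >>> image_processor = TvpImageProcessor()
+    >>> # a "video" of 8 random RGB frames in (height, width, channels) layout
+    >>> video = [np.random.randint(0, 256, (240, 320, 3), dtype=np.uint8) for _ in range(8)]
+    >>> inputs = image_processor(video, return_tensors="pt")
+    >>> # pixel_values is (batch_size, num_frames, num_channels, height, width), padded to 448 x 448
+    >>> inputs["pixel_values"].shape
+    torch.Size([1, 8, 3, 448, 448])
+    ```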
+ """ + + model_input_names = ["pixel_values"] + + def __init__( + self, + do_resize: bool = True, + size: Dict[str, int] = None, + resample: PILImageResampling = PILImageResampling.BILINEAR, + do_center_crop: bool = True, + crop_size: Dict[str, int] = None, + do_rescale: bool = True, + rescale_factor: Union[int, float] = 1 / 255, + do_pad: bool = True, + pad_size: Dict[str, int] = None, + constant_values: Union[float, Iterable[float]] = 0, + pad_mode: PaddingMode = PaddingMode.CONSTANT, + do_normalize: bool = True, + do_flip_channel_order: bool = True, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + **kwargs, + ) -> None: + super().__init__(**kwargs) + size = size if size is not None else {"longest_edge": 448} + crop_size = crop_size if crop_size is not None else {"height": 448, "width": 448} + pad_size = pad_size if pad_size is not None else {"height": 448, "width": 448} + + self.do_resize = do_resize + self.size = size + self.do_center_crop = do_center_crop + self.crop_size = crop_size + self.resample = resample + self.do_rescale = do_rescale + self.rescale_factor = rescale_factor + self.do_pad = do_pad + self.pad_size = pad_size + self.constant_values = constant_values + self.pad_mode = pad_mode + self.do_normalize = do_normalize + self.do_flip_channel_order = do_flip_channel_order + self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN + self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD + self._valid_processor_keys = [ + "videos", + "do_resize", + "size", + "resample", + "do_center_crop", + "crop_size", + "do_rescale", + "rescale_factor", + "do_pad", + "pad_size", + "constant_values", + "pad_mode", + "do_normalize", + "do_flip_channel_order", + "image_mean", + "image_std", + "return_tensors", + "data_format", + "input_data_format", + ] + + def resize( + self, + image: np.ndarray, + size: Dict[str, int], + resample: PILImageResampling = PILImageResampling.BILINEAR, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Resize an image. + + Args: + image (`np.ndarray`): + Image to resize. + size (`Dict[str, int]`): + Size of the output image. If `size` is of the form `{"height": h, "width": w}`, the output image will + have the size `(h, w)`. If `size` is of the form `{"longest_edge": s}`, the output image will have its + longest edge of length `s` while keeping the aspect ratio of the original image. + resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): + Resampling filter to use when resiizing the image. + data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format of the image. If not provided, it will be the same as the input image. + input_data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred. + """ + size = get_size_dict(size, default_to_square=False) + if "height" in size and "width" in size: + output_size = (size["height"], size["width"]) + elif "longest_edge" in size: + output_size = get_resize_output_image_size(image, size["longest_edge"], input_data_format) + else: + raise ValueError(f"Size must have 'height' and 'width' or 'longest_edge' as keys. 
Got {size.keys()}") + + return resize( + image, + size=output_size, + resample=resample, + data_format=data_format, + input_data_format=input_data_format, + **kwargs, + ) + + def pad_image( + self, + image: np.ndarray, + pad_size: Dict[str, int] = None, + constant_values: Union[float, Iterable[float]] = 0, + pad_mode: PaddingMode = PaddingMode.CONSTANT, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ): + """ + Pad an image with zeros to the given size. + + Args: + image (`np.ndarray`): + Image to pad. + pad_size (`Dict[str, int]`) + Size of the output image with pad. + constant_values (`Union[float, Iterable[float]]`) + The fill value to use when padding the image. + pad_mode (`PaddingMode`) + The pad mode, default to PaddingMode.CONSTANT + data_format (`ChannelDimension` or `str`, *optional*) + The channel dimension format of the image. If not provided, it will be the same as the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred. + """ + height, width = get_image_size(image, channel_dim=input_data_format) + max_height = pad_size.get("height", height) + max_width = pad_size.get("width", width) + + pad_right, pad_bottom = max_width - width, max_height - height + if pad_right < 0 or pad_bottom < 0: + raise ValueError("The padding size must be greater than image size") + + padding = ((0, pad_bottom), (0, pad_right)) + padded_image = pad( + image, + padding, + mode=pad_mode, + constant_values=constant_values, + data_format=data_format, + input_data_format=input_data_format, + ) + + return padded_image + + def _preprocess_image( + self, + image: ImageInput, + do_resize: bool = None, + size: Dict[str, int] = None, + resample: PILImageResampling = None, + do_center_crop: bool = None, + crop_size: Dict[str, int] = None, + do_rescale: bool = None, + rescale_factor: float = None, + do_pad: bool = True, + pad_size: Dict[str, int] = None, + constant_values: Union[float, Iterable[float]] = None, + pad_mode: PaddingMode = None, + do_normalize: bool = None, + do_flip_channel_order: bool = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """Preprocesses a single image.""" + + validate_preprocess_arguments( + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + do_pad=do_pad, + size_divisibility=pad_size, # here the pad() method simply requires the pad_size argument. + do_center_crop=do_center_crop, + crop_size=crop_size, + do_resize=do_resize, + size=size, + resample=resample, + ) + + # All transformations expect numpy arrays. 
+ image = to_numpy_array(image) + + if do_resize: + image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) + + if do_center_crop: + image = self.center_crop(image, size=crop_size, input_data_format=input_data_format) + + if do_rescale: + image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) + + if do_normalize: + image = self.normalize( + image=image.astype(np.float32), mean=image_mean, std=image_std, input_data_format=input_data_format + ) + + if do_pad: + image = self.pad_image( + image=image, + pad_size=pad_size, + constant_values=constant_values, + pad_mode=pad_mode, + input_data_format=input_data_format, + ) + + # the pretrained checkpoints assume images are BGR, not RGB + if do_flip_channel_order: + image = flip_channel_order(image=image, input_data_format=input_data_format) + + image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) + + return image + + def preprocess( + self, + videos: Union[ImageInput, List[ImageInput], List[List[ImageInput]]], + do_resize: bool = None, + size: Dict[str, int] = None, + resample: PILImageResampling = None, + do_center_crop: bool = None, + crop_size: Dict[str, int] = None, + do_rescale: bool = None, + rescale_factor: float = None, + do_pad: bool = None, + pad_size: Dict[str, int] = None, + constant_values: Union[float, Iterable[float]] = None, + pad_mode: PaddingMode = None, + do_normalize: bool = None, + do_flip_channel_order: bool = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + data_format: ChannelDimension = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> PIL.Image.Image: + """ + Preprocess an image or batch of images. + + Args: + videos (`ImageInput` or `List[ImageInput]` or `List[List[ImageInput]]`): + Frames to preprocess. + do_resize (`bool`, *optional*, defaults to `self.do_resize`): + Whether to resize the image. + size (`Dict[str, int]`, *optional*, defaults to `self.size`): + Size of the image after applying resize. + resample (`PILImageResampling`, *optional*, defaults to `self.resample`): + Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only + has an effect if `do_resize` is set to `True`. + do_center_crop (`bool`, *optional*, defaults to `self.do_centre_crop`): + Whether to centre crop the image. + crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): + Size of the image after applying the centre crop. + do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): + Whether to rescale the image values between [0 - 1]. + rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): + Rescale factor to rescale the image by if `do_rescale` is set to `True`. + do_pad (`bool`, *optional*, defaults to `True`): + Whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess` method. + pad_size (`Dict[str, int]`, *optional*, defaults to `{"height": 448, "width": 448}`): + Size of the image after applying the padding. Can be overridden by the `pad_size` parameter in the + `preprocess` method. + constant_values (`Union[float, Iterable[float]]`, *optional*, defaults to 0): + The fill value to use when padding the image. 
+ pad_mode (`PaddingMode`, *optional*, defaults to "PaddingMode.CONSTANT"): + Use what kind of mode in padding. + do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): + Whether to normalize the image. + do_flip_channel_order (`bool`, *optional*, defaults to `self.do_flip_channel_order`): + Whether to flip the channel order of the image. + image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): + Image mean. + image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): + Image standard deviation. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. + data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. Can be one of: + - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: Use the inferred channel dimension format of the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + """ + do_resize = do_resize if do_resize is not None else self.do_resize + resample = resample if resample is not None else self.resample + do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop + do_rescale = do_rescale if do_rescale is not None else self.do_rescale + rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor + do_pad = do_pad if do_pad is not None else self.do_pad + pad_size = pad_size if pad_size is not None else self.pad_size + constant_values = constant_values if constant_values is not None else self.constant_values + pad_mode = pad_mode if pad_mode else self.pad_mode + do_normalize = do_normalize if do_normalize is not None else self.do_normalize + do_flip_channel_order = ( + do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order + ) + image_mean = image_mean if image_mean is not None else self.image_mean + image_std = image_std if image_std is not None else self.image_std + + size = size if size is not None else self.size + size = get_size_dict(size, default_to_square=False) + crop_size = crop_size if crop_size is not None else self.crop_size + crop_size = get_size_dict(crop_size, param_name="crop_size") + + validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) + + if not valid_images(videos): + raise ValueError( + "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " + "torch.Tensor, tf.Tensor or jax.ndarray." 
+ ) + + videos = make_batched(videos) + + videos = [ + np.array( + [ + self._preprocess_image( + image=img, + do_resize=do_resize, + size=size, + resample=resample, + do_center_crop=do_center_crop, + crop_size=crop_size, + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_pad=do_pad, + pad_size=pad_size, + constant_values=constant_values, + pad_mode=pad_mode, + do_normalize=do_normalize, + do_flip_channel_order=do_flip_channel_order, + image_mean=image_mean, + image_std=image_std, + data_format=data_format, + input_data_format=input_data_format, + ) + for img in video + ] + ) + for video in videos + ] + + data = {"pixel_values": videos} + return BatchFeature(data=data, tensor_type=return_tensors) diff --git a/venv/lib/python3.10/site-packages/transformers/models/tvp/modeling_tvp.py b/venv/lib/python3.10/site-packages/transformers/models/tvp/modeling_tvp.py new file mode 100644 index 0000000000000000000000000000000000000000..da8e85da74cfbdca71897a204788c98431a00135 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/tvp/modeling_tvp.py @@ -0,0 +1,892 @@ +# coding=utf-8 +# Copyright 2023 The Intel AIA Team Authors, and HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License=, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing=, software +# distributed under the License is distributed on an "AS IS" BASIS=, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND=, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch TVP Model""" + +import math +from dataclasses import dataclass +from typing import Optional, Tuple + +import torch +import torch.utils.checkpoint +from torch import nn + +from ...activations import ACT2FN +from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings +from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ModelOutput +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import prune_linear_layer +from ...utils import logging +from ...utils.backbone_utils import load_backbone +from .configuration_tvp import TvpConfig + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import TVP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +@dataclass +class TvpVideoGroundingOutput(ModelOutput): + """ + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): + Temporal-Distance IoU loss for video grounding. + logits (`torch.FloatTensor` of shape `(batch_size, 2)`): + Contains start_time/duration and end_time/duration. It is the time slot of the videos corresponding to the + input texts. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of + the model at the output of each layer plus the optional initial embedding outputs. 
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +class TvpLoss(nn.Module): + """ + This class computes the losses for `TvpForVideoGrounding`. The process happens in two steps: 1) we compute + hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched + ground-truth / prediction (supervise class and box). + + Args: + losses (`List[str]`): + List of all the losses to be applied. + """ + + def __init__(self, losses): + super().__init__() + self.loss_map = { + "iou": self.loss_iou, + "distance": self.loss_distance, + "duration": self.loss_duration, + } + for loss in losses: + if loss not in self.loss_map: + raise ValueError(f"Loss {loss} not supported") + + self.losses = losses + + def loss_iou(self, start_time, end_time, candidates_start_time, candidates_end_time, duration): + """ + Measure the intersection over union. + """ + inter = torch.min(candidates_end_time, end_time) - torch.max(candidates_start_time, start_time) + union = torch.max(candidates_end_time, end_time) - torch.min(candidates_start_time, start_time) + iou = 1 - inter.clamp(min=0) / union + + return iou + + def loss_distance(self, start_time, end_time, candidates_start_time, candidates_end_time, duration): + """ + Measure the distance of mid points. + """ + mid_candidates = torch.div(torch.add(candidates_start_time, candidates_end_time), 2.0) + mid_groundtruth = torch.div(torch.add(start_time, end_time), 2.0) + distance_diff = torch.div( + torch.max(mid_candidates, mid_groundtruth) - torch.min(mid_candidates, mid_groundtruth), duration + ).clamp(min=0.2) + + return distance_diff + + def loss_duration(self, start_time, end_time, candidates_start_time, candidates_end_time, duration): + """ + Measure the difference of duration. + """ + duration_candidates = torch.sub(candidates_end_time, candidates_start_time) + duration_groundtruth = torch.sub(end_time, start_time) + duration_diff = torch.square(torch.div(torch.sub(duration_candidates, duration_groundtruth), duration)) + duration_diff = duration_diff.clamp(min=0.4) + + return duration_diff + + def forward(self, logits, labels): + """ + This performs the loss computation. + + Args: + logits (`torch.FloatTensor`): + The output logits of head module. + labels (`List[torch.FloatTensor]`): + List of tensors ([start, end, duration]), which contains start time, end time of the video corresponding to the text, and also the duration. 
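+                Note that the computation below unpacks this argument as `duration, start_time, end_time = labels`,
+                i.e. the duration tensor is expected first.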
+ """ + duration, start_time, end_time = labels + candidates = torch.mul(logits, duration) + candidates_start_time, candidates_end_time = candidates[:, 0].float(), candidates[:, 1].float() + + losses_dict = {} + for loss in self.losses: + losses_dict.update( + {loss: self.loss_map[loss](start_time, end_time, candidates_start_time, candidates_end_time, duration)} + ) + + return losses_dict + + +class TvpVisionModel(nn.Module): + def __init__(self, config): + super().__init__() + self.backbone = load_backbone(config) + self.grid_encoder_conv = nn.Conv2d( + config.backbone_config.hidden_sizes[-1], + config.hidden_size, + kernel_size=3, + stride=1, + padding=1, + groups=1, + bias=False, + ) + + def forward(self, pixel_values): + batch_size, num_frames, num_channels, height, width = pixel_values.shape + # (batch_size * num_frames, num_channels, height, width) + pixel_values = pixel_values.view(batch_size * num_frames, num_channels, height, width) + grid_feat_outputs = self.backbone(pixel_values)["feature_maps"][0] + grid = self.grid_encoder_conv(grid_feat_outputs) + grid = nn.functional.max_pool2d(grid, kernel_size=2, stride=2) + grid = nn.functional.relu(grid, inplace=True) + new_channel, new_height, new_width = grid.shape[-3:] + # (batch_size, num_frames, num_channels, height, width) + grid = grid.view(batch_size, num_frames, new_channel, new_height, new_width) + # (batch_size, num_frames, height, width, num_channels) + grid = grid.permute(0, 1, 3, 4, 2) + return grid + + +class TvpVisualInputEmbedding(nn.Module): + """ + Takes input of both image and video (multi-frame) + """ + + def __init__(self, config): + super().__init__() + # sequence embedding + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + self.row_position_embeddings = nn.Embedding(config.max_grid_row_position_embeddings, config.hidden_size) + self.col_position_embeddings = nn.Embedding(config.max_grid_col_position_embeddings, config.hidden_size) + self.token_type_embeddings = nn.Embedding(1, config.hidden_size) + self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def add_2d_positional_embeddings(self, grid): + """ + Args: + grid: (batch_size, height, width, hidden_dim) + Returns: + grid + col_position_embeddings.view(*col_shape): (batch_size, *, height, width, hidden_dim) + """ + batch_size, height, width, hidden_dim = grid.shape + + # add row-wise position embeddings + row_position_ids = torch.arange(height, dtype=torch.long, device=grid.device) # (height, ) + row_position_embeddings = self.row_position_embeddings(row_position_ids) # (height, hidden_dim) + row_shape = (1,) * (len(grid.shape) - 3) + (height, 1, hidden_dim) # (1, height, 1, hidden_dim) + grid = grid + row_position_embeddings.view(*row_shape) # broadcast automatically + + # add column-wise position embeddings + col_position_ids = torch.arange(width, dtype=torch.long, device=grid.device) # (width, ) + col_position_embeddings = self.col_position_embeddings(col_position_ids) # (width, hidden_dim) + col_shape = (batch_size, 1, width, hidden_dim) # (1, 1, width, hidden_dim) + return grid + col_position_embeddings.view(*col_shape) # broadcast automatically + + def forward(self, grid): + """ + Args: + grid: Array of shape (batch_size, num_frames, height, width, num_channels). + It contains processed frames extracted from videos, and is generated by Tvp image preprocessor. 
Note, + num_frames can be 1 + + Returns: + embeddings: The embedding of grid with size (batch_size, height*width, num_channels) + + """ + batch_size, num_frames, height, width, num_channels = grid.shape + # temporal mean pooling, (batch_size, height, width, hidden_size) + grid = grid.mean(1) + grid = self.add_2d_positional_embeddings(grid) + # image token sequence, (batch_size, height*width, num_channels) + visual_tokens = grid.view(batch_size, -1, num_channels) + visual_tokens_shape = visual_tokens.shape[:-1] + device = visual_tokens.device + + # image token type embeddings. + token_type_ids = torch.zeros(visual_tokens_shape, dtype=torch.long, device=device) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = visual_tokens + token_type_embeddings + embeddings = self.layer_norm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class TvpTextInputEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + device = input_ids.device if input_ids is not None else inputs_embeds.device + if position_ids is None: + position_ids = torch.arange(seq_length, dtype=torch.long, device=device) + position_ids = position_ids.unsqueeze(0).expand(input_shape) + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + position_embeddings = self.position_embeddings(position_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + position_embeddings + token_type_embeddings + embeddings = self.layer_norm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class TvpAttention(nn.Module): + def __init__(self, config): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + self.attn_dropout = nn.Dropout(config.attention_probs_dropout_prob) + + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.pruned_heads = set() + + def 
prune_heads(self, heads): + if len(heads) == 0: + return + mask = torch.ones(self.num_attention_heads, self.attention_head_size) + heads = set(heads) - self.pruned_heads # Convert to set and remove already pruned heads + for head in heads: + # Compute how many pruned heads are before the head and move the index accordingly + head = head - sum(1 if h < head else 0 for h in self.pruned_heads) + mask[head] = 0 + mask = mask.view(-1).contiguous().eq(1) + index = torch.arange(len(mask))[mask].long() + + # Prune linear layers + self.query = prune_linear_layer(self.query, index) + self.key = prune_linear_layer(self.key, index) + self.value = prune_linear_layer(self.value, index) + self.dense = prune_linear_layer(self.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.num_attention_heads = self.num_attention_heads - len(heads) + self.all_head_size = self.attention_head_size * self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def _reshape(self, tensor: torch.Tensor, sequence_length: int, batch_size: int): + return ( + tensor.view(batch_size, sequence_length, self.num_attention_heads, self.attention_head_size) + .transpose(1, 2) + .contiguous() + ) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + output_attentions: Optional[bool] = None, + ): + batch_size, sequence_length = hidden_states.shape[:2] + mixed_query_layer = self.query(hidden_states) + + mixed_key_layer = self.key(hidden_states) + mixed_value_layer = self.value(hidden_states) + + query_layer = self._reshape(mixed_query_layer, sequence_length, batch_size) + key_layer = self._reshape(mixed_key_layer, sequence_length, batch_size) + value_layer = self._reshape(mixed_value_layer, sequence_length, batch_size) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
+ attention_probs = self.attn_dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + attn_output = torch.matmul(attention_probs, value_layer) + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.reshape(batch_size, sequence_length, self.all_head_size) + + attn_output = self.dense(attn_output) + attn_output = self.dropout(attn_output) + attn_output = self.layer_norm(attn_output + hidden_states) + # add attentions if we output them + outputs = (attn_output, attention_probs) if output_attentions else (attn_output,) + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Tvp +class TvpIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class TvpOutputLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.layer_norm(hidden_states + input_tensor) + return hidden_states + + +class TvpEncodeLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.attention = TvpAttention(config) + self.intermediate = TvpIntermediate(config) + self.output = TvpOutputLayer(config) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + output_attentions: Optional[bool] = None, + ): + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + ) + attention_output = self_attention_outputs[0] + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + outputs = (layer_output,) + outputs + return outputs + + +class TvpEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([TvpEncodeLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + return_dict = return_dict if return_dict is not None else self.config.return_dict + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + all_hidden_states = () + all_attentions = () + + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + 
all_hidden_states = all_hidden_states + (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + layer_module.__call__, + hidden_states, + attention_mask, + (head_mask[i] if head_mask is not None else None), + output_attentions, + ) + else: + layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i], output_attentions) + + hidden_states = layer_outputs[0] + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + # Add last layer + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + outputs = (hidden_states,) + if output_hidden_states: + outputs = outputs + (all_hidden_states,) + if output_attentions: + outputs = outputs + (all_attentions,) + return outputs # last-layer hidden state, (all hidden states), (all attentions) + + return BaseModelOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states if output_hidden_states else None, + attentions=all_attentions if output_attentions else None, + ) + + +# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->Tvp +class TvpPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class TvpPreTrainedModel(PreTrainedModel): + """An abstract class to handle weights initialization and + a simple interface for downloading and loading pretrained models. + """ + + config_class = TvpConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + if isinstance(module, nn.Conv2d): + nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu") + if module.bias is not None: + nn.init.constant_(module.bias, 0) + + +TVP_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it + as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`TvpConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +TVP_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. 
See + [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input + IDs?](../glossary#input-ids) + + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`TvpImageProcessor`]. See [`TvpImageProcessor.__call__`] + for details. + + attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + [What are attention masks?](../glossary#attention-mask) + + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +class TvpFrameDownPadPrompter(nn.Module): + """ + Pad frames extracted from videos only at the bottom. + """ + + def __init__(self, config): + if config.visual_prompter_apply not in ("add", "replace", "remove"): + raise ValueError("`visual_prompter_apply` must be in (add, replace, remove)") + + super().__init__() + self.visual_prompt_size = config.visual_prompt_size + self.frame_num = config.frame_num + self.max_img_size = config.max_img_size + self.visual_prompter_apply = config.visual_prompter_apply + + self.pad_down = nn.Parameter( + torch.randn([1, config.frame_num, 3, config.visual_prompt_size, config.max_img_size]) + ) + + def forward(self, pixel_values): + if self.visual_prompter_apply != "add": + visual_prompt_mask = torch.ones( + [self.max_img_size, self.max_img_size], dtype=pixel_values.dtype, device=pixel_values.device + ) + visual_prompt_mask[self.max_img_size - self.visual_prompt_size : self.max_img_size, :] = 0.0 + pixel_values *= visual_prompt_mask + if self.visual_prompter_apply != "remove": + prompt = torch.zeros( + [pixel_values.shape[0], pixel_values.shape[1], 3, self.max_img_size, self.max_img_size], + device=pixel_values.device, + ) + start_point = self.max_img_size - self.visual_prompt_size + prompt[:, :, :, start_point : self.max_img_size, :] = self.pad_down + pixel_values += prompt.to(pixel_values.dtype) + return pixel_values + + +class TvpFramePadPrompter(nn.Module): + """ + Pad frames extracted from videos in the surroundings. 
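+ Each of the four borders (top, bottom, left and right) is a learnable pad of width `visual_prompt_size`, assembled around a zero square of side `max_img_size - 2 * visual_prompt_size`, so only the border pixels carry the prompt.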
+ """ + + def __init__(self, config): + if config.visual_prompter_apply not in ("add", "replace", "remove"): + raise ValueError("`visual_prompter_apply` must be in (add, replace, remove)") + + super().__init__() + self.num_frames = config.num_frames + self.max_img_size = config.max_img_size + self.visual_prompter_apply = config.visual_prompter_apply + + self.base_size = config.max_img_size - config.visual_prompt_size * 2 + self.pad_up = nn.Parameter( + torch.randn([1, config.num_frames, 3, config.visual_prompt_size, config.max_img_size]) + ) + self.pad_down = nn.Parameter( + torch.randn([1, config.num_frames, 3, config.visual_prompt_size, config.max_img_size]) + ) + self.pad_left = nn.Parameter( + torch.randn( + [ + 1, + config.num_frames, + 3, + config.max_img_size - config.visual_prompt_size * 2, + config.visual_prompt_size, + ] + ) + ) + self.pad_right = nn.Parameter( + torch.randn( + [ + 1, + config.num_frames, + 3, + config.max_img_size - config.visual_prompt_size * 2, + config.visual_prompt_size, + ] + ) + ) + + def forward(self, pixel_values): + if self.visual_prompter_apply not in ("add", "remove", "replace"): + raise ValueError(f"Invalid visual_prompter_apply value {self.visual_prompter_apply}") + if self.visual_prompter_apply in ("replace", "remove"): + visual_prompt_mask = torch.ones( + [self.max_img_size, self.max_img_size], dtype=pixel_values.dtype, device=pixel_values.device + ) + pixel_values *= visual_prompt_mask + if self.visual_prompter_apply in ("replace", "add"): + base = torch.zeros(1, self.num_frames, 3, self.base_size, self.base_size, device=pixel_values.device) + prompt = torch.cat([self.pad_left, base, self.pad_right], dim=4) + prompt = torch.cat([self.pad_up, prompt, self.pad_down], dim=3) + prompt = torch.cat(pixel_values.size(0) * [prompt]) + pixel_values = pixel_values + prompt.to(pixel_values.dtype) + return pixel_values + + +TVP_PROMPTER_CLASSES_MAPPING = { + "framedownpad": TvpFrameDownPadPrompter, + "framepad": TvpFramePadPrompter, +} + + +@add_start_docstrings( + "The bare Tvp Model transformer outputting BaseModelOutputWithPooling object without any specific head on" " top.", + TVP_START_DOCSTRING, +) +class TvpModel(TvpPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.config = config + self.vision_model = TvpVisionModel(config) + self.embeddings = TvpTextInputEmbeddings(config) + self.visual_embeddings = TvpVisualInputEmbedding(config) + self.encoder = TvpEncoder(config) + self.pooler = TvpPooler(config) + self.text_prompt = nn.Parameter(torch.randn([1, 10, config.hidden_size])) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + if config.visual_prompter_type not in TVP_PROMPTER_CLASSES_MAPPING: + raise ValueError("`visual_prompter_type` must be in (framedownpad, framepad)") + self.visual_prompter = TVP_PROMPTER_CLASSES_MAPPING[config.visual_prompter_type](config) + + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """Prunes heads of the model. 
+ heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(TVP_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=TvpConfig) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + pixel_values: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + r""" + Returns: + + Examples: + ```python + >>> import torch + >>> from transformers import AutoConfig, AutoTokenizer, TvpModel + + >>> model = TvpModel.from_pretrained("Jiqing/tiny-random-tvp") + + >>> tokenizer = AutoTokenizer.from_pretrained("Jiqing/tiny-random-tvp") + + >>> pixel_values = torch.rand(1, 1, 3, 448, 448) + >>> text_inputs = tokenizer("This is an example input", return_tensors="pt") + >>> output = model(text_inputs.input_ids, pixel_values, text_inputs.attention_mask) + ```""" + return_dict = return_dict if return_dict is not None else self.config.return_dict + + # Add visual prompt, it compensates for the spatiotemporal information loss in 2D visual features. + pixel_values = self.vision_model(self.visual_prompter(pixel_values)) + # (batch_size, sequence_length, hidden_size) + text_embedding_output = self.embeddings(input_ids=input_ids) + # (batch_size, visual_sequence_length, hidden_size) + visual_embedding_output = self.visual_embeddings(pixel_values) + if attention_mask is not None: + # (batch_size, visual_sequence_length) + visual_attention_mask = attention_mask.new_ones(visual_embedding_output.shape[:2]) + pt_mask = torch.ones(attention_mask.shape[0], 10).to( + device=attention_mask.device, dtype=attention_mask.dtype + ) + attention_mask = torch.cat([pt_mask, attention_mask, visual_attention_mask], dim=-1) + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
+ attention_mask = self.get_extended_attention_mask(attention_mask, input_ids.size()).to(input_ids.device) + text_prompt = self.text_prompt.expand(text_embedding_output.shape[0], -1, -1) + # (batch_size, sequence_length + visual_sequence_length, hidden_size) + embedding_output = torch.cat([text_prompt, text_embedding_output, visual_embedding_output], dim=1) + + encoder_outputs = self.encoder( + embedding_output, + attention_mask=attention_mask, + head_mask=self.get_head_mask(head_mask, self.config.num_hidden_layers), + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + last_hidden_state = encoder_outputs.last_hidden_state if return_dict else encoder_outputs[0] + pooled_output = self.pooler(last_hidden_state) + last_hidden_state = self.dropout(last_hidden_state) + pooled_output = self.dropout(pooled_output) + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +class TvpVideoGroundingHead(nn.Module): + def __init__(self, config): + super().__init__() + self.layer_0 = nn.Linear(config.hidden_size, config.hidden_size * 2) + self.layer_1 = nn.Linear(config.hidden_size * 2, 2) + self.activation_0 = nn.ReLU() + self.activation_1 = nn.Sigmoid() + + def forward(self, pooler_output): + logits = self.activation_0(self.layer_0(pooler_output)) + logits = self.activation_1(self.layer_1(logits)) + return logits + + +@add_start_docstrings( + """ + Tvp Model with a video grounding head on top computing IoU, distance, and duration loss. + """, + TVP_START_DOCSTRING, +) +class TvpForVideoGrounding(TvpPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.config = config + self.model = TvpModel(config) + self.video_grounding_head = TvpVideoGroundingHead(config) + + self.post_init() + + @add_start_docstrings_to_model_forward(TVP_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=TvpVideoGroundingOutput, config_class=TvpConfig) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + pixel_values: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.LongTensor] = None, + labels: Tuple[torch.Tensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + r""" + labels (`torch.FloatTensor` of shape `(batch_size, 3)`, *optional*): + The labels contains duration, start time, and end time of the video corresponding to the text. 
+ Returns: + + Examples: + ```python + >>> import torch + >>> from transformers import AutoConfig, AutoTokenizer, TvpForVideoGrounding + + >>> model = TvpForVideoGrounding.from_pretrained("Jiqing/tiny-random-tvp") + + >>> tokenizer = AutoTokenizer.from_pretrained("Jiqing/tiny-random-tvp") + + >>> pixel_values = torch.rand(1, 1, 3, 448, 448) + >>> text_inputs = tokenizer("This is an example input", return_tensors="pt") + >>> output = model(text_inputs.input_ids, pixel_values, text_inputs.attention_mask) + ```""" + return_dict = return_dict if return_dict is not None else self.config.return_dict + outputs = self.model( + input_ids, + pixel_values, + attention_mask, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + pooler_output = outputs[1] + + logits = self.video_grounding_head(pooler_output) + + loss = None + if labels is not None: + criterion = TvpLoss(["iou", "distance", "duration"]) + criterion.to(self.device) + loss_dict = criterion(logits, labels) + loss = ( + loss_dict["iou"] + + self.config.distance_loss_weight * loss_dict["distance"] + + self.config.duration_loss_weight * loss_dict["duration"] + ) + + if not return_dict: + outputs = (logits,) + outputs[2:] + if loss is not None: + outputs = (loss,) + outputs + return outputs + + return TvpVideoGroundingOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/tvp/processing_tvp.py b/venv/lib/python3.10/site-packages/transformers/models/tvp/processing_tvp.py new file mode 100644 index 0000000000000000000000000000000000000000..4e27399ab8053fdd62c42edacc2c2d1dd8586920 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/tvp/processing_tvp.py @@ -0,0 +1,154 @@ +# coding=utf-8 +# Copyright 2023 The Intel AIA Team Authors, and HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Processor class for TVP. +""" + + +from ...processing_utils import ProcessorMixin +from ...tokenization_utils_base import BatchEncoding + + +class TvpProcessor(ProcessorMixin): + r""" + Constructs a TVP processor which wraps a TVP image processor and a Bert tokenizer into a single processor. + + [`TvpProcessor`] offers all the functionalities of [`TvpImageProcessor`] and [`BertTokenizerFast`]. See the + [`~TvpProcessor.__call__`] and [`~TvpProcessor.decode`] for more information. + + Args: + image_processor ([`TvpImageProcessor`], *optional*): + The image processor is a required input. + tokenizer ([`BertTokenizerFast`], *optional*): + The tokenizer is a required input.
+ """ + + attributes = ["image_processor", "tokenizer"] + image_processor_class = "TvpImageProcessor" + tokenizer_class = ("BertTokenizer", "BertTokenizerFast") + + def __init__(self, image_processor=None, tokenizer=None, **kwargs): + if image_processor is None: + raise ValueError("You need to specify an `image_processor`.") + if tokenizer is None: + raise ValueError("You need to specify a `tokenizer`.") + + super().__init__(image_processor, tokenizer) + + def __call__(self, text=None, videos=None, return_tensors=None, **kwargs): + """ + Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` + and `kwargs` arguments to BertTokenizerFast's [`~BertTokenizerFast.__call__`] if `text` is not `None` to encode + the text. To prepare the image(s), this method forwards the `videos` and `kwargs` arguments to + TvpImageProcessor's [`~TvpImageProcessor.__call__`] if `videos` is not `None`. Please refer to the doctsring of + the above two methods for more information. + + Args: + text (`str`, `List[str]`, `List[List[str]]`): + The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings + (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set + `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). + videos (`List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`, `List[List[PIL.Image.Image]]`, `List[List[np.ndarrray]]`,: + `List[List[torch.Tensor]]`): The video or batch of videos to be prepared. Each video should be a list + of frames, which can be either PIL images or NumPy arrays. In case of NumPy arrays/PyTorch tensors, + each frame should be of shape (H, W, C), where H and W are frame height and width, and C is a number of + channels. + + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors of a particular framework. Acceptable values are: + + - `'tf'`: Return TensorFlow `tf.constant` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return NumPy `np.ndarray` objects. + - `'jax'`: Return JAX `jnp.ndarray` objects. + + Returns: + [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: + + - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. + - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when + `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not + `None`). + - **pixel_values** -- Pixel values to be fed to a model. Returned when `videos` is not `None`. + """ + + max_text_length = kwargs.pop("max_text_length", None) + + if text is None and videos is None: + raise ValueError("You have to specify either text or videos. Both cannot be none.") + + encoding = {} + if text is not None: + textual_input = self.tokenizer.batch_encode_plus( + text, + truncation=True, + padding="max_length", + max_length=max_text_length, + pad_to_max_length=True, + return_tensors=return_tensors, + return_token_type_ids=False, + **kwargs, + ) + encoding.update(textual_input) + + if videos is not None: + image_features = self.image_processor(videos, return_tensors=return_tensors, **kwargs) + encoding.update(image_features) + + return BatchEncoding(data=encoding, tensor_type=return_tensors) + + def batch_decode(self, *args, **kwargs): + """ + This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. 
Please + refer to the docstring of this method for more information. + """ + return self.tokenizer.batch_decode(*args, **kwargs) + + def decode(self, *args, **kwargs): + """ + This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to + the docstring of this method for more information. + """ + return self.tokenizer.decode(*args, **kwargs) + + def post_process_video_grounding(self, logits, video_durations): + """ + Compute the time of the video. + + Args: + logits (`torch.Tensor`): + The logits output of TvpForVideoGrounding. + video_durations (`float`): + The video's duration. + + Returns: + start (`float`): + The start time of the video. + end (`float`): + The end time of the video. + """ + start, end = ( + round(logits.tolist()[0][0] * video_durations, 1), + round(logits.tolist()[0][1] * video_durations, 1), + ) + + return start, end + + @property + # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names + def model_input_names(self): + tokenizer_input_names = self.tokenizer.model_input_names + image_processor_input_names = self.image_processor.model_input_names + return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))