diff --git a/ckpts/universal/global_step20/zero/10.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/10.mlp.dense_4h_to_h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c5e78629cd4afcb18bdfec892e742c1dba4a0dfe
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/10.mlp.dense_4h_to_h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65404354c391879d0e09bf57e328fa8f9818e63acd3e66d1fac8b05c273b41d4
+size 33555612
diff --git a/ckpts/universal/global_step20/zero/10.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/10.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7718ece2800bbd1931bc72ab9e17fb7ceb00b382
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/10.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3cc604eac633693be1e52ff5a0e110ba47bce0f325db59a591b898da11c0f8a7
+size 33555627
diff --git a/ckpts/universal/global_step20/zero/10.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step20/zero/10.mlp.dense_4h_to_h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..fdad688cb492d33dc9fd2fcb027b92909985c737
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/10.mlp.dense_4h_to_h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff7ad7d0aefcb10d0fdd242fd7d979a4c0c50f990fb968c2e403b7410588b0f0
+size 33555533
diff --git a/ckpts/universal/global_step20/zero/26.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/26.attention.query_key_value.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f15d3212d31153ffe9249afb940335589a5e946a
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/26.attention.query_key_value.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c10e9aec535e1b5c70d83f837c268cf1ffb619c95caa887b26009464308d3bef
+size 50332843
diff --git a/ckpts/universal/global_step20/zero/26.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step20/zero/26.attention.query_key_value.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..eee07815809db15cbde88a2cdad1546065d76af7
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/26.attention.query_key_value.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:987b036cd0ddf213add9c3941dd71cd8ab78faa724b796e99cbd630d4a0443f3
+size 50332749
diff --git a/ckpts/universal/global_step20/zero/5.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step20/zero/5.mlp.dense_4h_to_h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5e57195a3d3a311bc485fcbe75e0ddcc9e5aa083
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/5.mlp.dense_4h_to_h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60aa675e598d8c5d825f2700d6b0a3841d6db7410af126179fcd26a7b403d81c
+size 33555533
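The checkpoint blobs above are stored as Git LFS pointer files: three text lines giving the spec version, the SHA-256 of the real object, and its size in bytes; the tensor data itself lives in LFS storage and is fetched by `git lfs pull`. A minimal sketch of reading one such pointer (the `parse_lfs_pointer` helper and the example call are illustrative, not part of this diff):

```python
from pathlib import Path


def parse_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into its version/oid/size fields."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        # Each pointer line is "<key> <value>", e.g. "size 33555612".
        key, _, value = line.partition(" ")
        fields[key] = value
    # The pointer carries only the hash and byte count of the blob,
    # not the blob itself.
    fields["size"] = int(fields["size"])
    return fields


# Hypothetical usage against one of the pointers added above:
# parse_lfs_pointer("ckpts/universal/global_step20/zero/5.mlp.dense_4h_to_h.weight/fp32.pt")
# -> {'version': 'https://git-lfs.github.com/spec/v1', 'oid': 'sha256:60aa67...', 'size': 33555533}
```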
diff --git a/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/logs/debug-internal.log
new file mode 100644
index 0000000000000000000000000000000000000000..0011010e1c877bfc767407c90508da05c3d2755a
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/logs/debug-internal.log
@@ -0,0 +1,183 @@
+2024-05-22 18:43:16,494 INFO StreamThr :1063 [internal.py:wandb_internal():85] W&B internal server running at pid: 1063, started at: 2024-05-22 18:43:16.492393
+2024-05-22 18:43:16,499 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: status
+2024-05-22 18:43:16,499 INFO WriterThread:1063 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/run-86p21jxx.wandb
+2024-05-22 18:43:16,502 DEBUG SenderThread:1063 [sender.py:send():378] send: header
+2024-05-22 18:43:16,505 DEBUG SenderThread:1063 [sender.py:send():378] send: run
+2024-05-22 18:43:16,841 INFO SenderThread:1063 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files
+2024-05-22 18:43:16,841 INFO SenderThread:1063 [sender.py:_start_run_threads():1123] run started: 86p21jxx with start time 1716403396.49225
+2024-05-22 18:43:16,846 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: check_version
+2024-05-22 18:43:16,846 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: check_version
+2024-05-22 18:43:16,962 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: run_start
+2024-05-22 18:43:16,964 DEBUG HandlerThread:1063 [system_info.py:__init__():26] System info init
+2024-05-22 18:43:16,964 DEBUG HandlerThread:1063 [system_info.py:__init__():41] System info init done
+2024-05-22 18:43:16,964 INFO HandlerThread:1063 [system_monitor.py:start():194] Starting system monitor
+2024-05-22 18:43:16,964 INFO SystemMonitor:1063 [system_monitor.py:_start():158] Starting system asset monitoring threads
+2024-05-22 18:43:16,964 INFO HandlerThread:1063 [system_monitor.py:probe():214] Collecting system info
+2024-05-22 18:43:16,971 INFO SystemMonitor:1063 [interfaces.py:start():188] Started cpu monitoring
+2024-05-22 18:43:16,971 INFO SystemMonitor:1063 [interfaces.py:start():188] Started disk monitoring
+2024-05-22 18:43:16,978 INFO SystemMonitor:1063 [interfaces.py:start():188] Started memory monitoring
+2024-05-22 18:43:16,979 INFO SystemMonitor:1063 [interfaces.py:start():188] Started network monitoring
+2024-05-22 18:43:17,073 DEBUG HandlerThread:1063 [system_info.py:probe():150] Probing system
+2024-05-22 18:43:17,077 DEBUG HandlerThread:1063 [system_info.py:_probe_git():135] Probing git
+2024-05-22 18:43:17,086 ERROR HandlerThread:1063 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128)
+ cmdline: git rev-parse --show-toplevel
+ stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
+To add an exception for this directory, call:
+
+ git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
+2024-05-22 18:43:17,086 DEBUG HandlerThread:1063 [system_info.py:_probe_git():143] Probing git done
+2024-05-22 18:43:17,086 DEBUG HandlerThread:1063 [system_info.py:probe():198] Probing system done
+2024-05-22 18:43:17,086 DEBUG HandlerThread:1063 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-22T18:43:17.073766', 'startedAt': '2024-05-22T18:43:16.472541', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step10000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc',
'--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.4363875000004, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 
3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.63845443725586}}, 'memory': {'total': 1007.4379997253418}}
+2024-05-22 18:43:17,087 INFO HandlerThread:1063 [system_monitor.py:probe():224] Finished collecting system info
+2024-05-22 18:43:17,087 INFO HandlerThread:1063 [system_monitor.py:probe():227] Publishing system info
+2024-05-22 18:43:17,090 INFO HandlerThread:1063 [system_monitor.py:probe():229] Finished publishing system info
+2024-05-22 18:43:17,095 DEBUG SenderThread:1063 [sender.py:send():378] send: files
+2024-05-22 18:43:17,095 INFO SenderThread:1063 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
+2024-05-22 18:43:17,292 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: python_packages
+2024-05-22 18:43:17,292 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: python_packages
+2024-05-22 18:43:17,293 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: stop_status
+2024-05-22 18:43:17,294 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: stop_status
+2024-05-22 18:43:17,410 DEBUG SenderThread:1063 [sender.py:send():378] send: telemetry
+2024-05-22 18:43:17,651 INFO wandb-upload_0:1063 [upload_job.py:push():130] Uploaded file /tmp/tmpenbd99b_wandb/78j0rr5j-wandb-metadata.json
+2024-05-22 18:43:17,843 INFO Thread-12 :1063 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/output.log
+2024-05-22 18:43:17,844 INFO Thread-12 :1063 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/wandb-metadata.json
+2024-05-22 18:43:17,844 INFO Thread-12 :1063 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/requirements.txt
+2024-05-22 18:43:19,843 INFO Thread-12 :1063 [dir_watcher.py:_on_file_modified():288] file/dir modified: 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/output.log
+2024-05-22 18:43:22,415 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: status_report
+2024-05-22 18:43:27,819 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: status_report
+2024-05-22 18:43:27,851 INFO Thread-12 :1063 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/output.log
+2024-05-22 18:43:28,335 DEBUG SenderThread:1063 [sender.py:send():378] send: exit
+2024-05-22 18:43:28,335 INFO SenderThread:1063 [sender.py:send_exit():585] handling exit code: 1
+2024-05-22 18:43:28,336 INFO SenderThread:1063 [sender.py:send_exit():587] handling runtime: 11
+2024-05-22 18:43:28,337 INFO SenderThread:1063 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+2024-05-22 18:43:28,337 INFO SenderThread:1063 [sender.py:send_exit():593] send defer
+2024-05-22 18:43:28,337 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
+2024-05-22 18:43:28,337 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 0
+2024-05-22 18:43:28,337 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
+2024-05-22 18:43:28,338 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 0
+2024-05-22 18:43:28,338 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 1
+2024-05-22 18:43:28,338 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
+2024-05-22 18:43:28,338 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 1
+2024-05-22 18:43:28,338 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
+2024-05-22 18:43:28,338 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 1
+2024-05-22 18:43:28,338 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 2
+2024-05-22 18:43:28,338 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
+2024-05-22 18:43:28,338 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 2
+2024-05-22 18:43:28,338 INFO HandlerThread:1063 [system_monitor.py:finish():203] Stopping system monitor
+2024-05-22 18:43:28,338 DEBUG SystemMonitor:1063 [system_monitor.py:_start():172] Starting system metrics aggregation loop
+2024-05-22 18:43:28,338 DEBUG SystemMonitor:1063 [system_monitor.py:_start():179] Finished system metrics aggregation loop
+2024-05-22 18:43:28,338 DEBUG SystemMonitor:1063 [system_monitor.py:_start():183] Publishing last batch of metrics
+2024-05-22 18:43:28,339 INFO HandlerThread:1063 [interfaces.py:finish():200] Joined cpu monitor
+2024-05-22 18:43:28,339 INFO HandlerThread:1063 [interfaces.py:finish():200] Joined disk monitor
+2024-05-22 18:43:28,339 INFO HandlerThread:1063 [interfaces.py:finish():200] Joined memory monitor
+2024-05-22 18:43:28,339 INFO HandlerThread:1063 [interfaces.py:finish():200] Joined network monitor
+2024-05-22 18:43:28,340 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
+2024-05-22 18:43:28,340 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 2
+2024-05-22 18:43:28,340 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 3
+2024-05-22 18:43:28,340 DEBUG SenderThread:1063 [sender.py:send():378] send: stats
+2024-05-22 18:43:28,340 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
+2024-05-22 18:43:28,340 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 3
+2024-05-22 18:43:28,340 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
+2024-05-22 18:43:28,340 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 3
+2024-05-22 18:43:28,340 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 4
+2024-05-22 18:43:28,340 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
+2024-05-22 18:43:28,340 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 4
+2024-05-22 18:43:28,341 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
+2024-05-22 18:43:28,341 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 4
+2024-05-22 18:43:28,341 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 5
+2024-05-22 18:43:28,341 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
+2024-05-22 18:43:28,341 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 5
+2024-05-22 18:43:28,341 DEBUG SenderThread:1063 [sender.py:send():378] send: summary
+2024-05-22 18:43:28,342 INFO SenderThread:1063 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+2024-05-22 18:43:28,342 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
+2024-05-22 18:43:28,342 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 5
+2024-05-22 18:43:28,342 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 6
+2024-05-22 18:43:28,342 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
+2024-05-22 18:43:28,342 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 6
+2024-05-22 18:43:28,342 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
+2024-05-22 18:43:28,342 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 6
+2024-05-22 18:43:28,346 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: status_report
+2024-05-22 18:43:28,428 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 7
+2024-05-22 18:43:28,428 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
+2024-05-22 18:43:28,428 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 7
+2024-05-22 18:43:28,428 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
+2024-05-22 18:43:28,428 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 7
+2024-05-22 18:43:28,853 INFO Thread-12 :1063 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/config.yaml
+2024-05-22 18:43:28,853 INFO Thread-12 :1063 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/wandb-summary.json
+2024-05-22 18:43:29,335 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: poll_exit
+2024-05-22 18:43:29,438 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 8
+2024-05-22 18:43:29,438 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: poll_exit
+2024-05-22 18:43:29,438 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
+2024-05-22 18:43:29,438 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 8
+2024-05-22 18:43:29,438 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
+2024-05-22 18:43:29,438 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 8
+2024-05-22 18:43:29,438 INFO SenderThread:1063 [job_builder.py:build():432] Attempting to build job artifact
+2024-05-22 18:43:29,439 INFO SenderThread:1063 [job_builder.py:_get_source_type():576] no source found
+2024-05-22 18:43:29,439 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 9
+2024-05-22 18:43:29,439 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
+2024-05-22 18:43:29,439 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 9
+2024-05-22 18:43:29,439 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
+2024-05-22 18:43:29,439 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 9
+2024-05-22 18:43:29,439 INFO SenderThread:1063 [dir_watcher.py:finish():358] shutting down directory watcher
+2024-05-22 18:43:29,854 INFO SenderThread:1063 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/output.log
+2024-05-22 18:43:29,854 INFO SenderThread:1063 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files
+2024-05-22 18:43:29,855 INFO SenderThread:1063 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/wandb-metadata.json wandb-metadata.json
+2024-05-22 18:43:29,855 INFO SenderThread:1063 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/requirements.txt requirements.txt
+2024-05-22 18:43:29,855 INFO SenderThread:1063 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/wandb-summary.json wandb-summary.json
+2024-05-22 18:43:29,857 INFO SenderThread:1063 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/config.yaml config.yaml
+2024-05-22 18:43:29,859 INFO SenderThread:1063 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/output.log output.log
+2024-05-22 18:43:29,859 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 10
+2024-05-22 18:43:29,860 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
+2024-05-22 18:43:29,860 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 10
+2024-05-22 18:43:29,860 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
+2024-05-22 18:43:29,860 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 10
+2024-05-22 18:43:29,860 INFO SenderThread:1063 [file_pusher.py:finish():169] shutting down file pusher
+2024-05-22 18:43:30,112 INFO wandb-upload_0:1063 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/requirements.txt
+2024-05-22 18:43:30,335 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: poll_exit
+2024-05-22 18:43:30,336 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: poll_exit
+2024-05-22 18:43:30,442 INFO wandb-upload_2:1063 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/config.yaml
+2024-05-22 18:43:30,474 INFO wandb-upload_3:1063 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/output.log
+2024-05-22 18:43:30,487 INFO wandb-upload_1:1063 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/files/wandb-summary.json
+2024-05-22 18:43:30,687 INFO Thread-11 (_thread_body):1063 [sender.py:transition_state():613] send defer: 11
+2024-05-22 18:43:30,687 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
+2024-05-22 18:43:30,687 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 11
+2024-05-22 18:43:30,688 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
+2024-05-22 18:43:30,688 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 11
+2024-05-22 18:43:30,688 INFO SenderThread:1063 [file_pusher.py:join():175] waiting for file pusher
+2024-05-22 18:43:30,688 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 12
+2024-05-22 18:43:30,688 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
+2024-05-22 18:43:30,688 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 12
+2024-05-22 18:43:30,688 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
+2024-05-22 18:43:30,688 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 12
+2024-05-22 18:43:30,688 INFO SenderThread:1063 [file_stream.py:finish():601] file stream finish called
+2024-05-22 18:43:30,748 INFO SenderThread:1063 [file_stream.py:finish():605] file stream finish is done
+2024-05-22 18:43:30,748 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 13
+2024-05-22 18:43:30,748 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
+2024-05-22 18:43:30,748 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 13
+2024-05-22 18:43:30,749 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
+2024-05-22 18:43:30,749 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 13
+2024-05-22 18:43:30,749 INFO SenderThread:1063 [sender.py:transition_state():613] send defer: 14
+2024-05-22 18:43:30,749 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: defer
+2024-05-22 18:43:30,749 INFO HandlerThread:1063 [handler.py:handle_request_defer():184] handle defer: 14
+2024-05-22 18:43:30,749 DEBUG SenderThread:1063 [sender.py:send():378] send: final
+2024-05-22 18:43:30,749 DEBUG SenderThread:1063 [sender.py:send():378] send: footer
+2024-05-22 18:43:30,749 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: defer
+2024-05-22 18:43:30,749 INFO SenderThread:1063 [sender.py:send_request_defer():609] handle sender defer: 14
+2024-05-22 18:43:30,750 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: poll_exit
+2024-05-22 18:43:30,750 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: poll_exit
+2024-05-22 18:43:30,750 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: server_info
+2024-05-22 18:43:30,750 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: get_summary
+2024-05-22 18:43:30,750 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: sampled_history
+2024-05-22 18:43:30,750 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: internal_messages
+2024-05-22 18:43:30,751 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: poll_exit
+2024-05-22 18:43:30,751 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: poll_exit
+2024-05-22 18:43:30,751 DEBUG SenderThread:1063 [sender.py:send_request():405] send_request: server_info
+2024-05-22 18:43:30,813 INFO MainThread:1063 [wandb_run.py:_footer_history_summary_info():3994] rendering history
+2024-05-22 18:43:30,813 INFO MainThread:1063 [wandb_run.py:_footer_history_summary_info():4026] rendering summary
+2024-05-22 18:43:30,813 INFO MainThread:1063 [wandb_run.py:_footer_sync_info():3953] logging synced files
+2024-05-22 18:43:30,814 DEBUG HandlerThread:1063 [handler.py:handle_request():158] handle_request: shutdown
+2024-05-22 18:43:30,814 INFO HandlerThread:1063 [handler.py:finish():882] shutting down handler
+2024-05-22 18:43:31,751 INFO WriterThread:1063 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184316-86p21jxx/run-86p21jxx.wandb
+2024-05-22 18:43:31,813 INFO SenderThread:1063 [sender.py:finish():1545] shutting down sender
+2024-05-22 18:43:31,813 INFO SenderThread:1063 [file_pusher.py:finish():169] shutting down file pusher
+2024-05-22 18:43:31,813 INFO SenderThread:1063 [file_pusher.py:join():175] waiting for file pusher
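Every record in the debug-internal.log above follows one layout: timestamp, level, thread name and PID, a [file:function():line] locator, then the message. A small sketch of splitting those fields apart with a regex (the pattern and field names are my own inference from the log, not a wandb API):

```python
import re

# Assumed record layout, inferred from the log above, e.g.
# "2024-05-22 18:43:16,494 INFO StreamThr :1063 [internal.py:wandb_internal():85] message"
LOG_LINE = re.compile(
    r"(?P<ts>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}) "
    r"(?P<level>[A-Z]+) (?P<thread>\S+) ?:(?P<pid>\d+) "
    r"\[(?P<where>[^\]]+)\] (?P<msg>.*)"
)


def parse_record(line: str):
    """Return the fields of one log record, or None for continuation lines."""
    m = LOG_LINE.match(line)
    return m.groupdict() if m else None


print(parse_record(
    "2024-05-22 18:43:28,335 INFO SenderThread:1063 "
    "[sender.py:send_exit():585] handling exit code: 1"
))
```

Continuation lines such as the multi-line git error return None, which is why a real reader would attach them to the preceding record.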
diff --git a/venv/lib/python3.10/site-packages/transformers/models/auto/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/auto/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..96a159133cc0050a1493a40b9aa14c8c29bec46d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/auto/__init__.py
@@ -0,0 +1,403 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    is_flax_available,
+    is_tf_available,
+    is_torch_available,
+)
+
+
+_import_structure = {
+    "auto_factory": ["get_values"],
+    "configuration_auto": ["ALL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CONFIG_MAPPING", "MODEL_NAMES_MAPPING", "AutoConfig"],
+    "feature_extraction_auto": ["FEATURE_EXTRACTOR_MAPPING", "AutoFeatureExtractor"],
+    "image_processing_auto": ["IMAGE_PROCESSOR_MAPPING", "AutoImageProcessor"],
+    "processing_auto": ["PROCESSOR_MAPPING", "AutoProcessor"],
+    "tokenization_auto": ["TOKENIZER_MAPPING", "AutoTokenizer"],
+}
+
+try:
+    if not is_torch_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_auto"] = [
+        "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING",
+        "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING",
+        "MODEL_FOR_AUDIO_XVECTOR_MAPPING",
+        "MODEL_FOR_BACKBONE_MAPPING",
+        "MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING",
+        "MODEL_FOR_CAUSAL_LM_MAPPING",
+        "MODEL_FOR_CTC_MAPPING",
+        "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING",
+        "MODEL_FOR_DEPTH_ESTIMATION_MAPPING",
+        "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
+        "MODEL_FOR_IMAGE_MAPPING",
+        "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING",
+        "MODEL_FOR_IMAGE_TO_IMAGE_MAPPING",
+        "MODEL_FOR_KEYPOINT_DETECTION_MAPPING",
+        "MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING",
+        "MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING",
+        "MODEL_FOR_MASKED_LM_MAPPING",
+        "MODEL_FOR_MASK_GENERATION_MAPPING",
+        "MODEL_FOR_MULTIPLE_CHOICE_MAPPING",
+        "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING",
+        "MODEL_FOR_OBJECT_DETECTION_MAPPING",
+        "MODEL_FOR_PRETRAINING_MAPPING",
+        "MODEL_FOR_QUESTION_ANSWERING_MAPPING",
+        "MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING",
+        "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING",
+        "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING",
+        "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING",
+        "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING",
+        "MODEL_FOR_TEXT_ENCODING_MAPPING",
+        "MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING",
+        "MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING",
+        "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING",
+        "MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING",
+        "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING",
+        "MODEL_FOR_VISION_2_SEQ_MAPPING",
+        "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING",
+        "MODEL_MAPPING",
+        "MODEL_WITH_LM_HEAD_MAPPING",
+        "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING",
+        "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING",
+        "MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING",
+        "MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING",
+        "AutoModel",
+        "AutoBackbone",
+        "AutoModelForAudioClassification",
+        "AutoModelForAudioFrameClassification",
+        "AutoModelForAudioXVector",
+        "AutoModelForCausalLM",
+        "AutoModelForCTC",
+        "AutoModelForDepthEstimation",
+        "AutoModelForImageClassification",
+        "AutoModelForImageSegmentation",
+        "AutoModelForImageToImage",
+        "AutoModelForInstanceSegmentation",
+        "AutoModelForKeypointDetection",
+        "AutoModelForMaskGeneration",
+        "AutoModelForTextEncoding",
+        "AutoModelForMaskedImageModeling",
+        "AutoModelForMaskedLM",
+        "AutoModelForMultipleChoice",
+        "AutoModelForNextSentencePrediction",
+        "AutoModelForObjectDetection",
+        "AutoModelForPreTraining",
+        "AutoModelForQuestionAnswering",
+        "AutoModelForSemanticSegmentation",
+        "AutoModelForSeq2SeqLM",
+        "AutoModelForSequenceClassification",
+        "AutoModelForSpeechSeq2Seq",
+        "AutoModelForTableQuestionAnswering",
+        "AutoModelForTextToSpectrogram",
+        "AutoModelForTextToWaveform",
+        "AutoModelForTokenClassification",
+        "AutoModelForUniversalSegmentation",
+        "AutoModelForVideoClassification",
+        "AutoModelForVision2Seq",
+        "AutoModelForVisualQuestionAnswering",
+        "AutoModelForDocumentQuestionAnswering",
+        "AutoModelWithLMHead",
+        "AutoModelForZeroShotImageClassification",
+        "AutoModelForZeroShotObjectDetection",
+    ]
+
+try:
+    if not is_tf_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_tf_auto"] = [
+        "TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING",
+        "TF_MODEL_FOR_CAUSAL_LM_MAPPING",
+        "TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
+        "TF_MODEL_FOR_MASK_GENERATION_MAPPING",
+        "TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING",
+        "TF_MODEL_FOR_MASKED_LM_MAPPING",
+        "TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING",
+        "TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING",
+        "TF_MODEL_FOR_PRETRAINING_MAPPING",
+        "TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING",
+        "TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING",
+        "TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING",
+        "TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING",
+        "TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING",
+        "TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING",
+        "TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING",
+        "TF_MODEL_FOR_TEXT_ENCODING_MAPPING",
+        "TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING",
+        "TF_MODEL_FOR_VISION_2_SEQ_MAPPING",
+        "TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING",
+        "TF_MODEL_MAPPING",
+        "TF_MODEL_WITH_LM_HEAD_MAPPING",
+        "TFAutoModel",
+        "TFAutoModelForAudioClassification",
+        "TFAutoModelForCausalLM",
+        "TFAutoModelForImageClassification",
+        "TFAutoModelForMaskedImageModeling",
+        "TFAutoModelForMaskedLM",
+        "TFAutoModelForMaskGeneration",
+        "TFAutoModelForMultipleChoice",
+        "TFAutoModelForNextSentencePrediction",
+        "TFAutoModelForPreTraining",
+        "TFAutoModelForDocumentQuestionAnswering",
+        "TFAutoModelForQuestionAnswering",
+        "TFAutoModelForSemanticSegmentation",
+        "TFAutoModelForSeq2SeqLM",
+        "TFAutoModelForSequenceClassification",
+        "TFAutoModelForSpeechSeq2Seq",
+        "TFAutoModelForTableQuestionAnswering",
+        "TFAutoModelForTextEncoding",
+        "TFAutoModelForTokenClassification",
+        "TFAutoModelForVision2Seq",
+        "TFAutoModelForZeroShotImageClassification",
+        "TFAutoModelWithLMHead",
+    ]
+
+try:
+    if not is_flax_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_flax_auto"] = [
+        "FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING",
+        "FLAX_MODEL_FOR_CAUSAL_LM_MAPPING",
+        "FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
+        "FLAX_MODEL_FOR_MASKED_LM_MAPPING",
+        "FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING",
+        "FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING",
+        "FLAX_MODEL_FOR_PRETRAINING_MAPPING",
+        "FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING",
+        "FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING",
+        "FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING",
+        "FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING",
+        "FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING",
+        "FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING",
+        "FLAX_MODEL_MAPPING",
+        "FlaxAutoModel",
+        "FlaxAutoModelForCausalLM",
+        "FlaxAutoModelForImageClassification",
+        "FlaxAutoModelForMaskedLM",
+        "FlaxAutoModelForMultipleChoice",
+        "FlaxAutoModelForNextSentencePrediction",
+        "FlaxAutoModelForPreTraining",
+        "FlaxAutoModelForQuestionAnswering",
+        "FlaxAutoModelForSeq2SeqLM",
+        "FlaxAutoModelForSequenceClassification",
+        "FlaxAutoModelForSpeechSeq2Seq",
+        "FlaxAutoModelForTokenClassification",
+        "FlaxAutoModelForVision2Seq",
+    ]
+
+
+if TYPE_CHECKING:
+    from .auto_factory import get_values
+    from .configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, CONFIG_MAPPING, MODEL_NAMES_MAPPING, AutoConfig
+    from .feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING, AutoFeatureExtractor
+    from .image_processing_auto import IMAGE_PROCESSOR_MAPPING, AutoImageProcessor
+    from .processing_auto import PROCESSOR_MAPPING, AutoProcessor
+    from .tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer
+
+    try:
+        if not is_torch_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_auto import (
+            MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING,
+            MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING,
+            MODEL_FOR_AUDIO_XVECTOR_MAPPING,
+            MODEL_FOR_BACKBONE_MAPPING,
+            MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING,
+            MODEL_FOR_CAUSAL_LM_MAPPING,
+            MODEL_FOR_CTC_MAPPING,
+            MODEL_FOR_DEPTH_ESTIMATION_MAPPING,
+            MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING,
+            MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
+            MODEL_FOR_IMAGE_MAPPING,
+            MODEL_FOR_IMAGE_SEGMENTATION_MAPPING,
+            MODEL_FOR_IMAGE_TO_IMAGE_MAPPING,
+            MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING,
+            MODEL_FOR_KEYPOINT_DETECTION_MAPPING,
+            MODEL_FOR_MASK_GENERATION_MAPPING,
+            MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
+            MODEL_FOR_MASKED_LM_MAPPING,
+            MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
+            MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
+            MODEL_FOR_OBJECT_DETECTION_MAPPING,
+            MODEL_FOR_PRETRAINING_MAPPING,
+            MODEL_FOR_QUESTION_ANSWERING_MAPPING,
+            MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING,
+            MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
+            MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
+            MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
+            MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
+            MODEL_FOR_TEXT_ENCODING_MAPPING,
+            MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING,
+            MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING,
+            MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING,
+            MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING,
+            MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
+            MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING,
+            MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
+            MODEL_FOR_VISION_2_SEQ_MAPPING,
+            MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING,
+            MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING,
+            MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING,
+            MODEL_MAPPING,
+            MODEL_WITH_LM_HEAD_MAPPING,
+            AutoBackbone,
+            AutoModel,
+            AutoModelForAudioClassification,
+            AutoModelForAudioFrameClassification,
+            AutoModelForAudioXVector,
+            AutoModelForCausalLM,
+            AutoModelForCTC,
+            AutoModelForDepthEstimation,
+            AutoModelForDocumentQuestionAnswering,
+            AutoModelForImageClassification,
+            AutoModelForImageSegmentation,
+            AutoModelForImageToImage,
+            AutoModelForInstanceSegmentation,
+            AutoModelForKeypointDetection,
+            AutoModelForMaskedImageModeling,
+            AutoModelForMaskedLM,
+            AutoModelForMaskGeneration,
+            AutoModelForMultipleChoice,
+            AutoModelForNextSentencePrediction,
+            AutoModelForObjectDetection,
+            AutoModelForPreTraining,
+            AutoModelForQuestionAnswering,
+            AutoModelForSemanticSegmentation,
+            AutoModelForSeq2SeqLM,
+            AutoModelForSequenceClassification,
+            AutoModelForSpeechSeq2Seq,
+            AutoModelForTableQuestionAnswering,
+            AutoModelForTextEncoding,
+            AutoModelForTextToSpectrogram,
+            AutoModelForTextToWaveform,
+            AutoModelForTokenClassification,
+            AutoModelForUniversalSegmentation,
+            AutoModelForVideoClassification,
+            AutoModelForVision2Seq,
+            AutoModelForVisualQuestionAnswering,
+            AutoModelForZeroShotImageClassification,
+            AutoModelForZeroShotObjectDetection,
+            AutoModelWithLMHead,
+        )
+
+    try:
+        if not is_tf_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_tf_auto import (
+            TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING,
+            TF_MODEL_FOR_CAUSAL_LM_MAPPING,
+            TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING,
+            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
+            TF_MODEL_FOR_MASK_GENERATION_MAPPING,
+            TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
+            TF_MODEL_FOR_MASKED_LM_MAPPING,
+            TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
+            TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
+            TF_MODEL_FOR_PRETRAINING_MAPPING,
+            TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
+            TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING,
+            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
+            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
+            TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
+            TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
+            TF_MODEL_FOR_TEXT_ENCODING_MAPPING,
+            TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
+            TF_MODEL_FOR_VISION_2_SEQ_MAPPING,
+            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING,
+            TF_MODEL_MAPPING,
+            TF_MODEL_WITH_LM_HEAD_MAPPING,
+            TFAutoModel,
+            TFAutoModelForAudioClassification,
+            TFAutoModelForCausalLM,
+            TFAutoModelForDocumentQuestionAnswering,
+            TFAutoModelForImageClassification,
+            TFAutoModelForMaskedImageModeling,
+            TFAutoModelForMaskedLM,
+            TFAutoModelForMaskGeneration,
+            TFAutoModelForMultipleChoice,
+            TFAutoModelForNextSentencePrediction,
+            TFAutoModelForPreTraining,
+            TFAutoModelForQuestionAnswering,
+            TFAutoModelForSemanticSegmentation,
+            TFAutoModelForSeq2SeqLM,
+            TFAutoModelForSequenceClassification,
+            TFAutoModelForSpeechSeq2Seq,
+            TFAutoModelForTableQuestionAnswering,
+            TFAutoModelForTextEncoding,
+            TFAutoModelForTokenClassification,
+            TFAutoModelForVision2Seq,
+            TFAutoModelForZeroShotImageClassification,
+            TFAutoModelWithLMHead,
+        )
+
+    try:
+        if not is_flax_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_flax_auto import (
+            FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING,
+            FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
+            FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
+            FLAX_MODEL_FOR_MASKED_LM_MAPPING,
+            FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
+            FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
+            FLAX_MODEL_FOR_PRETRAINING_MAPPING,
+            FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
+            FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
+            FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
+            FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
+            FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
+            FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING,
+            FLAX_MODEL_MAPPING,
+            FlaxAutoModel,
+            FlaxAutoModelForCausalLM,
+            FlaxAutoModelForImageClassification,
+            FlaxAutoModelForMaskedLM,
+            FlaxAutoModelForMultipleChoice,
+            FlaxAutoModelForNextSentencePrediction,
+            FlaxAutoModelForPreTraining,
+            FlaxAutoModelForQuestionAnswering,
+            FlaxAutoModelForSeq2SeqLM,
+            FlaxAutoModelForSequenceClassification,
+            FlaxAutoModelForSpeechSeq2Seq,
+            FlaxAutoModelForTokenClassification,
+            FlaxAutoModelForVision2Seq,
+        )
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
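The __init__.py above is built around transformers' lazy-import convention: exported names are declared in _import_structure, real imports happen only under TYPE_CHECKING, and at runtime the module object is swapped for a _LazyModule that resolves each attribute on first access. A stripped-down sketch of the same idea using only the standard library (the LazyModule class here is my own illustration, not the transformers implementation):

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve attributes to submodule imports on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Invert {submodule: [symbols]} into {symbol: submodule}.
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol: str):
        module_name = self._symbol_to_module.get(symbol)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        # Import the defining submodule only now, on first use.
        module = importlib.import_module(f".{module_name}", self.__name__)
        value = getattr(module, symbol)
        setattr(self, symbol, value)  # cache so __getattr__ runs once per symbol
        return value
```

The payoff is that importing the package costs almost nothing; heavy submodules such as modeling_auto are loaded only when something like AutoModel is actually touched, and the optional-dependency try/except blocks keep torch-, TF-, and Flax-only names out of the structure when those frameworks are absent.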
diff --git a/venv/lib/python3.10/site-packages/transformers/models/auto/configuration_auto.py b/venv/lib/python3.10/site-packages/transformers/models/auto/configuration_auto.py
new file mode 100644
index 0000000000000000000000000000000000000000..29a52ba755f023698aca4d9b329e731b15f8a0ab
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/auto/configuration_auto.py
@@ -0,0 +1,984 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Auto Config class."""
+import importlib
+import os
+import re
+import warnings
+from collections import OrderedDict
+from typing import List, Union
+
+from ...configuration_utils import PretrainedConfig
+from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
+from ...utils import CONFIG_NAME, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import CONFIG_ARCHIVE_MAP_MAPPING_NAMES  # noqa: F401, E402
+
+
+CONFIG_MAPPING_NAMES = OrderedDict(
+    [
+        # Add configs here
+        ("albert", "AlbertConfig"),
+        ("align", "AlignConfig"),
+        ("altclip", "AltCLIPConfig"),
+        ("audio-spectrogram-transformer", "ASTConfig"),
+        ("autoformer", "AutoformerConfig"),
+        ("bark", "BarkConfig"),
+        ("bart", "BartConfig"),
+        ("beit", "BeitConfig"),
+        ("bert", "BertConfig"),
+        ("bert-generation", "BertGenerationConfig"),
+        ("big_bird", "BigBirdConfig"),
+        ("bigbird_pegasus", "BigBirdPegasusConfig"),
+        ("biogpt", "BioGptConfig"),
+        ("bit", "BitConfig"),
+        ("blenderbot", "BlenderbotConfig"),
+        ("blenderbot-small", "BlenderbotSmallConfig"),
+        ("blip", "BlipConfig"),
+        ("blip-2", "Blip2Config"),
+        ("bloom", "BloomConfig"),
+        ("bridgetower", "BridgeTowerConfig"),
+        ("bros", "BrosConfig"),
+        ("camembert", "CamembertConfig"),
+        ("canine", "CanineConfig"),
+        ("chinese_clip", "ChineseCLIPConfig"),
+        ("chinese_clip_vision_model", "ChineseCLIPVisionConfig"),
+        ("clap", "ClapConfig"),
+        ("clip", "CLIPConfig"),
+        ("clip_vision_model", "CLIPVisionConfig"),
+        ("clipseg", "CLIPSegConfig"),
+        ("clvp", "ClvpConfig"),
+        ("code_llama", "LlamaConfig"),
+        ("codegen", "CodeGenConfig"),
+        ("cohere", "CohereConfig"),
+        ("conditional_detr", "ConditionalDetrConfig"),
+        ("convbert", "ConvBertConfig"),
+        ("convnext", "ConvNextConfig"),
+        ("convnextv2", "ConvNextV2Config"),
+        ("cpmant", "CpmAntConfig"),
+        ("ctrl", "CTRLConfig"),
+        ("cvt", "CvtConfig"),
+        ("data2vec-audio", "Data2VecAudioConfig"),
+        ("data2vec-text", "Data2VecTextConfig"),
+        ("data2vec-vision", "Data2VecVisionConfig"),
+        ("dbrx", "DbrxConfig"),
+        ("deberta", "DebertaConfig"),
+        ("deberta-v2", "DebertaV2Config"),
+        ("decision_transformer", "DecisionTransformerConfig"),
+        ("deformable_detr", "DeformableDetrConfig"),
+        ("deit", "DeiTConfig"),
+        ("depth_anything", "DepthAnythingConfig"),
+        ("deta", "DetaConfig"),
+        ("detr", "DetrConfig"),
+        ("dinat", "DinatConfig"),
+        ("dinov2", "Dinov2Config"),
+        ("distilbert", "DistilBertConfig"),
+        ("donut-swin", "DonutSwinConfig"),
+        ("dpr", "DPRConfig"),
+        ("dpt", "DPTConfig"),
+        ("efficientformer", "EfficientFormerConfig"),
+        ("efficientnet", "EfficientNetConfig"),
+        ("electra", "ElectraConfig"),
+        ("encodec", "EncodecConfig"),
+        ("encoder-decoder", "EncoderDecoderConfig"),
+        ("ernie", "ErnieConfig"),
+        ("ernie_m", "ErnieMConfig"),
+        ("esm", "EsmConfig"),
+        ("falcon", "FalconConfig"),
+        ("fastspeech2_conformer", "FastSpeech2ConformerConfig"),
+        ("flaubert", "FlaubertConfig"),
+        ("flava", "FlavaConfig"),
+        ("fnet", "FNetConfig"),
+        ("focalnet", "FocalNetConfig"),
+        ("fsmt", "FSMTConfig"),
+        ("funnel", "FunnelConfig"),
+        ("fuyu", "FuyuConfig"),
+        ("gemma", "GemmaConfig"),
+        ("git", "GitConfig"),
+        ("glpn", "GLPNConfig"),
+        ("gpt-sw3", "GPT2Config"),
+        ("gpt2", "GPT2Config"),
+        ("gpt_bigcode", "GPTBigCodeConfig"),
+        ("gpt_neo", "GPTNeoConfig"),
+        ("gpt_neox", "GPTNeoXConfig"),
+        ("gpt_neox_japanese", "GPTNeoXJapaneseConfig"),
+        ("gptj", "GPTJConfig"),
+        ("gptsan-japanese", "GPTSanJapaneseConfig"),
+        ("graphormer", "GraphormerConfig"),
+        ("grounding-dino", "GroundingDinoConfig"),
+        ("groupvit", "GroupViTConfig"),
+        ("hubert", "HubertConfig"),
+        ("ibert", "IBertConfig"),
+        ("idefics", "IdeficsConfig"),
+        ("idefics2", "Idefics2Config"),
+        ("imagegpt", "ImageGPTConfig"),
+        ("informer", "InformerConfig"),
+        ("instructblip", "InstructBlipConfig"),
+        ("jamba", "JambaConfig"),
+        ("jukebox", "JukeboxConfig"),
+        ("kosmos-2", "Kosmos2Config"),
+        ("layoutlm", "LayoutLMConfig"),
+        ("layoutlmv2", "LayoutLMv2Config"),
+        ("layoutlmv3", "LayoutLMv3Config"),
+        ("led", "LEDConfig"),
+        ("levit", "LevitConfig"),
+        ("lilt", "LiltConfig"),
+        ("llama", "LlamaConfig"),
+        ("llava", "LlavaConfig"),
+        ("llava_next", "LlavaNextConfig"),
+        ("longformer", "LongformerConfig"),
+        ("longt5", "LongT5Config"),
+        ("luke", "LukeConfig"),
+        ("lxmert", "LxmertConfig"),
+        ("m2m_100", "M2M100Config"),
+        ("mamba", "MambaConfig"),
+        ("marian", "MarianConfig"),
+        ("markuplm", "MarkupLMConfig"),
+        ("mask2former", "Mask2FormerConfig"),
+        ("maskformer", "MaskFormerConfig"),
+        ("maskformer-swin", "MaskFormerSwinConfig"),
+        ("mbart", "MBartConfig"),
+        ("mctct", "MCTCTConfig"),
+        ("mega", "MegaConfig"),
+        ("megatron-bert", "MegatronBertConfig"),
+        ("mgp-str", "MgpstrConfig"),
+        ("mistral", "MistralConfig"),
+        ("mixtral", "MixtralConfig"),
+        ("mobilebert", "MobileBertConfig"),
+        ("mobilenet_v1", "MobileNetV1Config"),
+        ("mobilenet_v2", "MobileNetV2Config"),
+        ("mobilevit", "MobileViTConfig"),
+        ("mobilevitv2", "MobileViTV2Config"),
+        ("mpnet", "MPNetConfig"),
+        ("mpt", "MptConfig"),
+        ("mra", "MraConfig"),
+        ("mt5", "MT5Config"),
+        ("musicgen", "MusicgenConfig"),
+        ("musicgen_melody", "MusicgenMelodyConfig"),
+        ("mvp", "MvpConfig"),
+        ("nat", "NatConfig"),
+        ("nezha", "NezhaConfig"),
+        ("nllb-moe", "NllbMoeConfig"),
+        ("nougat", "VisionEncoderDecoderConfig"),
+        ("nystromformer", "NystromformerConfig"),
+        ("olmo", "OlmoConfig"),
+        ("oneformer", "OneFormerConfig"),
+        ("open-llama", "OpenLlamaConfig"),
+        ("openai-gpt", "OpenAIGPTConfig"),
+        ("opt", "OPTConfig"),
+        ("owlv2", "Owlv2Config"),
+        ("owlvit", "OwlViTConfig"),
+        ("patchtsmixer", "PatchTSMixerConfig"),
+        ("patchtst", "PatchTSTConfig"),
+        ("pegasus", "PegasusConfig"),
+        ("pegasus_x", "PegasusXConfig"),
+        ("perceiver", "PerceiverConfig"),
+        ("persimmon", "PersimmonConfig"),
+        ("phi", "PhiConfig"),
+        ("pix2struct", "Pix2StructConfig"),
+        ("plbart", "PLBartConfig"),
+        ("poolformer", "PoolFormerConfig"),
+        ("pop2piano", "Pop2PianoConfig"),
+        ("prophetnet", "ProphetNetConfig"),
+        ("pvt", "PvtConfig"),
+        ("pvt_v2", "PvtV2Config"),
+        ("qdqbert", "QDQBertConfig"),
+        ("qwen2", "Qwen2Config"),
+        ("qwen2_moe", "Qwen2MoeConfig"),
+        ("rag", "RagConfig"),
+        ("realm", "RealmConfig"),
+        ("recurrent_gemma", "RecurrentGemmaConfig"),
+        ("reformer", "ReformerConfig"),
+        ("regnet", "RegNetConfig"),
+        ("rembert", "RemBertConfig"),
+        ("resnet", "ResNetConfig"),
+        ("retribert", "RetriBertConfig"),
+        ("roberta", "RobertaConfig"),
+        ("roberta-prelayernorm", "RobertaPreLayerNormConfig"),
+        ("roc_bert", "RoCBertConfig"),
+        ("roformer", "RoFormerConfig"),
+        ("rwkv", "RwkvConfig"),
+        ("sam", "SamConfig"),
+        ("seamless_m4t", "SeamlessM4TConfig"),
+        ("seamless_m4t_v2", "SeamlessM4Tv2Config"),
+        ("segformer", "SegformerConfig"),
+        ("seggpt", "SegGptConfig"),
+        ("sew", "SEWConfig"),
+        ("sew-d", "SEWDConfig"),
+        ("siglip", "SiglipConfig"),
+        ("siglip_vision_model", "SiglipVisionConfig"),
+        ("speech-encoder-decoder", "SpeechEncoderDecoderConfig"),
+        ("speech_to_text", "Speech2TextConfig"),
+        ("speech_to_text_2", "Speech2Text2Config"),
+        ("speecht5", "SpeechT5Config"),
+        ("splinter", "SplinterConfig"),
+        ("squeezebert", "SqueezeBertConfig"),
+        ("stablelm", "StableLmConfig"),
+        ("starcoder2", "Starcoder2Config"),
+        ("superpoint", "SuperPointConfig"),
+        ("swiftformer", "SwiftFormerConfig"),
+        ("swin", "SwinConfig"),
+        ("swin2sr", "Swin2SRConfig"),
+        ("swinv2", "Swinv2Config"),
+        ("switch_transformers", "SwitchTransformersConfig"),
+        ("t5", "T5Config"),
+        ("table-transformer", "TableTransformerConfig"),
+        ("tapas", "TapasConfig"),
+        ("time_series_transformer", "TimeSeriesTransformerConfig"),
+        ("timesformer", "TimesformerConfig"),
+        ("timm_backbone", "TimmBackboneConfig"),
+        ("trajectory_transformer", "TrajectoryTransformerConfig"),
+        ("transfo-xl", "TransfoXLConfig"),
+        ("trocr", "TrOCRConfig"),
+        ("tvlt", "TvltConfig"),
+        ("tvp", "TvpConfig"),
+        ("udop", "UdopConfig"),
+        ("umt5", "UMT5Config"),
+        ("unispeech", "UniSpeechConfig"),
+        ("unispeech-sat", "UniSpeechSatConfig"),
+        ("univnet", "UnivNetConfig"),
+        ("upernet", "UperNetConfig"),
+        ("van", "VanConfig"),
+        ("videomae", "VideoMAEConfig"),
+        ("vilt", "ViltConfig"),
+        ("vipllava", "VipLlavaConfig"),
+        ("vision-encoder-decoder", "VisionEncoderDecoderConfig"),
+        ("vision-text-dual-encoder", "VisionTextDualEncoderConfig"),
+        ("visual_bert", "VisualBertConfig"),
+        ("vit", "ViTConfig"),
+        ("vit_hybrid", "ViTHybridConfig"),
+        ("vit_mae", "ViTMAEConfig"),
+        ("vit_msn", "ViTMSNConfig"),
+        ("vitdet", "VitDetConfig"),
+        ("vitmatte", "VitMatteConfig"),
+        ("vits", "VitsConfig"),
+        ("vivit", "VivitConfig"),
+        ("wav2vec2", "Wav2Vec2Config"),
+        ("wav2vec2-bert", "Wav2Vec2BertConfig"),
+        ("wav2vec2-conformer", "Wav2Vec2ConformerConfig"),
+        ("wavlm", "WavLMConfig"),
+        ("whisper", "WhisperConfig"),
+        ("xclip", "XCLIPConfig"),
+        ("xglm", "XGLMConfig"),
+        ("xlm", "XLMConfig"),
+        ("xlm-prophetnet", "XLMProphetNetConfig"),
+        ("xlm-roberta", "XLMRobertaConfig"),
+        ("xlm-roberta-xl", "XLMRobertaXLConfig"),
+        ("xlnet", "XLNetConfig"),
+        ("xmod", "XmodConfig"),
+        ("yolos", "YolosConfig"),
+        ("yoso", "YosoConfig"),
+    ]
+)
+
+
+MODEL_NAMES_MAPPING = OrderedDict(
+    [
+        # Add full (and cased) model names here
+        ("albert", "ALBERT"),
+        ("align", "ALIGN"),
+        ("altclip", "AltCLIP"),
+        ("audio-spectrogram-transformer", "Audio Spectrogram Transformer"),
+        ("autoformer", "Autoformer"),
+        ("bark", "Bark"),
+        ("bart", "BART"),
+        ("barthez", "BARThez"),
+        ("bartpho", "BARTpho"),
+        ("beit", "BEiT"),
+        ("bert", "BERT"),
+        ("bert-generation", "Bert Generation"),
+        ("bert-japanese", "BertJapanese"),
+        ("bertweet", "BERTweet"),
+        ("big_bird", "BigBird"),
+        ("bigbird_pegasus", "BigBird-Pegasus"),
+        ("biogpt", "BioGpt"),
+        ("bit", "BiT"),
+        ("blenderbot", "Blenderbot"),
+        ("blenderbot-small", "BlenderbotSmall"),
+        ("blip", "BLIP"),
+        ("blip-2", "BLIP-2"),
+        ("bloom", "BLOOM"),
+        ("bort", "BORT"),
+        ("bridgetower", "BridgeTower"),
+        ("bros", "BROS"),
+        ("byt5", "ByT5"),
+        ("camembert", "CamemBERT"),
+        ("canine", "CANINE"),
+        ("chinese_clip", "Chinese-CLIP"),
+        ("chinese_clip_vision_model", "ChineseCLIPVisionModel"),
+        ("clap", "CLAP"),
+        ("clip", "CLIP"),
+        ("clip_vision_model", "CLIPVisionModel"),
+        ("clipseg", "CLIPSeg"),
+        ("clvp", "CLVP"),
+        ("code_llama", "CodeLlama"),
+        ("codegen", "CodeGen"),
+        ("cohere", "Cohere"),
+        ("conditional_detr", "Conditional DETR"),
+        ("convbert", "ConvBERT"),
+        ("convnext", "ConvNeXT"),
+        ("convnextv2", "ConvNeXTV2"),
+        ("cpm", "CPM"),
+        ("cpmant", "CPM-Ant"),
+        ("ctrl", "CTRL"),
+        ("cvt", "CvT"),
+        ("data2vec-audio", "Data2VecAudio"),
+        ("data2vec-text", "Data2VecText"),
+        ("data2vec-vision", "Data2VecVision"),
+        ("dbrx", "DBRX"),
+        ("deberta", "DeBERTa"),
+        ("deberta-v2", "DeBERTa-v2"),
+        ("decision_transformer", "Decision Transformer"),
+        ("deformable_detr", "Deformable DETR"),
+        ("deit", "DeiT"),
+        ("deplot", "DePlot"),
+        ("depth_anything", "Depth Anything"),
+        ("deta", "DETA"),
+        ("detr", "DETR"),
+        ("dialogpt", "DialoGPT"),
+        ("dinat", "DiNAT"),
+        ("dinov2", "DINOv2"),
+        ("distilbert", "DistilBERT"),
+        ("dit", "DiT"),
+        ("donut-swin", "DonutSwin"),
+        ("dpr", "DPR"),
+        ("dpt", "DPT"),
+        ("efficientformer", "EfficientFormer"),
+        ("efficientnet", "EfficientNet"),
+        ("electra", "ELECTRA"),
+        ("encodec", "EnCodec"),
+        ("encoder-decoder", "Encoder decoder"),
+        ("ernie", "ERNIE"),
+        ("ernie_m", "ErnieM"),
+        ("esm", "ESM"),
+        ("falcon", "Falcon"),
+        ("fastspeech2_conformer", "FastSpeech2Conformer"),
+        ("flan-t5", "FLAN-T5"),
+        ("flan-ul2", "FLAN-UL2"),
+        ("flaubert", "FlauBERT"),
+        ("flava", "FLAVA"),
+        ("fnet", "FNet"),
+        ("focalnet", "FocalNet"),
+        ("fsmt", "FairSeq Machine-Translation"),
+        ("funnel", "Funnel Transformer"),
+        ("fuyu", "Fuyu"),
+        ("gemma", "Gemma"),
+        ("git", "GIT"),
+        ("glpn", "GLPN"),
+        ("gpt-sw3", "GPT-Sw3"),
+        ("gpt2", "OpenAI GPT-2"),
+        ("gpt_bigcode", "GPTBigCode"),
+        ("gpt_neo", "GPT Neo"),
+        ("gpt_neox", "GPT NeoX"),
+        ("gpt_neox_japanese", "GPT NeoX Japanese"),
+        ("gptj", "GPT-J"),
+        ("gptsan-japanese", "GPTSAN-japanese"),
+        ("graphormer", "Graphormer"),
+        ("grounding-dino", "Grounding DINO"),
+        ("groupvit", "GroupViT"),
+        ("herbert", "HerBERT"),
+        ("hubert", "Hubert"),
+        ("ibert", "I-BERT"),
+        ("idefics", "IDEFICS"),
+        ("idefics2", "Idefics2"),
+        ("imagegpt", "ImageGPT"),
+        ("informer", "Informer"),
+        ("instructblip", "InstructBLIP"),
+        ("jamba", "Jamba"),
+        ("jukebox", "Jukebox"),
+        ("kosmos-2", "KOSMOS-2"),
+        ("layoutlm", "LayoutLM"),
+        ("layoutlmv2", "LayoutLMv2"),
+        ("layoutlmv3", "LayoutLMv3"),
+        ("layoutxlm", "LayoutXLM"),
+        ("led", "LED"),
+        ("levit", "LeViT"),
+        ("lilt", "LiLT"),
+        ("llama", "LLaMA"),
+        ("llama2", "Llama2"),
+        ("llava", "LLaVa"),
+        ("llava_next", "LLaVA-NeXT"),
+        ("longformer", "Longformer"),
+        ("longt5", "LongT5"),
+        ("luke", "LUKE"),
+        ("lxmert", "LXMERT"),
+        ("m2m_100", "M2M100"),
+        ("madlad-400", "MADLAD-400"),
+        ("mamba", "Mamba"),
+        ("marian", "Marian"),
+        ("markuplm", "MarkupLM"),
+        ("mask2former", "Mask2Former"),
+        ("maskformer", "MaskFormer"),
+        ("maskformer-swin", "MaskFormerSwin"),
+        ("matcha", "MatCha"),
+        ("mbart", "mBART"),
+        ("mbart50", "mBART-50"),
+        ("mctct", "M-CTC-T"),
+        ("mega", "MEGA"),
+        ("megatron-bert", "Megatron-BERT"),
+        ("megatron_gpt2", "Megatron-GPT2"),
+        ("mgp-str", "MGP-STR"),
+        ("mistral", "Mistral"),
+        ("mixtral", "Mixtral"),
+        ("mluke", "mLUKE"),
+        ("mms", "MMS"),
+        ("mobilebert", "MobileBERT"),
+        ("mobilenet_v1", "MobileNetV1"),
+        ("mobilenet_v2", "MobileNetV2"),
+        ("mobilevit", "MobileViT"),
+        ("mobilevitv2", "MobileViTV2"),
+        ("mpnet", "MPNet"),
+        ("mpt", "MPT"),
+        ("mra", "MRA"),
+        ("mt5", "MT5"),
+        ("musicgen", "MusicGen"),
+        ("musicgen_melody", "MusicGen Melody"),
+        ("mvp", "MVP"),
+        ("nat", "NAT"),
+        ("nezha", "Nezha"),
+        ("nllb", "NLLB"),
+        ("nllb-moe", "NLLB-MOE"),
+        ("nougat", "Nougat"),
+        ("nystromformer", "Nyströmformer"),
+        ("olmo", "OLMo"),
+        ("oneformer", "OneFormer"),
+        ("open-llama", "OpenLlama"),
+        ("openai-gpt", "OpenAI GPT"),
+        ("opt", "OPT"),
+        ("owlv2", "OWLv2"),
+        ("owlvit", "OWL-ViT"),
+        ("patchtsmixer", "PatchTSMixer"),
+        ("patchtst", "PatchTST"),
+        ("pegasus", "Pegasus"),
+        ("pegasus_x", "PEGASUS-X"),
+        ("perceiver", "Perceiver"),
+        ("persimmon", "Persimmon"),
+        ("phi", "Phi"),
+        ("phobert", "PhoBERT"),
+        ("pix2struct", "Pix2Struct"),
+        ("plbart", "PLBart"),
+        ("poolformer", "PoolFormer"),
+        ("pop2piano", "Pop2Piano"),
+        ("prophetnet", "ProphetNet"),
+        ("pvt", "PVT"),
+        ("pvt_v2", "PVTv2"),
+        ("qdqbert", "QDQBert"),
+        ("qwen2", "Qwen2"),
+        ("qwen2_moe", "Qwen2MoE"),
+        ("rag", "RAG"),
+        ("realm", "REALM"),
+        ("recurrent_gemma", "RecurrentGemma"),
+        ("reformer", "Reformer"),
+        ("regnet", "RegNet"),
+        ("rembert", "RemBERT"),
+        ("resnet", "ResNet"),
+        ("retribert", "RetriBERT"),
+        ("roberta", "RoBERTa"),
+        ("roberta-prelayernorm", "RoBERTa-PreLayerNorm"),
+        ("roc_bert", "RoCBert"),
+        ("roformer", "RoFormer"),
+        ("rwkv", "RWKV"),
+        ("sam", "SAM"),
+        ("seamless_m4t", "SeamlessM4T"),
+        ("seamless_m4t_v2", "SeamlessM4Tv2"),
+        ("segformer", "SegFormer"),
+        ("seggpt", "SegGPT"),
+        ("sew", "SEW"),
+        ("sew-d", "SEW-D"),
+        ("siglip", "SigLIP"),
+        ("siglip_vision_model", "SiglipVisionModel"),
+        ("speech-encoder-decoder", "Speech Encoder decoder"),
+        ("speech_to_text", "Speech2Text"),
+        ("speech_to_text_2", "Speech2Text2"),
+        ("speecht5", "SpeechT5"),
+        ("splinter", "Splinter"),
+        ("squeezebert", "SqueezeBERT"),
+        ("stablelm", "StableLm"),
+        ("starcoder2", "Starcoder2"),
+        ("superpoint", "SuperPoint"),
+        ("swiftformer", "SwiftFormer"),
+        ("swin", "Swin Transformer"),
+        ("swin2sr", "Swin2SR"),
+        ("swinv2", "Swin Transformer V2"),
+        ("switch_transformers", "SwitchTransformers"),
+        ("t5", "T5"),
+        ("t5v1.1", "T5v1.1"),
+        ("table-transformer", "Table Transformer"),
+        ("tapas", "TAPAS"),
+        ("tapex", "TAPEX"),
+        ("time_series_transformer", "Time Series Transformer"),
+        ("timesformer", "TimeSformer"),
+        ("timm_backbone", "TimmBackbone"),
+        ("trajectory_transformer", "Trajectory Transformer"),
+        ("transfo-xl", "Transformer-XL"),
+        ("trocr", "TrOCR"),
+        ("tvlt", "TVLT"),
+        ("tvp", "TVP"),
+        ("udop", "UDOP"),
+        ("ul2", "UL2"),
+        ("umt5", "UMT5"),
+        ("unispeech", "UniSpeech"),
+        ("unispeech-sat", "UniSpeechSat"),
+        ("univnet", "UnivNet"),
+        ("upernet", "UPerNet"),
+        ("van", "VAN"),
+        ("videomae", "VideoMAE"),
+        ("vilt", "ViLT"),
+        ("vipllava", "VipLlava"),
+        ("vision-encoder-decoder", "Vision Encoder decoder"),
+        ("vision-text-dual-encoder", "VisionTextDualEncoder"),
+        ("visual_bert", "VisualBERT"),
+        ("vit", "ViT"),
+        ("vit_hybrid", "ViT Hybrid"),
+        ("vit_mae", "ViTMAE"),
+        ("vit_msn", "ViTMSN"),
+        ("vitdet", "VitDet"),
+        ("vitmatte", "ViTMatte"),
+        ("vits", "VITS"),
+        ("vivit", "ViViT"),
+        ("wav2vec2", "Wav2Vec2"),
+        ("wav2vec2-bert", "Wav2Vec2-BERT"),
+        ("wav2vec2-conformer", "Wav2Vec2-Conformer"),
+        ("wav2vec2_phoneme", "Wav2Vec2Phoneme"),
+        ("wavlm", "WavLM"),
+        ("whisper", "Whisper"),
+        ("xclip", "X-CLIP"),
+        ("xglm", "XGLM"),
+        ("xlm", "XLM"),
+        ("xlm-prophetnet", "XLM-ProphetNet"),
+        ("xlm-roberta", "XLM-RoBERTa"),
+        ("xlm-roberta-xl", "XLM-RoBERTa-XL"),
+        ("xlm-v", "XLM-V"),
+        ("xlnet", "XLNet"),
+        ("xls_r", "XLS-R"),
+        ("xlsr_wav2vec2", "XLSR-Wav2Vec2"),
+        ("xmod", "X-MOD"),
+        ("yolos", "YOLOS"),
+        ("yoso", "YOSO"),
+    ]
+)
+
+# This is tied to the
processing `-` -> `_` in `model_type_to_module_name`. For example, instead of putting +# `transfo-xl` (as in `CONFIG_MAPPING_NAMES`), we should use `transfo_xl`. +DEPRECATED_MODELS = [ + "bort", + "mctct", + "mmbt", + "open_llama", + "retribert", + "tapex", + "trajectory_transformer", + "transfo_xl", + "van", +] + +SPECIAL_MODEL_TYPE_TO_MODULE_NAME = OrderedDict( + [ + ("openai-gpt", "openai"), + ("data2vec-audio", "data2vec"), + ("data2vec-text", "data2vec"), + ("data2vec-vision", "data2vec"), + ("donut-swin", "donut"), + ("kosmos-2", "kosmos2"), + ("maskformer-swin", "maskformer"), + ("xclip", "x_clip"), + ("clip_vision_model", "clip"), + ("siglip_vision_model", "siglip"), + ("chinese_clip_vision_model", "chinese_clip"), + ] +) + + +def model_type_to_module_name(key): + """Converts a config key to the corresponding module.""" + # Special treatment + if key in SPECIAL_MODEL_TYPE_TO_MODULE_NAME: + return SPECIAL_MODEL_TYPE_TO_MODULE_NAME[key] + + key = key.replace("-", "_") + if key in DEPRECATED_MODELS: + key = f"deprecated.{key}" + + return key + + +def config_class_to_model_type(config): + """Converts a config class name to the corresponding model type.""" + for key, cls in CONFIG_MAPPING_NAMES.items(): + if cls == config: + return key + # if the key is not found, check the extra content registered at runtime + for key, cls in CONFIG_MAPPING._extra_content.items(): + if cls.__name__ == config: + return key + return None + + +class _LazyConfigMapping(OrderedDict): + """ + A dictionary that lazily loads its values when they are requested. + """ + + def __init__(self, mapping): + self._mapping = mapping + self._extra_content = {} + self._modules = {} + + def __getitem__(self, key): + if key in self._extra_content: + return self._extra_content[key] + if key not in self._mapping: + raise KeyError(key) + value = self._mapping[key] + module_name = model_type_to_module_name(key) + if module_name not in self._modules: + self._modules[module_name] = importlib.import_module(f".{module_name}", "transformers.models") + if hasattr(self._modules[module_name], value): + return getattr(self._modules[module_name], value) + + # Some of the mappings have entries model_type -> config of another model type. In that case we try to grab the + # object at the top level. + transformers_module = importlib.import_module("transformers") + return getattr(transformers_module, value) + + def keys(self): + return list(self._mapping.keys()) + list(self._extra_content.keys()) + + def values(self): + return [self[k] for k in self._mapping.keys()] + list(self._extra_content.values()) + + def items(self): + return [(k, self[k]) for k in self._mapping.keys()] + list(self._extra_content.items()) + + def __iter__(self): + return iter(list(self._mapping.keys()) + list(self._extra_content.keys())) + + def __contains__(self, item): + return item in self._mapping or item in self._extra_content + + def register(self, key, value, exist_ok=False): + """ + Register a new configuration in this mapping. + """ + if key in self._mapping.keys() and not exist_ok: + raise ValueError(f"'{key}' is already used by a Transformers config, pick another name.") + self._extra_content[key] = value + + +CONFIG_MAPPING = _LazyConfigMapping(CONFIG_MAPPING_NAMES) + + +class _LazyLoadAllMappings(OrderedDict): + """ + A mapping that will load all pairs of key values at the first access (either by indexing, requesting keys, values, + etc.) + + Args: + mapping: The mapping to load.
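+ (Each value here names a per-model mapping constant; `_initialize` below imports it from the module given by `model_type_to_module_name` and merges all of them into a single dict on first access.)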
+ """ + + def __init__(self, mapping): + self._mapping = mapping + self._initialized = False + self._data = {} + + def _initialize(self): + if self._initialized: + return + + for model_type, map_name in self._mapping.items(): + module_name = model_type_to_module_name(model_type) + module = importlib.import_module(f".{module_name}", "transformers.models") + mapping = getattr(module, map_name) + self._data.update(mapping) + + self._initialized = True + + def __getitem__(self, key): + self._initialize() + return self._data[key] + + def keys(self): + self._initialize() + return self._data.keys() + + def values(self): + self._initialize() + return self._data.values() + + def items(self): + self._initialize() + return self._data.items() + + def __iter__(self): + self._initialize() + return iter(self._data) + + def __contains__(self, item): + self._initialize() + return item in self._data + + +def _get_class_name(model_class: Union[str, List[str]]): + if isinstance(model_class, (list, tuple)): + return " or ".join([f"[`{c}`]" for c in model_class if c is not None]) + return f"[`{model_class}`]" + + +def _list_model_options(indent, config_to_class=None, use_model_types=True): + if config_to_class is None and not use_model_types: + raise ValueError("Using `use_model_types=False` requires a `config_to_class` dictionary.") + if use_model_types: + if config_to_class is None: + model_type_to_name = {model_type: f"[`{config}`]" for model_type, config in CONFIG_MAPPING_NAMES.items()} + else: + model_type_to_name = { + model_type: _get_class_name(model_class) + for model_type, model_class in config_to_class.items() + if model_type in MODEL_NAMES_MAPPING + } + lines = [ + f"{indent}- **{model_type}** -- {model_type_to_name[model_type]} ({MODEL_NAMES_MAPPING[model_type]} model)" + for model_type in sorted(model_type_to_name.keys()) + ] + else: + config_to_name = { + CONFIG_MAPPING_NAMES[config]: _get_class_name(clas) + for config, clas in config_to_class.items() + if config in CONFIG_MAPPING_NAMES + } + config_to_model_name = { + config: MODEL_NAMES_MAPPING[model_type] for model_type, config in CONFIG_MAPPING_NAMES.items() + } + lines = [ + f"{indent}- [`{config_name}`] configuration class:" + f" {config_to_name[config_name]} ({config_to_model_name[config_name]} model)" + for config_name in sorted(config_to_name.keys()) + ] + return "\n".join(lines) + + +def replace_list_option_in_docstrings(config_to_class=None, use_model_types=True): + def docstring_decorator(fn): + docstrings = fn.__doc__ + if docstrings is None: + # Example: docstrings are stripped when Python runs with -OO + return fn + lines = docstrings.split("\n") + i = 0 + while i < len(lines) and re.search(r"^(\s*)List options\s*$", lines[i]) is None: + i += 1 + if i < len(lines): + indent = re.search(r"^(\s*)List options\s*$", lines[i]).groups()[0] + if use_model_types: + indent = f"{indent} " + lines[i] = _list_model_options(indent, config_to_class=config_to_class, use_model_types=use_model_types) + docstrings = "\n".join(lines) + else: + raise ValueError( + f"The function {fn} should have an empty 'List options' in its docstring as placeholder, current" + f" docstring is:\n{docstrings}" + ) + fn.__doc__ = docstrings + return fn + + return docstring_decorator + + +class AutoConfig: + r""" + This is a generic configuration class that will be instantiated as one of the configuration classes of the library + when created with the [`~AutoConfig.from_pretrained`] class method. + + This class cannot be instantiated directly using `__init__()` (throws an error).
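+ + Example (a minimal sketch; it assumes the `google-bert/bert-base-uncased` checkpoint used in the `from_pretrained` examples below is reachable): + + ```python + >>> from transformers import AutoConfig + + >>> # Resolved to a `BertConfig` through the `model_type` key ("bert") in the checkpoint's config.json. + >>> config = AutoConfig.from_pretrained("google-bert/bert-base-uncased") + + >>> # `for_model` instead builds a default configuration from a bare model type string. + >>> config = AutoConfig.for_model("bert") + ```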
+ """ + + def __init__(self): + raise EnvironmentError( + "AutoConfig is designed to be instantiated " + "using the `AutoConfig.from_pretrained(pretrained_model_name_or_path)` method." + ) + + @classmethod + def for_model(cls, model_type: str, *args, **kwargs): + if model_type in CONFIG_MAPPING: + config_class = CONFIG_MAPPING[model_type] + return config_class(*args, **kwargs) + raise ValueError( + f"Unrecognized model identifier: {model_type}. Should contain one of {', '.join(CONFIG_MAPPING.keys())}" + ) + + @classmethod + @replace_list_option_in_docstrings() + def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): + r""" + Instantiate one of the configuration classes of the library from a pretrained model configuration. + + The configuration class to instantiate is selected based on the `model_type` property of the config object that + is loaded, or when it's missing, by falling back to using pattern matching on `pretrained_model_name_or_path`: + + List options + + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + Can be either: + + - A string, the *model id* of a pretrained model configuration hosted inside a model repo on + huggingface.co. + - A path to a *directory* containing a configuration file saved using the + [`~PretrainedConfig.save_pretrained`] method, or the [`~PreTrainedModel.save_pretrained`] method, + e.g., `./my_model_directory/`. + - A path or url to a saved configuration JSON *file*, e.g., + `./my_model_directory/configuration.json`. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the + standard cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the model weights and configuration files and override the + cached versions if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received files. Will attempt to resume the download if such a + file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + return_unused_kwargs (`bool`, *optional*, defaults to `False`): + If `False`, then this function returns just the final configuration object. + + If `True`, then this function returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a + dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the + part of `kwargs` which has not been used to update `config` and is otherwise ignored. + trust_remote_code (`bool`, *optional*, defaults to `False`): + Whether or not to allow for custom models defined on the Hub in their own modeling files. This option + should only be set to `True` for repositories you trust and in which you have read the code, as it will + execute code present on the Hub on your local machine. + kwargs (additional keyword arguments, *optional*): + The values in kwargs of any keys which are configuration attributes will be used to override the loaded + values.
Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled + by the `return_unused_kwargs` keyword parameter. + + Examples: + + ```python + >>> from transformers import AutoConfig + + >>> # Download configuration from huggingface.co and cache. + >>> config = AutoConfig.from_pretrained("google-bert/bert-base-uncased") + + >>> # Download configuration from huggingface.co (user-uploaded) and cache. + >>> config = AutoConfig.from_pretrained("dbmdz/bert-base-german-cased") + + >>> # If configuration file is in a directory (e.g., was saved using *save_pretrained('./test/saved_model/')*). + >>> config = AutoConfig.from_pretrained("./test/bert_saved_model/") + + >>> # Load a specific configuration file. + >>> config = AutoConfig.from_pretrained("./test/bert_saved_model/my_configuration.json") + + >>> # Change some config attributes when loading a pretrained config. + >>> config = AutoConfig.from_pretrained("google-bert/bert-base-uncased", output_attentions=True, foo=False) + >>> config.output_attentions + True + + >>> config, unused_kwargs = AutoConfig.from_pretrained( + ... "google-bert/bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True + ... ) + >>> config.output_attentions + True + + >>> unused_kwargs + {'foo': False} + ```""" + use_auth_token = kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if kwargs.get("token", None) is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." + ) + kwargs["token"] = use_auth_token + + kwargs["_from_auto"] = True + kwargs["name_or_path"] = pretrained_model_name_or_path + trust_remote_code = kwargs.pop("trust_remote_code", None) + code_revision = kwargs.pop("code_revision", None) + + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + has_remote_code = "auto_map" in config_dict and "AutoConfig" in config_dict["auto_map"] + has_local_code = "model_type" in config_dict and config_dict["model_type"] in CONFIG_MAPPING + trust_remote_code = resolve_trust_remote_code( + trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code + ) + + if has_remote_code and trust_remote_code: + class_ref = config_dict["auto_map"]["AutoConfig"] + config_class = get_class_from_dynamic_module( + class_ref, pretrained_model_name_or_path, code_revision=code_revision, **kwargs + ) + if os.path.isdir(pretrained_model_name_or_path): + config_class.register_for_auto_class() + return config_class.from_pretrained(pretrained_model_name_or_path, **kwargs) + elif "model_type" in config_dict: + try: + config_class = CONFIG_MAPPING[config_dict["model_type"]] + except KeyError: + raise ValueError( + f"The checkpoint you are trying to load has model type `{config_dict['model_type']}` " + "but Transformers does not recognize this architecture. This could be because of an " + "issue with the checkpoint, or because your version of Transformers is out of date." + ) + return config_class.from_dict(config_dict, **unused_kwargs) + else: + # Fallback: use pattern matching on the string. 
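+ # (this branch is only reached when the checkpoint's config file has no `model_type` key at all)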
+ # We go from longer names to shorter names to catch roberta before bert (for instance) + for pattern in sorted(CONFIG_MAPPING.keys(), key=len, reverse=True): + if pattern in str(pretrained_model_name_or_path): + return CONFIG_MAPPING[pattern].from_dict(config_dict, **unused_kwargs) + + raise ValueError( + f"Unrecognized model in {pretrained_model_name_or_path}. " + f"Should have a `model_type` key in its {CONFIG_NAME}, or contain one of the following strings " + f"in its name: {', '.join(CONFIG_MAPPING.keys())}" + ) + + @staticmethod + def register(model_type, config, exist_ok=False): + """ + Register a new configuration for this class. + + Args: + model_type (`str`): The model type like "bert" or "gpt". + config ([`PretrainedConfig`]): The config to register. + """ + if issubclass(config, PretrainedConfig) and config.model_type != model_type: + raise ValueError( + "The config you are passing has a `model_type` attribute that is not consistent with the model type " + f"you passed (config has {config.model_type} and you passed {model_type}). Fix one of those so they " + "match!" + ) + CONFIG_MAPPING.register(model_type, config, exist_ok=exist_ok) + + +ALL_PRETRAINED_CONFIG_ARCHIVE_MAP = _LazyLoadAllMappings(CONFIG_ARCHIVE_MAP_MAPPING_NAMES) diff --git a/venv/lib/python3.10/site-packages/transformers/models/auto/feature_extraction_auto.py b/venv/lib/python3.10/site-packages/transformers/models/auto/feature_extraction_auto.py new file mode 100644 index 0000000000000000000000000000000000000000..f8cb55091b02fdc055ab21c3df48062462f243b9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/auto/feature_extraction_auto.py @@ -0,0 +1,396 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+""" AutoFeatureExtractor class.""" +import importlib +import json +import os +import warnings +from collections import OrderedDict +from typing import Dict, Optional, Union + +# Build the list of all feature extractors +from ...configuration_utils import PretrainedConfig +from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code +from ...feature_extraction_utils import FeatureExtractionMixin +from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging +from .auto_factory import _LazyAutoMapping +from .configuration_auto import ( + CONFIG_MAPPING_NAMES, + AutoConfig, + model_type_to_module_name, + replace_list_option_in_docstrings, +) + + +logger = logging.get_logger(__name__) + +FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict( + [ + ("audio-spectrogram-transformer", "ASTFeatureExtractor"), + ("beit", "BeitFeatureExtractor"), + ("chinese_clip", "ChineseCLIPFeatureExtractor"), + ("clap", "ClapFeatureExtractor"), + ("clip", "CLIPFeatureExtractor"), + ("clipseg", "ViTFeatureExtractor"), + ("clvp", "ClvpFeatureExtractor"), + ("conditional_detr", "ConditionalDetrFeatureExtractor"), + ("convnext", "ConvNextFeatureExtractor"), + ("cvt", "ConvNextFeatureExtractor"), + ("data2vec-audio", "Wav2Vec2FeatureExtractor"), + ("data2vec-vision", "BeitFeatureExtractor"), + ("deformable_detr", "DeformableDetrFeatureExtractor"), + ("deit", "DeiTFeatureExtractor"), + ("detr", "DetrFeatureExtractor"), + ("dinat", "ViTFeatureExtractor"), + ("donut-swin", "DonutFeatureExtractor"), + ("dpt", "DPTFeatureExtractor"), + ("encodec", "EncodecFeatureExtractor"), + ("flava", "FlavaFeatureExtractor"), + ("glpn", "GLPNFeatureExtractor"), + ("groupvit", "CLIPFeatureExtractor"), + ("hubert", "Wav2Vec2FeatureExtractor"), + ("imagegpt", "ImageGPTFeatureExtractor"), + ("layoutlmv2", "LayoutLMv2FeatureExtractor"), + ("layoutlmv3", "LayoutLMv3FeatureExtractor"), + ("levit", "LevitFeatureExtractor"), + ("maskformer", "MaskFormerFeatureExtractor"), + ("mctct", "MCTCTFeatureExtractor"), + ("mobilenet_v1", "MobileNetV1FeatureExtractor"), + ("mobilenet_v2", "MobileNetV2FeatureExtractor"), + ("mobilevit", "MobileViTFeatureExtractor"), + ("nat", "ViTFeatureExtractor"), + ("owlvit", "OwlViTFeatureExtractor"), + ("perceiver", "PerceiverFeatureExtractor"), + ("poolformer", "PoolFormerFeatureExtractor"), + ("pop2piano", "Pop2PianoFeatureExtractor"), + ("regnet", "ConvNextFeatureExtractor"), + ("resnet", "ConvNextFeatureExtractor"), + ("seamless_m4t", "SeamlessM4TFeatureExtractor"), + ("seamless_m4t_v2", "SeamlessM4TFeatureExtractor"), + ("segformer", "SegformerFeatureExtractor"), + ("sew", "Wav2Vec2FeatureExtractor"), + ("sew-d", "Wav2Vec2FeatureExtractor"), + ("speech_to_text", "Speech2TextFeatureExtractor"), + ("speecht5", "SpeechT5FeatureExtractor"), + ("swiftformer", "ViTFeatureExtractor"), + ("swin", "ViTFeatureExtractor"), + ("swinv2", "ViTFeatureExtractor"), + ("table-transformer", "DetrFeatureExtractor"), + ("timesformer", "VideoMAEFeatureExtractor"), + ("tvlt", "TvltFeatureExtractor"), + ("unispeech", "Wav2Vec2FeatureExtractor"), + ("unispeech-sat", "Wav2Vec2FeatureExtractor"), + ("univnet", "UnivNetFeatureExtractor"), + ("van", "ConvNextFeatureExtractor"), + ("videomae", "VideoMAEFeatureExtractor"), + ("vilt", "ViltFeatureExtractor"), + ("vit", "ViTFeatureExtractor"), + ("vit_mae", "ViTFeatureExtractor"), + ("vit_msn", "ViTFeatureExtractor"), + ("wav2vec2", "Wav2Vec2FeatureExtractor"), + ("wav2vec2-bert", "Wav2Vec2FeatureExtractor"), + ("wav2vec2-conformer",
"Wav2Vec2FeatureExtractor"), + ("wavlm", "Wav2Vec2FeatureExtractor"), + ("whisper", "WhisperFeatureExtractor"), + ("xclip", "CLIPFeatureExtractor"), + ("yolos", "YolosFeatureExtractor"), + ] +) + +FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) + + +def feature_extractor_class_from_name(class_name: str): + for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): + if class_name in extractors: + module_name = model_type_to_module_name(module_name) + + module = importlib.import_module(f".{module_name}", "transformers.models") + try: + return getattr(module, class_name) + except AttributeError: + continue + + for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): + if getattr(extractor, "__name__", None) == class_name: + return extractor + + # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main + # init and we return the proper dummy to get an appropriate error message. + main_module = importlib.import_module("transformers") + if hasattr(main_module, class_name): + return getattr(main_module, class_name) + + return None + + +def get_feature_extractor_config( + pretrained_model_name_or_path: Union[str, os.PathLike], + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + resume_download: bool = False, + proxies: Optional[Dict[str, str]] = None, + token: Optional[Union[bool, str]] = None, + revision: Optional[str] = None, + local_files_only: bool = False, + **kwargs, +): + """ + Loads the feature extractor configuration from a pretrained model feature extractor configuration. + + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained model configuration hosted inside a model repo on + huggingface.co. + - a path to a *directory* containing a configuration file saved using the + [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] method, e.g., `./my_model_directory/`. + + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the standard + cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the configuration files and override the cached versions if they + exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received files. Attempts to resume the download if such a file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, will only try to load the feature extractor configuration from local files. + + <Tip> + + Passing `token=True` is required when you want to use a private model.
+ + </Tip> + + Returns: + `Dict`: The configuration of the feature extractor. + + Examples: + + ```python + # Download configuration from huggingface.co and cache. + feature_extractor_config = get_feature_extractor_config("facebook/wav2vec2-base-960h") + # This model does not have a feature extractor config so the result will be an empty dict. + feature_extractor_config = get_feature_extractor_config("FacebookAI/xlm-roberta-base") + + # Save a pretrained feature extractor locally and you can reload its config + from transformers import AutoFeatureExtractor + + feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") + feature_extractor.save_pretrained("feature-extractor-test") + feature_extractor_config = get_feature_extractor_config("feature-extractor-test") + ```""" + use_auth_token = kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") + token = use_auth_token + + resolved_config_file = get_file_from_repo( + pretrained_model_name_or_path, + FEATURE_EXTRACTOR_NAME, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + token=token, + revision=revision, + local_files_only=local_files_only, + ) + if resolved_config_file is None: + logger.info( + "Could not locate the feature extractor configuration file, will try to use the model config instead." + ) + return {} + + with open(resolved_config_file, encoding="utf-8") as reader: + return json.load(reader) + + +class AutoFeatureExtractor: + r""" + This is a generic feature extractor class that will be instantiated as one of the feature extractor classes of the + library when created with the [`AutoFeatureExtractor.from_pretrained`] class method. + + This class cannot be instantiated directly using `__init__()` (throws an error). + """ + + def __init__(self): + raise EnvironmentError( + "AutoFeatureExtractor is designed to be instantiated " + "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." + ) + + @classmethod + @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES) + def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): + r""" + Instantiate one of the feature extractor classes of the library from a pretrained model feature extractor configuration. + + The feature extractor class to instantiate is selected based on the `model_type` property of the config object + (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's + missing, by falling back to using pattern matching on `pretrained_model_name_or_path`: + + List options + + Params: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained feature extractor hosted inside a model repo on + huggingface.co. + - a path to a *directory* containing a feature extractor file saved using the + [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] method, e.g., + `./my_model_directory/`. + - a path or url to a saved feature extractor JSON *file*, e.g., + `./my_model_directory/preprocessor_config.json`. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model feature extractor should be cached if the + standard cache should not be used.
+ force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force the (re-)download of the feature extractor files and override the cached versions + if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received files. Attempts to resume the download if such a file + exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + return_unused_kwargs (`bool`, *optional*, defaults to `False`): + If `False`, then this function returns just the final feature extractor object. If `True`, then this + function returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary + consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of + `kwargs` which has not been used to update `feature_extractor` and is otherwise ignored. + trust_remote_code (`bool`, *optional*, defaults to `False`): + Whether or not to allow for custom models defined on the Hub in their own modeling files. This option + should only be set to `True` for repositories you trust and in which you have read the code, as it will + execute code present on the Hub on your local machine. + kwargs (`Dict[str, Any]`, *optional*): + The values in kwargs of any keys which are feature extractor attributes will be used to override the + loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is + controlled by the `return_unused_kwargs` keyword parameter. + + <Tip> + + Passing `token=True` is required when you want to use a private model. + + </Tip> + + Examples: + + ```python + >>> from transformers import AutoFeatureExtractor + + >>> # Download feature extractor from huggingface.co and cache. + >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") + + >>> # If feature extractor files are in a directory (e.g. feature extractor was saved using *save_pretrained('./test/saved_model/')*) + >>> # feature_extractor = AutoFeatureExtractor.from_pretrained("./test/saved_model/") + ```""" + use_auth_token = kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if kwargs.get("token", None) is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
+ ) + kwargs["token"] = use_auth_token + + config = kwargs.pop("config", None) + trust_remote_code = kwargs.pop("trust_remote_code", None) + kwargs["_from_auto"] = True + + config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs) + feature_extractor_class = config_dict.get("feature_extractor_type", None) + feature_extractor_auto_map = None + if "AutoFeatureExtractor" in config_dict.get("auto_map", {}): + feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"] + + # If we don't find the feature extractor class in the feature extractor config, let's try the model config. + if feature_extractor_class is None and feature_extractor_auto_map is None: + if not isinstance(config, PretrainedConfig): + config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs) + # It could be in `config.feature_extractor_type` + feature_extractor_class = getattr(config, "feature_extractor_type", None) + if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map: + feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"] + + if feature_extractor_class is not None: + feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class) + + has_remote_code = feature_extractor_auto_map is not None + has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING + trust_remote_code = resolve_trust_remote_code( + trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code + ) + + if has_remote_code and trust_remote_code: + feature_extractor_class = get_class_from_dynamic_module( + feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs + ) + _ = kwargs.pop("code_revision", None) + if os.path.isdir(pretrained_model_name_or_path): + feature_extractor_class.register_for_auto_class() + return feature_extractor_class.from_dict(config_dict, **kwargs) + elif feature_extractor_class is not None: + return feature_extractor_class.from_dict(config_dict, **kwargs) + # Last try: we use the FEATURE_EXTRACTOR_MAPPING. + elif type(config) in FEATURE_EXTRACTOR_MAPPING: + feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)] + return feature_extractor_class.from_dict(config_dict, **kwargs) + + raise ValueError( + f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a " + f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} or {CONFIG_NAME}, or one of the following " + f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}" + ) + + @staticmethod + def register(config_class, feature_extractor_class, exist_ok=False): + """ + Register a new feature extractor for this class. + + Args: + config_class ([`PretrainedConfig`]): + The configuration corresponding to the model to register. + feature_extractor_class ([`FeatureExtractionMixin`]): The feature extractor to register. + """ + FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class, exist_ok=exist_ok) diff --git a/venv/lib/python3.10/site-packages/transformers/models/auto/modeling_auto.py b/venv/lib/python3.10/site-packages/transformers/models/auto/modeling_auto.py new file mode 100644 index 0000000000000000000000000000000000000000..dcc4829f3f6f1ed0ce6456960f61604bfb8bce09 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/auto/modeling_auto.py @@ -0,0 +1,1705 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Auto Model class.""" + +import warnings +from collections import OrderedDict + +from ...utils import logging +from .auto_factory import ( + _BaseAutoBackboneClass, + _BaseAutoModelClass, + _LazyAutoMapping, + auto_class_update, +) +from .configuration_auto import CONFIG_MAPPING_NAMES + + +logger = logging.get_logger(__name__) + +MODEL_MAPPING_NAMES = OrderedDict( + [ + # Base model mapping + ("albert", "AlbertModel"), + ("align", "AlignModel"), + ("altclip", "AltCLIPModel"), + ("audio-spectrogram-transformer", "ASTModel"), + ("autoformer", "AutoformerModel"), + ("bark", "BarkModel"), + ("bart", "BartModel"), + ("beit", "BeitModel"), + ("bert", "BertModel"), + ("bert-generation", "BertGenerationEncoder"), + ("big_bird", "BigBirdModel"), + ("bigbird_pegasus", "BigBirdPegasusModel"), + ("biogpt", "BioGptModel"), + ("bit", "BitModel"), + ("blenderbot", "BlenderbotModel"), + ("blenderbot-small", "BlenderbotSmallModel"), + ("blip", "BlipModel"), + ("blip-2", "Blip2Model"), + ("bloom", "BloomModel"), + ("bridgetower", "BridgeTowerModel"), + ("bros", "BrosModel"), + ("camembert", "CamembertModel"), + ("canine", "CanineModel"), + ("chinese_clip", "ChineseCLIPModel"), + ("chinese_clip_vision_model", "ChineseCLIPVisionModel"), + ("clap", "ClapModel"), + ("clip", "CLIPModel"), + ("clip_vision_model", "CLIPVisionModel"), + ("clipseg", "CLIPSegModel"), + ("clvp", "ClvpModelForConditionalGeneration"), + ("code_llama", "LlamaModel"), + ("codegen", "CodeGenModel"), + ("cohere", "CohereModel"), + ("conditional_detr", "ConditionalDetrModel"), + ("convbert", "ConvBertModel"), + ("convnext", "ConvNextModel"), + ("convnextv2", "ConvNextV2Model"), + ("cpmant", "CpmAntModel"), + ("ctrl", "CTRLModel"), + ("cvt", "CvtModel"), + ("data2vec-audio", "Data2VecAudioModel"), + ("data2vec-text", "Data2VecTextModel"), + ("data2vec-vision", "Data2VecVisionModel"), + ("dbrx", "DbrxModel"), + ("deberta", "DebertaModel"), + ("deberta-v2", "DebertaV2Model"), + ("decision_transformer", "DecisionTransformerModel"), + ("deformable_detr", "DeformableDetrModel"), + ("deit", "DeiTModel"), + ("deta", "DetaModel"), + ("detr", "DetrModel"), + ("dinat", "DinatModel"), + ("dinov2", "Dinov2Model"), + ("distilbert", "DistilBertModel"), + ("donut-swin", "DonutSwinModel"), + ("dpr", "DPRQuestionEncoder"), + ("dpt", "DPTModel"), + ("efficientformer", "EfficientFormerModel"), + ("efficientnet", "EfficientNetModel"), + ("electra", "ElectraModel"), + ("encodec", "EncodecModel"), + ("ernie", "ErnieModel"), + ("ernie_m", "ErnieMModel"), + ("esm", "EsmModel"), + ("falcon", "FalconModel"), + ("fastspeech2_conformer", "FastSpeech2ConformerModel"), + ("flaubert", "FlaubertModel"), + ("flava", "FlavaModel"), + ("fnet", "FNetModel"), + ("focalnet", "FocalNetModel"), + ("fsmt", "FSMTModel"), + ("funnel", ("FunnelModel", "FunnelBaseModel")), + ("gemma", "GemmaModel"), + ("git", "GitModel"), + ("glpn", "GLPNModel"), + ("gpt-sw3", "GPT2Model"), + ("gpt2", "GPT2Model"), + ("gpt_bigcode", "GPTBigCodeModel"), + 
("gpt_neo", "GPTNeoModel"), + ("gpt_neox", "GPTNeoXModel"), + ("gpt_neox_japanese", "GPTNeoXJapaneseModel"), + ("gptj", "GPTJModel"), + ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"), + ("graphormer", "GraphormerModel"), + ("grounding-dino", "GroundingDinoModel"), + ("groupvit", "GroupViTModel"), + ("hubert", "HubertModel"), + ("ibert", "IBertModel"), + ("idefics", "IdeficsModel"), + ("idefics2", "Idefics2Model"), + ("imagegpt", "ImageGPTModel"), + ("informer", "InformerModel"), + ("jamba", "JambaModel"), + ("jukebox", "JukeboxModel"), + ("kosmos-2", "Kosmos2Model"), + ("layoutlm", "LayoutLMModel"), + ("layoutlmv2", "LayoutLMv2Model"), + ("layoutlmv3", "LayoutLMv3Model"), + ("led", "LEDModel"), + ("levit", "LevitModel"), + ("lilt", "LiltModel"), + ("llama", "LlamaModel"), + ("longformer", "LongformerModel"), + ("longt5", "LongT5Model"), + ("luke", "LukeModel"), + ("lxmert", "LxmertModel"), + ("m2m_100", "M2M100Model"), + ("mamba", "MambaModel"), + ("marian", "MarianModel"), + ("markuplm", "MarkupLMModel"), + ("mask2former", "Mask2FormerModel"), + ("maskformer", "MaskFormerModel"), + ("maskformer-swin", "MaskFormerSwinModel"), + ("mbart", "MBartModel"), + ("mctct", "MCTCTModel"), + ("mega", "MegaModel"), + ("megatron-bert", "MegatronBertModel"), + ("mgp-str", "MgpstrForSceneTextRecognition"), + ("mistral", "MistralModel"), + ("mixtral", "MixtralModel"), + ("mobilebert", "MobileBertModel"), + ("mobilenet_v1", "MobileNetV1Model"), + ("mobilenet_v2", "MobileNetV2Model"), + ("mobilevit", "MobileViTModel"), + ("mobilevitv2", "MobileViTV2Model"), + ("mpnet", "MPNetModel"), + ("mpt", "MptModel"), + ("mra", "MraModel"), + ("mt5", "MT5Model"), + ("mvp", "MvpModel"), + ("nat", "NatModel"), + ("nezha", "NezhaModel"), + ("nllb-moe", "NllbMoeModel"), + ("nystromformer", "NystromformerModel"), + ("olmo", "OlmoModel"), + ("oneformer", "OneFormerModel"), + ("open-llama", "OpenLlamaModel"), + ("openai-gpt", "OpenAIGPTModel"), + ("opt", "OPTModel"), + ("owlv2", "Owlv2Model"), + ("owlvit", "OwlViTModel"), + ("patchtsmixer", "PatchTSMixerModel"), + ("patchtst", "PatchTSTModel"), + ("pegasus", "PegasusModel"), + ("pegasus_x", "PegasusXModel"), + ("perceiver", "PerceiverModel"), + ("persimmon", "PersimmonModel"), + ("phi", "PhiModel"), + ("plbart", "PLBartModel"), + ("poolformer", "PoolFormerModel"), + ("prophetnet", "ProphetNetModel"), + ("pvt", "PvtModel"), + ("pvt_v2", "PvtV2Model"), + ("qdqbert", "QDQBertModel"), + ("qwen2", "Qwen2Model"), + ("qwen2_moe", "Qwen2MoeModel"), + ("recurrent_gemma", "RecurrentGemmaModel"), + ("reformer", "ReformerModel"), + ("regnet", "RegNetModel"), + ("rembert", "RemBertModel"), + ("resnet", "ResNetModel"), + ("retribert", "RetriBertModel"), + ("roberta", "RobertaModel"), + ("roberta-prelayernorm", "RobertaPreLayerNormModel"), + ("roc_bert", "RoCBertModel"), + ("roformer", "RoFormerModel"), + ("rwkv", "RwkvModel"), + ("sam", "SamModel"), + ("seamless_m4t", "SeamlessM4TModel"), + ("seamless_m4t_v2", "SeamlessM4Tv2Model"), + ("segformer", "SegformerModel"), + ("seggpt", "SegGptModel"), + ("sew", "SEWModel"), + ("sew-d", "SEWDModel"), + ("siglip", "SiglipModel"), + ("siglip_vision_model", "SiglipVisionModel"), + ("speech_to_text", "Speech2TextModel"), + ("speecht5", "SpeechT5Model"), + ("splinter", "SplinterModel"), + ("squeezebert", "SqueezeBertModel"), + ("stablelm", "StableLmModel"), + ("starcoder2", "Starcoder2Model"), + ("swiftformer", "SwiftFormerModel"), + ("swin", "SwinModel"), + ("swin2sr", "Swin2SRModel"), + ("swinv2", "Swinv2Model"), + 
("switch_transformers", "SwitchTransformersModel"), + ("t5", "T5Model"), + ("table-transformer", "TableTransformerModel"), + ("tapas", "TapasModel"), + ("time_series_transformer", "TimeSeriesTransformerModel"), + ("timesformer", "TimesformerModel"), + ("timm_backbone", "TimmBackbone"), + ("trajectory_transformer", "TrajectoryTransformerModel"), + ("transfo-xl", "TransfoXLModel"), + ("tvlt", "TvltModel"), + ("tvp", "TvpModel"), + ("udop", "UdopModel"), + ("umt5", "UMT5Model"), + ("unispeech", "UniSpeechModel"), + ("unispeech-sat", "UniSpeechSatModel"), + ("univnet", "UnivNetModel"), + ("van", "VanModel"), + ("videomae", "VideoMAEModel"), + ("vilt", "ViltModel"), + ("vision-text-dual-encoder", "VisionTextDualEncoderModel"), + ("visual_bert", "VisualBertModel"), + ("vit", "ViTModel"), + ("vit_hybrid", "ViTHybridModel"), + ("vit_mae", "ViTMAEModel"), + ("vit_msn", "ViTMSNModel"), + ("vitdet", "VitDetModel"), + ("vits", "VitsModel"), + ("vivit", "VivitModel"), + ("wav2vec2", "Wav2Vec2Model"), + ("wav2vec2-bert", "Wav2Vec2BertModel"), + ("wav2vec2-conformer", "Wav2Vec2ConformerModel"), + ("wavlm", "WavLMModel"), + ("whisper", "WhisperModel"), + ("xclip", "XCLIPModel"), + ("xglm", "XGLMModel"), + ("xlm", "XLMModel"), + ("xlm-prophetnet", "XLMProphetNetModel"), + ("xlm-roberta", "XLMRobertaModel"), + ("xlm-roberta-xl", "XLMRobertaXLModel"), + ("xlnet", "XLNetModel"), + ("xmod", "XmodModel"), + ("yolos", "YolosModel"), + ("yoso", "YosoModel"), + ] +) + +MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict( + [ + # Model for pre-training mapping + ("albert", "AlbertForPreTraining"), + ("bart", "BartForConditionalGeneration"), + ("bert", "BertForPreTraining"), + ("big_bird", "BigBirdForPreTraining"), + ("bloom", "BloomForCausalLM"), + ("camembert", "CamembertForMaskedLM"), + ("ctrl", "CTRLLMHeadModel"), + ("data2vec-text", "Data2VecTextForMaskedLM"), + ("deberta", "DebertaForMaskedLM"), + ("deberta-v2", "DebertaV2ForMaskedLM"), + ("distilbert", "DistilBertForMaskedLM"), + ("electra", "ElectraForPreTraining"), + ("ernie", "ErnieForPreTraining"), + ("flaubert", "FlaubertWithLMHeadModel"), + ("flava", "FlavaForPreTraining"), + ("fnet", "FNetForPreTraining"), + ("fsmt", "FSMTForConditionalGeneration"), + ("funnel", "FunnelForPreTraining"), + ("gpt-sw3", "GPT2LMHeadModel"), + ("gpt2", "GPT2LMHeadModel"), + ("gpt_bigcode", "GPTBigCodeForCausalLM"), + ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"), + ("ibert", "IBertForMaskedLM"), + ("idefics", "IdeficsForVisionText2Text"), + ("idefics2", "Idefics2ForConditionalGeneration"), + ("layoutlm", "LayoutLMForMaskedLM"), + ("llava", "LlavaForConditionalGeneration"), + ("llava_next", "LlavaNextForConditionalGeneration"), + ("longformer", "LongformerForMaskedLM"), + ("luke", "LukeForMaskedLM"), + ("lxmert", "LxmertForPreTraining"), + ("mamba", "MambaForCausalLM"), + ("mega", "MegaForMaskedLM"), + ("megatron-bert", "MegatronBertForPreTraining"), + ("mobilebert", "MobileBertForPreTraining"), + ("mpnet", "MPNetForMaskedLM"), + ("mpt", "MptForCausalLM"), + ("mra", "MraForMaskedLM"), + ("mvp", "MvpForConditionalGeneration"), + ("nezha", "NezhaForPreTraining"), + ("nllb-moe", "NllbMoeForConditionalGeneration"), + ("openai-gpt", "OpenAIGPTLMHeadModel"), + ("retribert", "RetriBertModel"), + ("roberta", "RobertaForMaskedLM"), + ("roberta-prelayernorm", "RobertaPreLayerNormForMaskedLM"), + ("roc_bert", "RoCBertForPreTraining"), + ("rwkv", "RwkvForCausalLM"), + ("splinter", "SplinterForPreTraining"), + ("squeezebert", "SqueezeBertForMaskedLM"), + 
("switch_transformers", "SwitchTransformersForConditionalGeneration"), + ("t5", "T5ForConditionalGeneration"), + ("tapas", "TapasForMaskedLM"), + ("transfo-xl", "TransfoXLLMHeadModel"), + ("tvlt", "TvltForPreTraining"), + ("unispeech", "UniSpeechForPreTraining"), + ("unispeech-sat", "UniSpeechSatForPreTraining"), + ("videomae", "VideoMAEForPreTraining"), + ("vipllava", "VipLlavaForConditionalGeneration"), + ("visual_bert", "VisualBertForPreTraining"), + ("vit_mae", "ViTMAEForPreTraining"), + ("wav2vec2", "Wav2Vec2ForPreTraining"), + ("wav2vec2-conformer", "Wav2Vec2ConformerForPreTraining"), + ("xlm", "XLMWithLMHeadModel"), + ("xlm-roberta", "XLMRobertaForMaskedLM"), + ("xlm-roberta-xl", "XLMRobertaXLForMaskedLM"), + ("xlnet", "XLNetLMHeadModel"), + ("xmod", "XmodForMaskedLM"), + ] +) + +MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict( + [ + # Model with LM heads mapping + ("albert", "AlbertForMaskedLM"), + ("bart", "BartForConditionalGeneration"), + ("bert", "BertForMaskedLM"), + ("big_bird", "BigBirdForMaskedLM"), + ("bigbird_pegasus", "BigBirdPegasusForConditionalGeneration"), + ("blenderbot-small", "BlenderbotSmallForConditionalGeneration"), + ("bloom", "BloomForCausalLM"), + ("camembert", "CamembertForMaskedLM"), + ("codegen", "CodeGenForCausalLM"), + ("convbert", "ConvBertForMaskedLM"), + ("cpmant", "CpmAntForCausalLM"), + ("ctrl", "CTRLLMHeadModel"), + ("data2vec-text", "Data2VecTextForMaskedLM"), + ("deberta", "DebertaForMaskedLM"), + ("deberta-v2", "DebertaV2ForMaskedLM"), + ("distilbert", "DistilBertForMaskedLM"), + ("electra", "ElectraForMaskedLM"), + ("encoder-decoder", "EncoderDecoderModel"), + ("ernie", "ErnieForMaskedLM"), + ("esm", "EsmForMaskedLM"), + ("flaubert", "FlaubertWithLMHeadModel"), + ("fnet", "FNetForMaskedLM"), + ("fsmt", "FSMTForConditionalGeneration"), + ("funnel", "FunnelForMaskedLM"), + ("git", "GitForCausalLM"), + ("gpt-sw3", "GPT2LMHeadModel"), + ("gpt2", "GPT2LMHeadModel"), + ("gpt_bigcode", "GPTBigCodeForCausalLM"), + ("gpt_neo", "GPTNeoForCausalLM"), + ("gpt_neox", "GPTNeoXForCausalLM"), + ("gpt_neox_japanese", "GPTNeoXJapaneseForCausalLM"), + ("gptj", "GPTJForCausalLM"), + ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"), + ("ibert", "IBertForMaskedLM"), + ("layoutlm", "LayoutLMForMaskedLM"), + ("led", "LEDForConditionalGeneration"), + ("longformer", "LongformerForMaskedLM"), + ("longt5", "LongT5ForConditionalGeneration"), + ("luke", "LukeForMaskedLM"), + ("m2m_100", "M2M100ForConditionalGeneration"), + ("mamba", "MambaForCausalLM"), + ("marian", "MarianMTModel"), + ("mega", "MegaForMaskedLM"), + ("megatron-bert", "MegatronBertForCausalLM"), + ("mobilebert", "MobileBertForMaskedLM"), + ("mpnet", "MPNetForMaskedLM"), + ("mpt", "MptForCausalLM"), + ("mra", "MraForMaskedLM"), + ("mvp", "MvpForConditionalGeneration"), + ("nezha", "NezhaForMaskedLM"), + ("nllb-moe", "NllbMoeForConditionalGeneration"), + ("nystromformer", "NystromformerForMaskedLM"), + ("openai-gpt", "OpenAIGPTLMHeadModel"), + ("pegasus_x", "PegasusXForConditionalGeneration"), + ("plbart", "PLBartForConditionalGeneration"), + ("pop2piano", "Pop2PianoForConditionalGeneration"), + ("qdqbert", "QDQBertForMaskedLM"), + ("reformer", "ReformerModelWithLMHead"), + ("rembert", "RemBertForMaskedLM"), + ("roberta", "RobertaForMaskedLM"), + ("roberta-prelayernorm", "RobertaPreLayerNormForMaskedLM"), + ("roc_bert", "RoCBertForMaskedLM"), + ("roformer", "RoFormerForMaskedLM"), + ("rwkv", "RwkvForCausalLM"), + ("speech_to_text", "Speech2TextForConditionalGeneration"), + ("squeezebert", 
"SqueezeBertForMaskedLM"), + ("switch_transformers", "SwitchTransformersForConditionalGeneration"), + ("t5", "T5ForConditionalGeneration"), + ("tapas", "TapasForMaskedLM"), + ("transfo-xl", "TransfoXLLMHeadModel"), + ("wav2vec2", "Wav2Vec2ForMaskedLM"), + ("whisper", "WhisperForConditionalGeneration"), + ("xlm", "XLMWithLMHeadModel"), + ("xlm-roberta", "XLMRobertaForMaskedLM"), + ("xlm-roberta-xl", "XLMRobertaXLForMaskedLM"), + ("xlnet", "XLNetLMHeadModel"), + ("xmod", "XmodForMaskedLM"), + ("yoso", "YosoForMaskedLM"), + ] +) + +MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict( + [ + # Model for Causal LM mapping + ("bart", "BartForCausalLM"), + ("bert", "BertLMHeadModel"), + ("bert-generation", "BertGenerationDecoder"), + ("big_bird", "BigBirdForCausalLM"), + ("bigbird_pegasus", "BigBirdPegasusForCausalLM"), + ("biogpt", "BioGptForCausalLM"), + ("blenderbot", "BlenderbotForCausalLM"), + ("blenderbot-small", "BlenderbotSmallForCausalLM"), + ("bloom", "BloomForCausalLM"), + ("camembert", "CamembertForCausalLM"), + ("code_llama", "LlamaForCausalLM"), + ("codegen", "CodeGenForCausalLM"), + ("cohere", "CohereForCausalLM"), + ("cpmant", "CpmAntForCausalLM"), + ("ctrl", "CTRLLMHeadModel"), + ("data2vec-text", "Data2VecTextForCausalLM"), + ("dbrx", "DbrxForCausalLM"), + ("electra", "ElectraForCausalLM"), + ("ernie", "ErnieForCausalLM"), + ("falcon", "FalconForCausalLM"), + ("fuyu", "FuyuForCausalLM"), + ("gemma", "GemmaForCausalLM"), + ("git", "GitForCausalLM"), + ("gpt-sw3", "GPT2LMHeadModel"), + ("gpt2", "GPT2LMHeadModel"), + ("gpt_bigcode", "GPTBigCodeForCausalLM"), + ("gpt_neo", "GPTNeoForCausalLM"), + ("gpt_neox", "GPTNeoXForCausalLM"), + ("gpt_neox_japanese", "GPTNeoXJapaneseForCausalLM"), + ("gptj", "GPTJForCausalLM"), + ("jamba", "JambaForCausalLM"), + ("llama", "LlamaForCausalLM"), + ("mamba", "MambaForCausalLM"), + ("marian", "MarianForCausalLM"), + ("mbart", "MBartForCausalLM"), + ("mega", "MegaForCausalLM"), + ("megatron-bert", "MegatronBertForCausalLM"), + ("mistral", "MistralForCausalLM"), + ("mixtral", "MixtralForCausalLM"), + ("mpt", "MptForCausalLM"), + ("musicgen", "MusicgenForCausalLM"), + ("musicgen_melody", "MusicgenMelodyForCausalLM"), + ("mvp", "MvpForCausalLM"), + ("olmo", "OlmoForCausalLM"), + ("open-llama", "OpenLlamaForCausalLM"), + ("openai-gpt", "OpenAIGPTLMHeadModel"), + ("opt", "OPTForCausalLM"), + ("pegasus", "PegasusForCausalLM"), + ("persimmon", "PersimmonForCausalLM"), + ("phi", "PhiForCausalLM"), + ("plbart", "PLBartForCausalLM"), + ("prophetnet", "ProphetNetForCausalLM"), + ("qdqbert", "QDQBertLMHeadModel"), + ("qwen2", "Qwen2ForCausalLM"), + ("qwen2_moe", "Qwen2MoeForCausalLM"), + ("recurrent_gemma", "RecurrentGemmaForCausalLM"), + ("reformer", "ReformerModelWithLMHead"), + ("rembert", "RemBertForCausalLM"), + ("roberta", "RobertaForCausalLM"), + ("roberta-prelayernorm", "RobertaPreLayerNormForCausalLM"), + ("roc_bert", "RoCBertForCausalLM"), + ("roformer", "RoFormerForCausalLM"), + ("rwkv", "RwkvForCausalLM"), + ("speech_to_text_2", "Speech2Text2ForCausalLM"), + ("stablelm", "StableLmForCausalLM"), + ("starcoder2", "Starcoder2ForCausalLM"), + ("transfo-xl", "TransfoXLLMHeadModel"), + ("trocr", "TrOCRForCausalLM"), + ("whisper", "WhisperForCausalLM"), + ("xglm", "XGLMForCausalLM"), + ("xlm", "XLMWithLMHeadModel"), + ("xlm-prophetnet", "XLMProphetNetForCausalLM"), + ("xlm-roberta", "XLMRobertaForCausalLM"), + ("xlm-roberta-xl", "XLMRobertaXLForCausalLM"), + ("xlnet", "XLNetLMHeadModel"), + ("xmod", "XmodForCausalLM"), + ] +) + +MODEL_FOR_IMAGE_MAPPING_NAMES = 
OrderedDict( + [ + # Model for Image mapping + ("beit", "BeitModel"), + ("bit", "BitModel"), + ("conditional_detr", "ConditionalDetrModel"), + ("convnext", "ConvNextModel"), + ("convnextv2", "ConvNextV2Model"), + ("data2vec-vision", "Data2VecVisionModel"), + ("deformable_detr", "DeformableDetrModel"), + ("deit", "DeiTModel"), + ("deta", "DetaModel"), + ("detr", "DetrModel"), + ("dinat", "DinatModel"), + ("dinov2", "Dinov2Model"), + ("dpt", "DPTModel"), + ("efficientformer", "EfficientFormerModel"), + ("efficientnet", "EfficientNetModel"), + ("focalnet", "FocalNetModel"), + ("glpn", "GLPNModel"), + ("imagegpt", "ImageGPTModel"), + ("levit", "LevitModel"), + ("mobilenet_v1", "MobileNetV1Model"), + ("mobilenet_v2", "MobileNetV2Model"), + ("mobilevit", "MobileViTModel"), + ("mobilevitv2", "MobileViTV2Model"), + ("nat", "NatModel"), + ("poolformer", "PoolFormerModel"), + ("pvt", "PvtModel"), + ("regnet", "RegNetModel"), + ("resnet", "ResNetModel"), + ("segformer", "SegformerModel"), + ("siglip_vision_model", "SiglipVisionModel"), + ("swiftformer", "SwiftFormerModel"), + ("swin", "SwinModel"), + ("swin2sr", "Swin2SRModel"), + ("swinv2", "Swinv2Model"), + ("table-transformer", "TableTransformerModel"), + ("timesformer", "TimesformerModel"), + ("timm_backbone", "TimmBackbone"), + ("van", "VanModel"), + ("videomae", "VideoMAEModel"), + ("vit", "ViTModel"), + ("vit_hybrid", "ViTHybridModel"), + ("vit_mae", "ViTMAEModel"), + ("vit_msn", "ViTMSNModel"), + ("vitdet", "VitDetModel"), + ("vivit", "VivitModel"), + ("yolos", "YolosModel"), + ] +) + +MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES = OrderedDict( + [ + ("deit", "DeiTForMaskedImageModeling"), + ("focalnet", "FocalNetForMaskedImageModeling"), + ("swin", "SwinForMaskedImageModeling"), + ("swinv2", "Swinv2ForMaskedImageModeling"), + ("vit", "ViTForMaskedImageModeling"), + ] +) + + +MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES = OrderedDict( + # Model for Causal Image Modeling mapping + [ + ("imagegpt", "ImageGPTForCausalImageModeling"), + ] +) + +MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Image Classification mapping + ("beit", "BeitForImageClassification"), + ("bit", "BitForImageClassification"), + ("clip", "CLIPForImageClassification"), + ("convnext", "ConvNextForImageClassification"), + ("convnextv2", "ConvNextV2ForImageClassification"), + ("cvt", "CvtForImageClassification"), + ("data2vec-vision", "Data2VecVisionForImageClassification"), + ( + "deit", + ("DeiTForImageClassification", "DeiTForImageClassificationWithTeacher"), + ), + ("dinat", "DinatForImageClassification"), + ("dinov2", "Dinov2ForImageClassification"), + ( + "efficientformer", + ( + "EfficientFormerForImageClassification", + "EfficientFormerForImageClassificationWithTeacher", + ), + ), + ("efficientnet", "EfficientNetForImageClassification"), + ("focalnet", "FocalNetForImageClassification"), + ("imagegpt", "ImageGPTForImageClassification"), + ( + "levit", + ("LevitForImageClassification", "LevitForImageClassificationWithTeacher"), + ), + ("mobilenet_v1", "MobileNetV1ForImageClassification"), + ("mobilenet_v2", "MobileNetV2ForImageClassification"), + ("mobilevit", "MobileViTForImageClassification"), + ("mobilevitv2", "MobileViTV2ForImageClassification"), + ("nat", "NatForImageClassification"), + ( + "perceiver", + ( + "PerceiverForImageClassificationLearned", + "PerceiverForImageClassificationFourier", + "PerceiverForImageClassificationConvProcessing", + ), + ), + ("poolformer", "PoolFormerForImageClassification"), + ("pvt", 
"PvtForImageClassification"), + ("pvt_v2", "PvtV2ForImageClassification"), + ("regnet", "RegNetForImageClassification"), + ("resnet", "ResNetForImageClassification"), + ("segformer", "SegformerForImageClassification"), + ("siglip", "SiglipForImageClassification"), + ("swiftformer", "SwiftFormerForImageClassification"), + ("swin", "SwinForImageClassification"), + ("swinv2", "Swinv2ForImageClassification"), + ("van", "VanForImageClassification"), + ("vit", "ViTForImageClassification"), + ("vit_hybrid", "ViTHybridForImageClassification"), + ("vit_msn", "ViTMSNForImageClassification"), + ] +) + +MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES = OrderedDict( + [ + # Do not add new models here, this class will be deprecated in the future. + # Model for Image Segmentation mapping + ("detr", "DetrForSegmentation"), + ] +) + +MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Semantic Segmentation mapping + ("beit", "BeitForSemanticSegmentation"), + ("data2vec-vision", "Data2VecVisionForSemanticSegmentation"), + ("dpt", "DPTForSemanticSegmentation"), + ("mobilenet_v2", "MobileNetV2ForSemanticSegmentation"), + ("mobilevit", "MobileViTForSemanticSegmentation"), + ("mobilevitv2", "MobileViTV2ForSemanticSegmentation"), + ("segformer", "SegformerForSemanticSegmentation"), + ("upernet", "UperNetForSemanticSegmentation"), + ] +) + +MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Instance Segmentation mapping + # MaskFormerForInstanceSegmentation can be removed from this mapping in v5 + ("maskformer", "MaskFormerForInstanceSegmentation"), + ] +) + +MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Universal Segmentation mapping + ("detr", "DetrForSegmentation"), + ("mask2former", "Mask2FormerForUniversalSegmentation"), + ("maskformer", "MaskFormerForInstanceSegmentation"), + ("oneformer", "OneFormerForUniversalSegmentation"), + ] +) + +MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + ("timesformer", "TimesformerForVideoClassification"), + ("videomae", "VideoMAEForVideoClassification"), + ("vivit", "VivitForVideoClassification"), + ] +) + +MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict( + [ + ("blip", "BlipForConditionalGeneration"), + ("blip-2", "Blip2ForConditionalGeneration"), + ("git", "GitForCausalLM"), + ("idefics2", "Idefics2ForConditionalGeneration"), + ("instructblip", "InstructBlipForConditionalGeneration"), + ("kosmos-2", "Kosmos2ForConditionalGeneration"), + ("llava", "LlavaForConditionalGeneration"), + ("llava_next", "LlavaNextForConditionalGeneration"), + ("pix2struct", "Pix2StructForConditionalGeneration"), + ("vipllava", "VipLlavaForConditionalGeneration"), + ("vision-encoder-decoder", "VisionEncoderDecoderModel"), + ] +) + +MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict( + [ + # Model for Masked LM mapping + ("albert", "AlbertForMaskedLM"), + ("bart", "BartForConditionalGeneration"), + ("bert", "BertForMaskedLM"), + ("big_bird", "BigBirdForMaskedLM"), + ("camembert", "CamembertForMaskedLM"), + ("convbert", "ConvBertForMaskedLM"), + ("data2vec-text", "Data2VecTextForMaskedLM"), + ("deberta", "DebertaForMaskedLM"), + ("deberta-v2", "DebertaV2ForMaskedLM"), + ("distilbert", "DistilBertForMaskedLM"), + ("electra", "ElectraForMaskedLM"), + ("ernie", "ErnieForMaskedLM"), + ("esm", "EsmForMaskedLM"), + ("flaubert", "FlaubertWithLMHeadModel"), + ("fnet", "FNetForMaskedLM"), + ("funnel", "FunnelForMaskedLM"), + ("ibert", "IBertForMaskedLM"), + ("layoutlm", "LayoutLMForMaskedLM"), + 
("longformer", "LongformerForMaskedLM"), + ("luke", "LukeForMaskedLM"), + ("mbart", "MBartForConditionalGeneration"), + ("mega", "MegaForMaskedLM"), + ("megatron-bert", "MegatronBertForMaskedLM"), + ("mobilebert", "MobileBertForMaskedLM"), + ("mpnet", "MPNetForMaskedLM"), + ("mra", "MraForMaskedLM"), + ("mvp", "MvpForConditionalGeneration"), + ("nezha", "NezhaForMaskedLM"), + ("nystromformer", "NystromformerForMaskedLM"), + ("perceiver", "PerceiverForMaskedLM"), + ("qdqbert", "QDQBertForMaskedLM"), + ("reformer", "ReformerForMaskedLM"), + ("rembert", "RemBertForMaskedLM"), + ("roberta", "RobertaForMaskedLM"), + ("roberta-prelayernorm", "RobertaPreLayerNormForMaskedLM"), + ("roc_bert", "RoCBertForMaskedLM"), + ("roformer", "RoFormerForMaskedLM"), + ("squeezebert", "SqueezeBertForMaskedLM"), + ("tapas", "TapasForMaskedLM"), + ("wav2vec2", "Wav2Vec2ForMaskedLM"), + ("xlm", "XLMWithLMHeadModel"), + ("xlm-roberta", "XLMRobertaForMaskedLM"), + ("xlm-roberta-xl", "XLMRobertaXLForMaskedLM"), + ("xmod", "XmodForMaskedLM"), + ("yoso", "YosoForMaskedLM"), + ] +) + +MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES = OrderedDict( + [ + # Model for Object Detection mapping + ("conditional_detr", "ConditionalDetrForObjectDetection"), + ("deformable_detr", "DeformableDetrForObjectDetection"), + ("deta", "DetaForObjectDetection"), + ("detr", "DetrForObjectDetection"), + ("table-transformer", "TableTransformerForObjectDetection"), + ("yolos", "YolosForObjectDetection"), + ] +) + +MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES = OrderedDict( + [ + # Model for Zero Shot Object Detection mapping + ("grounding-dino", "GroundingDinoForObjectDetection"), + ("owlv2", "Owlv2ForObjectDetection"), + ("owlvit", "OwlViTForObjectDetection"), + ] +) + +MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES = OrderedDict( + [ + # Model for depth estimation mapping + ("depth_anything", "DepthAnythingForDepthEstimation"), + ("dpt", "DPTForDepthEstimation"), + ("glpn", "GLPNForDepthEstimation"), + ] +) +MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict( + [ + # Model for Seq2Seq Causal LM mapping + ("bart", "BartForConditionalGeneration"), + ("bigbird_pegasus", "BigBirdPegasusForConditionalGeneration"), + ("blenderbot", "BlenderbotForConditionalGeneration"), + ("blenderbot-small", "BlenderbotSmallForConditionalGeneration"), + ("encoder-decoder", "EncoderDecoderModel"), + ("fsmt", "FSMTForConditionalGeneration"), + ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"), + ("led", "LEDForConditionalGeneration"), + ("longt5", "LongT5ForConditionalGeneration"), + ("m2m_100", "M2M100ForConditionalGeneration"), + ("marian", "MarianMTModel"), + ("mbart", "MBartForConditionalGeneration"), + ("mt5", "MT5ForConditionalGeneration"), + ("mvp", "MvpForConditionalGeneration"), + ("nllb-moe", "NllbMoeForConditionalGeneration"), + ("pegasus", "PegasusForConditionalGeneration"), + ("pegasus_x", "PegasusXForConditionalGeneration"), + ("plbart", "PLBartForConditionalGeneration"), + ("prophetnet", "ProphetNetForConditionalGeneration"), + ("seamless_m4t", "SeamlessM4TForTextToText"), + ("seamless_m4t_v2", "SeamlessM4Tv2ForTextToText"), + ("switch_transformers", "SwitchTransformersForConditionalGeneration"), + ("t5", "T5ForConditionalGeneration"), + ("umt5", "UMT5ForConditionalGeneration"), + ("xlm-prophetnet", "XLMProphetNetForConditionalGeneration"), + ] +) + +MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict( + [ + ("pop2piano", "Pop2PianoForConditionalGeneration"), + ("seamless_m4t", "SeamlessM4TForSpeechToText"), + ("seamless_m4t_v2", 
"SeamlessM4Tv2ForSpeechToText"), + ("speech-encoder-decoder", "SpeechEncoderDecoderModel"), + ("speech_to_text", "Speech2TextForConditionalGeneration"), + ("speecht5", "SpeechT5ForSpeechToText"), + ("whisper", "WhisperForConditionalGeneration"), + ] +) + +MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Sequence Classification mapping + ("albert", "AlbertForSequenceClassification"), + ("bart", "BartForSequenceClassification"), + ("bert", "BertForSequenceClassification"), + ("big_bird", "BigBirdForSequenceClassification"), + ("bigbird_pegasus", "BigBirdPegasusForSequenceClassification"), + ("biogpt", "BioGptForSequenceClassification"), + ("bloom", "BloomForSequenceClassification"), + ("camembert", "CamembertForSequenceClassification"), + ("canine", "CanineForSequenceClassification"), + ("code_llama", "LlamaForSequenceClassification"), + ("convbert", "ConvBertForSequenceClassification"), + ("ctrl", "CTRLForSequenceClassification"), + ("data2vec-text", "Data2VecTextForSequenceClassification"), + ("deberta", "DebertaForSequenceClassification"), + ("deberta-v2", "DebertaV2ForSequenceClassification"), + ("distilbert", "DistilBertForSequenceClassification"), + ("electra", "ElectraForSequenceClassification"), + ("ernie", "ErnieForSequenceClassification"), + ("ernie_m", "ErnieMForSequenceClassification"), + ("esm", "EsmForSequenceClassification"), + ("falcon", "FalconForSequenceClassification"), + ("flaubert", "FlaubertForSequenceClassification"), + ("fnet", "FNetForSequenceClassification"), + ("funnel", "FunnelForSequenceClassification"), + ("gemma", "GemmaForSequenceClassification"), + ("gpt-sw3", "GPT2ForSequenceClassification"), + ("gpt2", "GPT2ForSequenceClassification"), + ("gpt_bigcode", "GPTBigCodeForSequenceClassification"), + ("gpt_neo", "GPTNeoForSequenceClassification"), + ("gpt_neox", "GPTNeoXForSequenceClassification"), + ("gptj", "GPTJForSequenceClassification"), + ("ibert", "IBertForSequenceClassification"), + ("jamba", "JambaForSequenceClassification"), + ("layoutlm", "LayoutLMForSequenceClassification"), + ("layoutlmv2", "LayoutLMv2ForSequenceClassification"), + ("layoutlmv3", "LayoutLMv3ForSequenceClassification"), + ("led", "LEDForSequenceClassification"), + ("lilt", "LiltForSequenceClassification"), + ("llama", "LlamaForSequenceClassification"), + ("longformer", "LongformerForSequenceClassification"), + ("luke", "LukeForSequenceClassification"), + ("markuplm", "MarkupLMForSequenceClassification"), + ("mbart", "MBartForSequenceClassification"), + ("mega", "MegaForSequenceClassification"), + ("megatron-bert", "MegatronBertForSequenceClassification"), + ("mistral", "MistralForSequenceClassification"), + ("mixtral", "MixtralForSequenceClassification"), + ("mobilebert", "MobileBertForSequenceClassification"), + ("mpnet", "MPNetForSequenceClassification"), + ("mpt", "MptForSequenceClassification"), + ("mra", "MraForSequenceClassification"), + ("mt5", "MT5ForSequenceClassification"), + ("mvp", "MvpForSequenceClassification"), + ("nezha", "NezhaForSequenceClassification"), + ("nystromformer", "NystromformerForSequenceClassification"), + ("open-llama", "OpenLlamaForSequenceClassification"), + ("openai-gpt", "OpenAIGPTForSequenceClassification"), + ("opt", "OPTForSequenceClassification"), + ("perceiver", "PerceiverForSequenceClassification"), + ("persimmon", "PersimmonForSequenceClassification"), + ("phi", "PhiForSequenceClassification"), + ("plbart", "PLBartForSequenceClassification"), + ("qdqbert", "QDQBertForSequenceClassification"), + ("qwen2", 
"Qwen2ForSequenceClassification"), + ("qwen2_moe", "Qwen2MoeForSequenceClassification"), + ("reformer", "ReformerForSequenceClassification"), + ("rembert", "RemBertForSequenceClassification"), + ("roberta", "RobertaForSequenceClassification"), + ("roberta-prelayernorm", "RobertaPreLayerNormForSequenceClassification"), + ("roc_bert", "RoCBertForSequenceClassification"), + ("roformer", "RoFormerForSequenceClassification"), + ("squeezebert", "SqueezeBertForSequenceClassification"), + ("stablelm", "StableLmForSequenceClassification"), + ("starcoder2", "Starcoder2ForSequenceClassification"), + ("t5", "T5ForSequenceClassification"), + ("tapas", "TapasForSequenceClassification"), + ("transfo-xl", "TransfoXLForSequenceClassification"), + ("umt5", "UMT5ForSequenceClassification"), + ("xlm", "XLMForSequenceClassification"), + ("xlm-roberta", "XLMRobertaForSequenceClassification"), + ("xlm-roberta-xl", "XLMRobertaXLForSequenceClassification"), + ("xlnet", "XLNetForSequenceClassification"), + ("xmod", "XmodForSequenceClassification"), + ("yoso", "YosoForSequenceClassification"), + ] +) + +MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( + [ + # Model for Question Answering mapping + ("albert", "AlbertForQuestionAnswering"), + ("bart", "BartForQuestionAnswering"), + ("bert", "BertForQuestionAnswering"), + ("big_bird", "BigBirdForQuestionAnswering"), + ("bigbird_pegasus", "BigBirdPegasusForQuestionAnswering"), + ("bloom", "BloomForQuestionAnswering"), + ("camembert", "CamembertForQuestionAnswering"), + ("canine", "CanineForQuestionAnswering"), + ("convbert", "ConvBertForQuestionAnswering"), + ("data2vec-text", "Data2VecTextForQuestionAnswering"), + ("deberta", "DebertaForQuestionAnswering"), + ("deberta-v2", "DebertaV2ForQuestionAnswering"), + ("distilbert", "DistilBertForQuestionAnswering"), + ("electra", "ElectraForQuestionAnswering"), + ("ernie", "ErnieForQuestionAnswering"), + ("ernie_m", "ErnieMForQuestionAnswering"), + ("falcon", "FalconForQuestionAnswering"), + ("flaubert", "FlaubertForQuestionAnsweringSimple"), + ("fnet", "FNetForQuestionAnswering"), + ("funnel", "FunnelForQuestionAnswering"), + ("gpt2", "GPT2ForQuestionAnswering"), + ("gpt_neo", "GPTNeoForQuestionAnswering"), + ("gpt_neox", "GPTNeoXForQuestionAnswering"), + ("gptj", "GPTJForQuestionAnswering"), + ("ibert", "IBertForQuestionAnswering"), + ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"), + ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"), + ("led", "LEDForQuestionAnswering"), + ("lilt", "LiltForQuestionAnswering"), + ("llama", "LlamaForQuestionAnswering"), + ("longformer", "LongformerForQuestionAnswering"), + ("luke", "LukeForQuestionAnswering"), + ("lxmert", "LxmertForQuestionAnswering"), + ("markuplm", "MarkupLMForQuestionAnswering"), + ("mbart", "MBartForQuestionAnswering"), + ("mega", "MegaForQuestionAnswering"), + ("megatron-bert", "MegatronBertForQuestionAnswering"), + ("mobilebert", "MobileBertForQuestionAnswering"), + ("mpnet", "MPNetForQuestionAnswering"), + ("mpt", "MptForQuestionAnswering"), + ("mra", "MraForQuestionAnswering"), + ("mt5", "MT5ForQuestionAnswering"), + ("mvp", "MvpForQuestionAnswering"), + ("nezha", "NezhaForQuestionAnswering"), + ("nystromformer", "NystromformerForQuestionAnswering"), + ("opt", "OPTForQuestionAnswering"), + ("qdqbert", "QDQBertForQuestionAnswering"), + ("reformer", "ReformerForQuestionAnswering"), + ("rembert", "RemBertForQuestionAnswering"), + ("roberta", "RobertaForQuestionAnswering"), + ("roberta-prelayernorm", "RobertaPreLayerNormForQuestionAnswering"), + ("roc_bert", 
"RoCBertForQuestionAnswering"), + ("roformer", "RoFormerForQuestionAnswering"), + ("splinter", "SplinterForQuestionAnswering"), + ("squeezebert", "SqueezeBertForQuestionAnswering"), + ("t5", "T5ForQuestionAnswering"), + ("umt5", "UMT5ForQuestionAnswering"), + ("xlm", "XLMForQuestionAnsweringSimple"), + ("xlm-roberta", "XLMRobertaForQuestionAnswering"), + ("xlm-roberta-xl", "XLMRobertaXLForQuestionAnswering"), + ("xlnet", "XLNetForQuestionAnsweringSimple"), + ("xmod", "XmodForQuestionAnswering"), + ("yoso", "YosoForQuestionAnswering"), + ] +) + +MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( + [ + # Model for Table Question Answering mapping + ("tapas", "TapasForQuestionAnswering"), + ] +) + +MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( + [ + ("blip", "BlipForQuestionAnswering"), + ("blip-2", "Blip2ForConditionalGeneration"), + ("vilt", "ViltForQuestionAnswering"), + ] +) + +MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( + [ + ("layoutlm", "LayoutLMForQuestionAnswering"), + ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"), + ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"), + ] +) + +MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Token Classification mapping + ("albert", "AlbertForTokenClassification"), + ("bert", "BertForTokenClassification"), + ("big_bird", "BigBirdForTokenClassification"), + ("biogpt", "BioGptForTokenClassification"), + ("bloom", "BloomForTokenClassification"), + ("bros", "BrosForTokenClassification"), + ("camembert", "CamembertForTokenClassification"), + ("canine", "CanineForTokenClassification"), + ("convbert", "ConvBertForTokenClassification"), + ("data2vec-text", "Data2VecTextForTokenClassification"), + ("deberta", "DebertaForTokenClassification"), + ("deberta-v2", "DebertaV2ForTokenClassification"), + ("distilbert", "DistilBertForTokenClassification"), + ("electra", "ElectraForTokenClassification"), + ("ernie", "ErnieForTokenClassification"), + ("ernie_m", "ErnieMForTokenClassification"), + ("esm", "EsmForTokenClassification"), + ("falcon", "FalconForTokenClassification"), + ("flaubert", "FlaubertForTokenClassification"), + ("fnet", "FNetForTokenClassification"), + ("funnel", "FunnelForTokenClassification"), + ("gpt-sw3", "GPT2ForTokenClassification"), + ("gpt2", "GPT2ForTokenClassification"), + ("gpt_bigcode", "GPTBigCodeForTokenClassification"), + ("gpt_neo", "GPTNeoForTokenClassification"), + ("gpt_neox", "GPTNeoXForTokenClassification"), + ("ibert", "IBertForTokenClassification"), + ("layoutlm", "LayoutLMForTokenClassification"), + ("layoutlmv2", "LayoutLMv2ForTokenClassification"), + ("layoutlmv3", "LayoutLMv3ForTokenClassification"), + ("lilt", "LiltForTokenClassification"), + ("longformer", "LongformerForTokenClassification"), + ("luke", "LukeForTokenClassification"), + ("markuplm", "MarkupLMForTokenClassification"), + ("mega", "MegaForTokenClassification"), + ("megatron-bert", "MegatronBertForTokenClassification"), + ("mobilebert", "MobileBertForTokenClassification"), + ("mpnet", "MPNetForTokenClassification"), + ("mpt", "MptForTokenClassification"), + ("mra", "MraForTokenClassification"), + ("mt5", "MT5ForTokenClassification"), + ("nezha", "NezhaForTokenClassification"), + ("nystromformer", "NystromformerForTokenClassification"), + ("phi", "PhiForTokenClassification"), + ("qdqbert", "QDQBertForTokenClassification"), + ("rembert", "RemBertForTokenClassification"), + ("roberta", "RobertaForTokenClassification"), + ("roberta-prelayernorm", 
"RobertaPreLayerNormForTokenClassification"), + ("roc_bert", "RoCBertForTokenClassification"), + ("roformer", "RoFormerForTokenClassification"), + ("squeezebert", "SqueezeBertForTokenClassification"), + ("t5", "T5ForTokenClassification"), + ("umt5", "UMT5ForTokenClassification"), + ("xlm", "XLMForTokenClassification"), + ("xlm-roberta", "XLMRobertaForTokenClassification"), + ("xlm-roberta-xl", "XLMRobertaXLForTokenClassification"), + ("xlnet", "XLNetForTokenClassification"), + ("xmod", "XmodForTokenClassification"), + ("yoso", "YosoForTokenClassification"), + ] +) + +MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict( + [ + # Model for Multiple Choice mapping + ("albert", "AlbertForMultipleChoice"), + ("bert", "BertForMultipleChoice"), + ("big_bird", "BigBirdForMultipleChoice"), + ("camembert", "CamembertForMultipleChoice"), + ("canine", "CanineForMultipleChoice"), + ("convbert", "ConvBertForMultipleChoice"), + ("data2vec-text", "Data2VecTextForMultipleChoice"), + ("deberta-v2", "DebertaV2ForMultipleChoice"), + ("distilbert", "DistilBertForMultipleChoice"), + ("electra", "ElectraForMultipleChoice"), + ("ernie", "ErnieForMultipleChoice"), + ("ernie_m", "ErnieMForMultipleChoice"), + ("flaubert", "FlaubertForMultipleChoice"), + ("fnet", "FNetForMultipleChoice"), + ("funnel", "FunnelForMultipleChoice"), + ("ibert", "IBertForMultipleChoice"), + ("longformer", "LongformerForMultipleChoice"), + ("luke", "LukeForMultipleChoice"), + ("mega", "MegaForMultipleChoice"), + ("megatron-bert", "MegatronBertForMultipleChoice"), + ("mobilebert", "MobileBertForMultipleChoice"), + ("mpnet", "MPNetForMultipleChoice"), + ("mra", "MraForMultipleChoice"), + ("nezha", "NezhaForMultipleChoice"), + ("nystromformer", "NystromformerForMultipleChoice"), + ("qdqbert", "QDQBertForMultipleChoice"), + ("rembert", "RemBertForMultipleChoice"), + ("roberta", "RobertaForMultipleChoice"), + ("roberta-prelayernorm", "RobertaPreLayerNormForMultipleChoice"), + ("roc_bert", "RoCBertForMultipleChoice"), + ("roformer", "RoFormerForMultipleChoice"), + ("squeezebert", "SqueezeBertForMultipleChoice"), + ("xlm", "XLMForMultipleChoice"), + ("xlm-roberta", "XLMRobertaForMultipleChoice"), + ("xlm-roberta-xl", "XLMRobertaXLForMultipleChoice"), + ("xlnet", "XLNetForMultipleChoice"), + ("xmod", "XmodForMultipleChoice"), + ("yoso", "YosoForMultipleChoice"), + ] +) + +MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict( + [ + ("bert", "BertForNextSentencePrediction"), + ("ernie", "ErnieForNextSentencePrediction"), + ("fnet", "FNetForNextSentencePrediction"), + ("megatron-bert", "MegatronBertForNextSentencePrediction"), + ("mobilebert", "MobileBertForNextSentencePrediction"), + ("nezha", "NezhaForNextSentencePrediction"), + ("qdqbert", "QDQBertForNextSentencePrediction"), + ] +) + +MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Audio Classification mapping + ("audio-spectrogram-transformer", "ASTForAudioClassification"), + ("data2vec-audio", "Data2VecAudioForSequenceClassification"), + ("hubert", "HubertForSequenceClassification"), + ("sew", "SEWForSequenceClassification"), + ("sew-d", "SEWDForSequenceClassification"), + ("unispeech", "UniSpeechForSequenceClassification"), + ("unispeech-sat", "UniSpeechSatForSequenceClassification"), + ("wav2vec2", "Wav2Vec2ForSequenceClassification"), + ("wav2vec2-bert", "Wav2Vec2BertForSequenceClassification"), + ("wav2vec2-conformer", "Wav2Vec2ConformerForSequenceClassification"), + ("wavlm", "WavLMForSequenceClassification"), + ("whisper", 
"WhisperForAudioClassification"), + ] +) + +MODEL_FOR_CTC_MAPPING_NAMES = OrderedDict( + [ + # Model for Connectionist temporal classification (CTC) mapping + ("data2vec-audio", "Data2VecAudioForCTC"), + ("hubert", "HubertForCTC"), + ("mctct", "MCTCTForCTC"), + ("sew", "SEWForCTC"), + ("sew-d", "SEWDForCTC"), + ("unispeech", "UniSpeechForCTC"), + ("unispeech-sat", "UniSpeechSatForCTC"), + ("wav2vec2", "Wav2Vec2ForCTC"), + ("wav2vec2-bert", "Wav2Vec2BertForCTC"), + ("wav2vec2-conformer", "Wav2Vec2ConformerForCTC"), + ("wavlm", "WavLMForCTC"), + ] +) + +MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Audio Classification mapping + ("data2vec-audio", "Data2VecAudioForAudioFrameClassification"), + ("unispeech-sat", "UniSpeechSatForAudioFrameClassification"), + ("wav2vec2", "Wav2Vec2ForAudioFrameClassification"), + ("wav2vec2-bert", "Wav2Vec2BertForAudioFrameClassification"), + ("wav2vec2-conformer", "Wav2Vec2ConformerForAudioFrameClassification"), + ("wavlm", "WavLMForAudioFrameClassification"), + ] +) + +MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES = OrderedDict( + [ + # Model for Audio Classification mapping + ("data2vec-audio", "Data2VecAudioForXVector"), + ("unispeech-sat", "UniSpeechSatForXVector"), + ("wav2vec2", "Wav2Vec2ForXVector"), + ("wav2vec2-bert", "Wav2Vec2BertForXVector"), + ("wav2vec2-conformer", "Wav2Vec2ConformerForXVector"), + ("wavlm", "WavLMForXVector"), + ] +) + +MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES = OrderedDict( + [ + # Model for Text-To-Spectrogram mapping + ("fastspeech2_conformer", "FastSpeech2ConformerModel"), + ("speecht5", "SpeechT5ForTextToSpeech"), + ] +) + +MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES = OrderedDict( + [ + # Model for Text-To-Waveform mapping + ("bark", "BarkModel"), + ("fastspeech2_conformer", "FastSpeech2ConformerWithHifiGan"), + ("musicgen", "MusicgenForConditionalGeneration"), + ("musicgen_melody", "MusicgenMelodyForConditionalGeneration"), + ("seamless_m4t", "SeamlessM4TForTextToSpeech"), + ("seamless_m4t_v2", "SeamlessM4Tv2ForTextToSpeech"), + ("vits", "VitsModel"), + ] +) + +MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Zero Shot Image Classification mapping + ("align", "AlignModel"), + ("altclip", "AltCLIPModel"), + ("blip", "BlipModel"), + ("chinese_clip", "ChineseCLIPModel"), + ("clip", "CLIPModel"), + ("clipseg", "CLIPSegModel"), + ("siglip", "SiglipModel"), + ] +) + +MODEL_FOR_BACKBONE_MAPPING_NAMES = OrderedDict( + [ + # Backbone mapping + ("beit", "BeitBackbone"), + ("bit", "BitBackbone"), + ("convnext", "ConvNextBackbone"), + ("convnextv2", "ConvNextV2Backbone"), + ("dinat", "DinatBackbone"), + ("dinov2", "Dinov2Backbone"), + ("focalnet", "FocalNetBackbone"), + ("maskformer-swin", "MaskFormerSwinBackbone"), + ("nat", "NatBackbone"), + ("pvt_v2", "PvtV2Backbone"), + ("resnet", "ResNetBackbone"), + ("swin", "SwinBackbone"), + ("swinv2", "Swinv2Backbone"), + ("timm_backbone", "TimmBackbone"), + ("vitdet", "VitDetBackbone"), + ] +) + +MODEL_FOR_MASK_GENERATION_MAPPING_NAMES = OrderedDict( + [ + ("sam", "SamModel"), + ] +) + + +MODEL_FOR_KEYPOINT_DETECTION_MAPPING_NAMES = OrderedDict( + [ + ("superpoint", "SuperPointForKeypointDetection"), + ] +) + + +MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES = OrderedDict( + [ + ("albert", "AlbertModel"), + ("bert", "BertModel"), + ("big_bird", "BigBirdModel"), + ("data2vec-text", "Data2VecTextModel"), + ("deberta", "DebertaModel"), + ("deberta-v2", "DebertaV2Model"), + ("distilbert", "DistilBertModel"), + ("electra", 
"ElectraModel"), + ("flaubert", "FlaubertModel"), + ("ibert", "IBertModel"), + ("longformer", "LongformerModel"), + ("mobilebert", "MobileBertModel"), + ("mt5", "MT5EncoderModel"), + ("nystromformer", "NystromformerModel"), + ("reformer", "ReformerModel"), + ("rembert", "RemBertModel"), + ("roberta", "RobertaModel"), + ("roberta-prelayernorm", "RobertaPreLayerNormModel"), + ("roc_bert", "RoCBertModel"), + ("roformer", "RoFormerModel"), + ("squeezebert", "SqueezeBertModel"), + ("t5", "T5EncoderModel"), + ("umt5", "UMT5EncoderModel"), + ("xlm", "XLMModel"), + ("xlm-roberta", "XLMRobertaModel"), + ("xlm-roberta-xl", "XLMRobertaXLModel"), + ] +) + +MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + ("patchtsmixer", "PatchTSMixerForTimeSeriesClassification"), + ("patchtst", "PatchTSTForClassification"), + ] +) + +MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING_NAMES = OrderedDict( + [ + ("patchtsmixer", "PatchTSMixerForRegression"), + ("patchtst", "PatchTSTForRegression"), + ] +) + +MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES = OrderedDict( + [ + ("swin2sr", "Swin2SRForImageSuperResolution"), + ] +) + +MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_MAPPING_NAMES) +MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES) +MODEL_WITH_LM_HEAD_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_WITH_LM_HEAD_MAPPING_NAMES) +MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) +MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES +) +MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES +) +MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES +) +MODEL_FOR_IMAGE_SEGMENTATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES +) +MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES +) +MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES +) +MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES +) +MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES +) +MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) +MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES +) +MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES +) +MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES) +MODEL_FOR_IMAGE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_MAPPING_NAMES) +MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES +) +MODEL_FOR_OBJECT_DETECTION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES) +MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, 
MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES +) +MODEL_FOR_DEPTH_ESTIMATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES) +MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES +) +MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES +) +MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES +) +MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES +) +MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES +) +MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES) +MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES +) +MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES +) +MODEL_FOR_CTC_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES) +MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES) +MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES +) +MODEL_FOR_AUDIO_XVECTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES) + +MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES +) + +MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES) + +MODEL_FOR_BACKBONE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_BACKBONE_MAPPING_NAMES) + +MODEL_FOR_MASK_GENERATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MASK_GENERATION_MAPPING_NAMES) + +MODEL_FOR_KEYPOINT_DETECTION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_KEYPOINT_DETECTION_MAPPING_NAMES +) + +MODEL_FOR_TEXT_ENCODING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES) + +MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING_NAMES +) + +MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING_NAMES +) + +MODEL_FOR_IMAGE_TO_IMAGE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES) + + +class AutoModelForMaskGeneration(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_MASK_GENERATION_MAPPING + + +class AutoModelForKeypointDetection(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_KEYPOINT_DETECTION_MAPPING + + +class AutoModelForTextEncoding(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_TEXT_ENCODING_MAPPING + + +class AutoModelForImageToImage(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_IMAGE_TO_IMAGE_MAPPING + + +class AutoModel(_BaseAutoModelClass): + _model_mapping = MODEL_MAPPING + + +AutoModel = auto_class_update(AutoModel) + + +class AutoModelForPreTraining(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_PRETRAINING_MAPPING + + +AutoModelForPreTraining = 
auto_class_update(AutoModelForPreTraining, head_doc="pretraining") + + +# Private on purpose, the public class will add the deprecation warnings. +class _AutoModelWithLMHead(_BaseAutoModelClass): + _model_mapping = MODEL_WITH_LM_HEAD_MAPPING + + +_AutoModelWithLMHead = auto_class_update(_AutoModelWithLMHead, head_doc="language modeling") + + +class AutoModelForCausalLM(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING + + +AutoModelForCausalLM = auto_class_update(AutoModelForCausalLM, head_doc="causal language modeling") + + +class AutoModelForMaskedLM(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_MASKED_LM_MAPPING + + +AutoModelForMaskedLM = auto_class_update(AutoModelForMaskedLM, head_doc="masked language modeling") + + +class AutoModelForSeq2SeqLM(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING + + +AutoModelForSeq2SeqLM = auto_class_update( + AutoModelForSeq2SeqLM, + head_doc="sequence-to-sequence language modeling", + checkpoint_for_example="google-t5/t5-base", +) + + +class AutoModelForSequenceClassification(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING + + +AutoModelForSequenceClassification = auto_class_update( + AutoModelForSequenceClassification, head_doc="sequence classification" +) + + +class AutoModelForQuestionAnswering(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_QUESTION_ANSWERING_MAPPING + + +AutoModelForQuestionAnswering = auto_class_update(AutoModelForQuestionAnswering, head_doc="question answering") + + +class AutoModelForTableQuestionAnswering(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING + + +AutoModelForTableQuestionAnswering = auto_class_update( + AutoModelForTableQuestionAnswering, + head_doc="table question answering", + checkpoint_for_example="google/tapas-base-finetuned-wtq", +) + + +class AutoModelForVisualQuestionAnswering(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING + + +AutoModelForVisualQuestionAnswering = auto_class_update( + AutoModelForVisualQuestionAnswering, + head_doc="visual question answering", + checkpoint_for_example="dandelin/vilt-b32-finetuned-vqa", +) + + +class AutoModelForDocumentQuestionAnswering(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING + + +AutoModelForDocumentQuestionAnswering = auto_class_update( + AutoModelForDocumentQuestionAnswering, + head_doc="document question answering", + checkpoint_for_example='impira/layoutlm-document-qa", revision="52e01b3', +) + + +class AutoModelForTokenClassification(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING + + +AutoModelForTokenClassification = auto_class_update(AutoModelForTokenClassification, head_doc="token classification") + + +class AutoModelForMultipleChoice(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_MULTIPLE_CHOICE_MAPPING + + +AutoModelForMultipleChoice = auto_class_update(AutoModelForMultipleChoice, head_doc="multiple choice") + + +class AutoModelForNextSentencePrediction(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING + + +AutoModelForNextSentencePrediction = auto_class_update( + AutoModelForNextSentencePrediction, head_doc="next sentence prediction" +) + + +class AutoModelForImageClassification(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING + + +AutoModelForImageClassification = auto_class_update(AutoModelForImageClassification, head_doc="image classification") + 
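+# Illustrative usage of the auto classes defined above (a minimal sketch in a
+# comment; the checkpoint name is only an example):
+#
+#     from transformers import AutoModelForSequenceClassification
+#
+#     # from_pretrained() reads the checkpoint's config, looks its type up in
+#     # MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, and instantiates the matching
+#     # head class, e.g. BertForSequenceClassification for a BERT config.
+#     model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")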
+ +class AutoModelForZeroShotImageClassification(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING + + +AutoModelForZeroShotImageClassification = auto_class_update( + AutoModelForZeroShotImageClassification, head_doc="zero-shot image classification" +) + + +class AutoModelForImageSegmentation(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_IMAGE_SEGMENTATION_MAPPING + + +AutoModelForImageSegmentation = auto_class_update(AutoModelForImageSegmentation, head_doc="image segmentation") + + +class AutoModelForSemanticSegmentation(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING + + +AutoModelForSemanticSegmentation = auto_class_update( + AutoModelForSemanticSegmentation, head_doc="semantic segmentation" +) + + +class AutoModelForUniversalSegmentation(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING + + +AutoModelForUniversalSegmentation = auto_class_update( + AutoModelForUniversalSegmentation, head_doc="universal image segmentation" +) + + +class AutoModelForInstanceSegmentation(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING + + +AutoModelForInstanceSegmentation = auto_class_update( + AutoModelForInstanceSegmentation, head_doc="instance segmentation" +) + + +class AutoModelForObjectDetection(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING + + +AutoModelForObjectDetection = auto_class_update(AutoModelForObjectDetection, head_doc="object detection") + + +class AutoModelForZeroShotObjectDetection(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING + + +AutoModelForZeroShotObjectDetection = auto_class_update( + AutoModelForZeroShotObjectDetection, head_doc="zero-shot object detection" +) + + +class AutoModelForDepthEstimation(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING + + +AutoModelForDepthEstimation = auto_class_update(AutoModelForDepthEstimation, head_doc="depth estimation") + + +class AutoModelForVideoClassification(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING + + +AutoModelForVideoClassification = auto_class_update(AutoModelForVideoClassification, head_doc="video classification") + + +class AutoModelForVision2Seq(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING + + +AutoModelForVision2Seq = auto_class_update(AutoModelForVision2Seq, head_doc="vision-to-text modeling") + + +class AutoModelForAudioClassification(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING + + +AutoModelForAudioClassification = auto_class_update(AutoModelForAudioClassification, head_doc="audio classification") + + +class AutoModelForCTC(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_CTC_MAPPING + + +AutoModelForCTC = auto_class_update(AutoModelForCTC, head_doc="connectionist temporal classification") + + +class AutoModelForSpeechSeq2Seq(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING + + +AutoModelForSpeechSeq2Seq = auto_class_update( + AutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling" +) + + +class AutoModelForAudioFrameClassification(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING + + +AutoModelForAudioFrameClassification = auto_class_update( + AutoModelForAudioFrameClassification, head_doc="audio frame (token) classification" +) + + +class AutoModelForAudioXVector(_BaseAutoModelClass): + _model_mapping = 
MODEL_FOR_AUDIO_XVECTOR_MAPPING + + +class AutoModelForTextToSpectrogram(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING + + +class AutoModelForTextToWaveform(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING + + +class AutoBackbone(_BaseAutoBackboneClass): + _model_mapping = MODEL_FOR_BACKBONE_MAPPING + + +AutoModelForAudioXVector = auto_class_update(AutoModelForAudioXVector, head_doc="audio retrieval via x-vector") + + +class AutoModelForMaskedImageModeling(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING + + +AutoModelForMaskedImageModeling = auto_class_update(AutoModelForMaskedImageModeling, head_doc="masked image modeling") + + +class AutoModelWithLMHead(_AutoModelWithLMHead): + @classmethod + def from_config(cls, config): + warnings.warn( + "The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use " + "`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and " + "`AutoModelForSeq2SeqLM` for encoder-decoder models.", + FutureWarning, + ) + return super().from_config(config) + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): + warnings.warn( + "The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use " + "`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and " + "`AutoModelForSeq2SeqLM` for encoder-decoder models.", + FutureWarning, + ) + return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/transformers/models/auto/modeling_flax_auto.py b/venv/lib/python3.10/site-packages/transformers/models/auto/modeling_flax_auto.py new file mode 100644 index 0000000000000000000000000000000000000000..f8e62bf0f2a3b233ff7bfb8410152b0a8b85462b --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/auto/modeling_flax_auto.py @@ -0,0 +1,382 @@ +# coding=utf-8 +# Copyright 2018 The Google Flax Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
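+# Usage of the Flax auto classes defined below mirrors the PyTorch ones; an
+# illustrative sketch (checkpoint name taken from the docstring example):
+#
+#     from transformers import FlaxAutoModelForSeq2SeqLM
+#
+#     # Instantiates the Flax class mapped to the checkpoint's config type,
+#     # e.g. FlaxT5ForConditionalGeneration for a T5 config.
+#     model = FlaxAutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base")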
+""" Auto Model class.""" + + +from collections import OrderedDict + +from ...utils import logging +from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update +from .configuration_auto import CONFIG_MAPPING_NAMES + + +logger = logging.get_logger(__name__) + + +FLAX_MODEL_MAPPING_NAMES = OrderedDict( + [ + # Base model mapping + ("albert", "FlaxAlbertModel"), + ("bart", "FlaxBartModel"), + ("beit", "FlaxBeitModel"), + ("bert", "FlaxBertModel"), + ("big_bird", "FlaxBigBirdModel"), + ("blenderbot", "FlaxBlenderbotModel"), + ("blenderbot-small", "FlaxBlenderbotSmallModel"), + ("bloom", "FlaxBloomModel"), + ("clip", "FlaxCLIPModel"), + ("distilbert", "FlaxDistilBertModel"), + ("electra", "FlaxElectraModel"), + ("gemma", "FlaxGemmaModel"), + ("gpt-sw3", "FlaxGPT2Model"), + ("gpt2", "FlaxGPT2Model"), + ("gpt_neo", "FlaxGPTNeoModel"), + ("gptj", "FlaxGPTJModel"), + ("llama", "FlaxLlamaModel"), + ("longt5", "FlaxLongT5Model"), + ("marian", "FlaxMarianModel"), + ("mbart", "FlaxMBartModel"), + ("mistral", "FlaxMistralModel"), + ("mt5", "FlaxMT5Model"), + ("opt", "FlaxOPTModel"), + ("pegasus", "FlaxPegasusModel"), + ("regnet", "FlaxRegNetModel"), + ("resnet", "FlaxResNetModel"), + ("roberta", "FlaxRobertaModel"), + ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"), + ("roformer", "FlaxRoFormerModel"), + ("t5", "FlaxT5Model"), + ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"), + ("vit", "FlaxViTModel"), + ("wav2vec2", "FlaxWav2Vec2Model"), + ("whisper", "FlaxWhisperModel"), + ("xglm", "FlaxXGLMModel"), + ("xlm-roberta", "FlaxXLMRobertaModel"), + ] +) + +FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict( + [ + # Model for pre-training mapping + ("albert", "FlaxAlbertForPreTraining"), + ("bart", "FlaxBartForConditionalGeneration"), + ("bert", "FlaxBertForPreTraining"), + ("big_bird", "FlaxBigBirdForPreTraining"), + ("electra", "FlaxElectraForPreTraining"), + ("longt5", "FlaxLongT5ForConditionalGeneration"), + ("mbart", "FlaxMBartForConditionalGeneration"), + ("mt5", "FlaxMT5ForConditionalGeneration"), + ("roberta", "FlaxRobertaForMaskedLM"), + ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), + ("roformer", "FlaxRoFormerForMaskedLM"), + ("t5", "FlaxT5ForConditionalGeneration"), + ("wav2vec2", "FlaxWav2Vec2ForPreTraining"), + ("whisper", "FlaxWhisperForConditionalGeneration"), + ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), + ] +) + +FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict( + [ + # Model for Masked LM mapping + ("albert", "FlaxAlbertForMaskedLM"), + ("bart", "FlaxBartForConditionalGeneration"), + ("bert", "FlaxBertForMaskedLM"), + ("big_bird", "FlaxBigBirdForMaskedLM"), + ("distilbert", "FlaxDistilBertForMaskedLM"), + ("electra", "FlaxElectraForMaskedLM"), + ("mbart", "FlaxMBartForConditionalGeneration"), + ("roberta", "FlaxRobertaForMaskedLM"), + ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), + ("roformer", "FlaxRoFormerForMaskedLM"), + ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), + ] +) + +FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict( + [ + # Model for Seq2Seq Causal LM mapping + ("bart", "FlaxBartForConditionalGeneration"), + ("blenderbot", "FlaxBlenderbotForConditionalGeneration"), + ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"), + ("encoder-decoder", "FlaxEncoderDecoderModel"), + ("longt5", "FlaxLongT5ForConditionalGeneration"), + ("marian", "FlaxMarianMTModel"), + ("mbart", "FlaxMBartForConditionalGeneration"), + ("mt5", "FlaxMT5ForConditionalGeneration"), + 
("pegasus", "FlaxPegasusForConditionalGeneration"), + ("t5", "FlaxT5ForConditionalGeneration"), + ] +) + +FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Image-classsification + ("beit", "FlaxBeitForImageClassification"), + ("regnet", "FlaxRegNetForImageClassification"), + ("resnet", "FlaxResNetForImageClassification"), + ("vit", "FlaxViTForImageClassification"), + ] +) + +FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict( + [ + ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"), + ] +) + +FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict( + [ + # Model for Causal LM mapping + ("bart", "FlaxBartForCausalLM"), + ("bert", "FlaxBertForCausalLM"), + ("big_bird", "FlaxBigBirdForCausalLM"), + ("bloom", "FlaxBloomForCausalLM"), + ("electra", "FlaxElectraForCausalLM"), + ("gemma", "FlaxGemmaForCausalLM"), + ("gpt-sw3", "FlaxGPT2LMHeadModel"), + ("gpt2", "FlaxGPT2LMHeadModel"), + ("gpt_neo", "FlaxGPTNeoForCausalLM"), + ("gptj", "FlaxGPTJForCausalLM"), + ("llama", "FlaxLlamaForCausalLM"), + ("mistral", "FlaxMistralForCausalLM"), + ("opt", "FlaxOPTForCausalLM"), + ("roberta", "FlaxRobertaForCausalLM"), + ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"), + ("xglm", "FlaxXGLMForCausalLM"), + ("xlm-roberta", "FlaxXLMRobertaForCausalLM"), + ] +) + +FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Sequence Classification mapping + ("albert", "FlaxAlbertForSequenceClassification"), + ("bart", "FlaxBartForSequenceClassification"), + ("bert", "FlaxBertForSequenceClassification"), + ("big_bird", "FlaxBigBirdForSequenceClassification"), + ("distilbert", "FlaxDistilBertForSequenceClassification"), + ("electra", "FlaxElectraForSequenceClassification"), + ("mbart", "FlaxMBartForSequenceClassification"), + ("roberta", "FlaxRobertaForSequenceClassification"), + ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"), + ("roformer", "FlaxRoFormerForSequenceClassification"), + ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"), + ] +) + +FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( + [ + # Model for Question Answering mapping + ("albert", "FlaxAlbertForQuestionAnswering"), + ("bart", "FlaxBartForQuestionAnswering"), + ("bert", "FlaxBertForQuestionAnswering"), + ("big_bird", "FlaxBigBirdForQuestionAnswering"), + ("distilbert", "FlaxDistilBertForQuestionAnswering"), + ("electra", "FlaxElectraForQuestionAnswering"), + ("mbart", "FlaxMBartForQuestionAnswering"), + ("roberta", "FlaxRobertaForQuestionAnswering"), + ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"), + ("roformer", "FlaxRoFormerForQuestionAnswering"), + ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"), + ] +) + +FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Token Classification mapping + ("albert", "FlaxAlbertForTokenClassification"), + ("bert", "FlaxBertForTokenClassification"), + ("big_bird", "FlaxBigBirdForTokenClassification"), + ("distilbert", "FlaxDistilBertForTokenClassification"), + ("electra", "FlaxElectraForTokenClassification"), + ("roberta", "FlaxRobertaForTokenClassification"), + ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"), + ("roformer", "FlaxRoFormerForTokenClassification"), + ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"), + ] +) + +FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict( + [ + # Model for Multiple Choice mapping + ("albert", "FlaxAlbertForMultipleChoice"), + ("bert", 
"FlaxBertForMultipleChoice"), + ("big_bird", "FlaxBigBirdForMultipleChoice"), + ("distilbert", "FlaxDistilBertForMultipleChoice"), + ("electra", "FlaxElectraForMultipleChoice"), + ("roberta", "FlaxRobertaForMultipleChoice"), + ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"), + ("roformer", "FlaxRoFormerForMultipleChoice"), + ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"), + ] +) + +FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict( + [ + ("bert", "FlaxBertForNextSentencePrediction"), + ] +) + +FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict( + [ + ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"), + ("whisper", "FlaxWhisperForConditionalGeneration"), + ] +) + +FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + ("whisper", "FlaxWhisperForAudioClassification"), + ] +) + +FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) +FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) +FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) +FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES +) +FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES +) +FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) +FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) +FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES +) +FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES +) +FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES +) +FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES +) +FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES +) +FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES +) +FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES +) + + +class FlaxAutoModel(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_MAPPING + + +FlaxAutoModel = auto_class_update(FlaxAutoModel) + + +class FlaxAutoModelForPreTraining(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING + + +FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining") + + +class FlaxAutoModelForCausalLM(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING + + +FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling") + + +class FlaxAutoModelForMaskedLM(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING + + +FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling") + + +class 
FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING + + +FlaxAutoModelForSeq2SeqLM = auto_class_update( + FlaxAutoModelForSeq2SeqLM, + head_doc="sequence-to-sequence language modeling", + checkpoint_for_example="google-t5/t5-base", +) + + +class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING + + +FlaxAutoModelForSequenceClassification = auto_class_update( + FlaxAutoModelForSequenceClassification, head_doc="sequence classification" +) + + +class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING + + +FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering") + + +class FlaxAutoModelForTokenClassification(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING + + +FlaxAutoModelForTokenClassification = auto_class_update( + FlaxAutoModelForTokenClassification, head_doc="token classification" +) + + +class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING + + +FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice") + + +class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING + + +FlaxAutoModelForNextSentencePrediction = auto_class_update( + FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction" +) + + +class FlaxAutoModelForImageClassification(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING + + +FlaxAutoModelForImageClassification = auto_class_update( + FlaxAutoModelForImageClassification, head_doc="image classification" +) + + +class FlaxAutoModelForVision2Seq(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING + + +FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling") + + +class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass): + _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING + + +FlaxAutoModelForSpeechSeq2Seq = auto_class_update( + FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling" +) diff --git a/venv/lib/python3.10/site-packages/transformers/models/auto/modeling_tf_auto.py b/venv/lib/python3.10/site-packages/transformers/models/auto/modeling_tf_auto.py new file mode 100644 index 0000000000000000000000000000000000000000..deed743162e4774751af454a755aad020219cbe0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/auto/modeling_tf_auto.py @@ -0,0 +1,721 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
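+# Usage of the TF auto classes defined below mirrors the PyTorch and Flax ones;
+# an illustrative sketch (the checkpoint name is only an example):
+#
+#     from transformers import TFAutoModelForCausalLM
+#
+#     # Instantiates the TF class mapped to the checkpoint's config type, e.g.
+#     # TFGPT2LMHeadModel for a GPT-2 config (see TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES).
+#     model = TFAutoModelForCausalLM.from_pretrained("gpt2")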
+""" Auto Model class.""" + + +import warnings +from collections import OrderedDict + +from ...utils import logging +from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update +from .configuration_auto import CONFIG_MAPPING_NAMES + + +logger = logging.get_logger(__name__) + + +TF_MODEL_MAPPING_NAMES = OrderedDict( + [ + # Base model mapping + ("albert", "TFAlbertModel"), + ("bart", "TFBartModel"), + ("bert", "TFBertModel"), + ("blenderbot", "TFBlenderbotModel"), + ("blenderbot-small", "TFBlenderbotSmallModel"), + ("blip", "TFBlipModel"), + ("camembert", "TFCamembertModel"), + ("clip", "TFCLIPModel"), + ("convbert", "TFConvBertModel"), + ("convnext", "TFConvNextModel"), + ("convnextv2", "TFConvNextV2Model"), + ("ctrl", "TFCTRLModel"), + ("cvt", "TFCvtModel"), + ("data2vec-vision", "TFData2VecVisionModel"), + ("deberta", "TFDebertaModel"), + ("deberta-v2", "TFDebertaV2Model"), + ("deit", "TFDeiTModel"), + ("distilbert", "TFDistilBertModel"), + ("dpr", "TFDPRQuestionEncoder"), + ("efficientformer", "TFEfficientFormerModel"), + ("electra", "TFElectraModel"), + ("esm", "TFEsmModel"), + ("flaubert", "TFFlaubertModel"), + ("funnel", ("TFFunnelModel", "TFFunnelBaseModel")), + ("gpt-sw3", "TFGPT2Model"), + ("gpt2", "TFGPT2Model"), + ("gptj", "TFGPTJModel"), + ("groupvit", "TFGroupViTModel"), + ("hubert", "TFHubertModel"), + ("layoutlm", "TFLayoutLMModel"), + ("layoutlmv3", "TFLayoutLMv3Model"), + ("led", "TFLEDModel"), + ("longformer", "TFLongformerModel"), + ("lxmert", "TFLxmertModel"), + ("marian", "TFMarianModel"), + ("mbart", "TFMBartModel"), + ("mobilebert", "TFMobileBertModel"), + ("mobilevit", "TFMobileViTModel"), + ("mpnet", "TFMPNetModel"), + ("mt5", "TFMT5Model"), + ("openai-gpt", "TFOpenAIGPTModel"), + ("opt", "TFOPTModel"), + ("pegasus", "TFPegasusModel"), + ("regnet", "TFRegNetModel"), + ("rembert", "TFRemBertModel"), + ("resnet", "TFResNetModel"), + ("roberta", "TFRobertaModel"), + ("roberta-prelayernorm", "TFRobertaPreLayerNormModel"), + ("roformer", "TFRoFormerModel"), + ("sam", "TFSamModel"), + ("segformer", "TFSegformerModel"), + ("speech_to_text", "TFSpeech2TextModel"), + ("swin", "TFSwinModel"), + ("t5", "TFT5Model"), + ("tapas", "TFTapasModel"), + ("transfo-xl", "TFTransfoXLModel"), + ("vision-text-dual-encoder", "TFVisionTextDualEncoderModel"), + ("vit", "TFViTModel"), + ("vit_mae", "TFViTMAEModel"), + ("wav2vec2", "TFWav2Vec2Model"), + ("whisper", "TFWhisperModel"), + ("xglm", "TFXGLMModel"), + ("xlm", "TFXLMModel"), + ("xlm-roberta", "TFXLMRobertaModel"), + ("xlnet", "TFXLNetModel"), + ] +) + +TF_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict( + [ + # Model for pre-training mapping + ("albert", "TFAlbertForPreTraining"), + ("bart", "TFBartForConditionalGeneration"), + ("bert", "TFBertForPreTraining"), + ("camembert", "TFCamembertForMaskedLM"), + ("ctrl", "TFCTRLLMHeadModel"), + ("distilbert", "TFDistilBertForMaskedLM"), + ("electra", "TFElectraForPreTraining"), + ("flaubert", "TFFlaubertWithLMHeadModel"), + ("funnel", "TFFunnelForPreTraining"), + ("gpt-sw3", "TFGPT2LMHeadModel"), + ("gpt2", "TFGPT2LMHeadModel"), + ("layoutlm", "TFLayoutLMForMaskedLM"), + ("lxmert", "TFLxmertForPreTraining"), + ("mobilebert", "TFMobileBertForPreTraining"), + ("mpnet", "TFMPNetForMaskedLM"), + ("openai-gpt", "TFOpenAIGPTLMHeadModel"), + ("roberta", "TFRobertaForMaskedLM"), + ("roberta-prelayernorm", "TFRobertaPreLayerNormForMaskedLM"), + ("t5", "TFT5ForConditionalGeneration"), + ("tapas", "TFTapasForMaskedLM"), + ("transfo-xl", "TFTransfoXLLMHeadModel"), + 
("vit_mae", "TFViTMAEForPreTraining"), + ("xlm", "TFXLMWithLMHeadModel"), + ("xlm-roberta", "TFXLMRobertaForMaskedLM"), + ("xlnet", "TFXLNetLMHeadModel"), + ] +) + +TF_MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict( + [ + # Model with LM heads mapping + ("albert", "TFAlbertForMaskedLM"), + ("bart", "TFBartForConditionalGeneration"), + ("bert", "TFBertForMaskedLM"), + ("camembert", "TFCamembertForMaskedLM"), + ("convbert", "TFConvBertForMaskedLM"), + ("ctrl", "TFCTRLLMHeadModel"), + ("distilbert", "TFDistilBertForMaskedLM"), + ("electra", "TFElectraForMaskedLM"), + ("esm", "TFEsmForMaskedLM"), + ("flaubert", "TFFlaubertWithLMHeadModel"), + ("funnel", "TFFunnelForMaskedLM"), + ("gpt-sw3", "TFGPT2LMHeadModel"), + ("gpt2", "TFGPT2LMHeadModel"), + ("gptj", "TFGPTJForCausalLM"), + ("layoutlm", "TFLayoutLMForMaskedLM"), + ("led", "TFLEDForConditionalGeneration"), + ("longformer", "TFLongformerForMaskedLM"), + ("marian", "TFMarianMTModel"), + ("mobilebert", "TFMobileBertForMaskedLM"), + ("mpnet", "TFMPNetForMaskedLM"), + ("openai-gpt", "TFOpenAIGPTLMHeadModel"), + ("rembert", "TFRemBertForMaskedLM"), + ("roberta", "TFRobertaForMaskedLM"), + ("roberta-prelayernorm", "TFRobertaPreLayerNormForMaskedLM"), + ("roformer", "TFRoFormerForMaskedLM"), + ("speech_to_text", "TFSpeech2TextForConditionalGeneration"), + ("t5", "TFT5ForConditionalGeneration"), + ("tapas", "TFTapasForMaskedLM"), + ("transfo-xl", "TFTransfoXLLMHeadModel"), + ("whisper", "TFWhisperForConditionalGeneration"), + ("xlm", "TFXLMWithLMHeadModel"), + ("xlm-roberta", "TFXLMRobertaForMaskedLM"), + ("xlnet", "TFXLNetLMHeadModel"), + ] +) + +TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict( + [ + # Model for Causal LM mapping + ("bert", "TFBertLMHeadModel"), + ("camembert", "TFCamembertForCausalLM"), + ("ctrl", "TFCTRLLMHeadModel"), + ("gpt-sw3", "TFGPT2LMHeadModel"), + ("gpt2", "TFGPT2LMHeadModel"), + ("gptj", "TFGPTJForCausalLM"), + ("openai-gpt", "TFOpenAIGPTLMHeadModel"), + ("opt", "TFOPTForCausalLM"), + ("rembert", "TFRemBertForCausalLM"), + ("roberta", "TFRobertaForCausalLM"), + ("roberta-prelayernorm", "TFRobertaPreLayerNormForCausalLM"), + ("roformer", "TFRoFormerForCausalLM"), + ("transfo-xl", "TFTransfoXLLMHeadModel"), + ("xglm", "TFXGLMForCausalLM"), + ("xlm", "TFXLMWithLMHeadModel"), + ("xlm-roberta", "TFXLMRobertaForCausalLM"), + ("xlnet", "TFXLNetLMHeadModel"), + ] +) + +TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES = OrderedDict( + [ + ("deit", "TFDeiTForMaskedImageModeling"), + ("swin", "TFSwinForMaskedImageModeling"), + ] +) + +TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Image-classsification + ("convnext", "TFConvNextForImageClassification"), + ("convnextv2", "TFConvNextV2ForImageClassification"), + ("cvt", "TFCvtForImageClassification"), + ("data2vec-vision", "TFData2VecVisionForImageClassification"), + ("deit", ("TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher")), + ( + "efficientformer", + ("TFEfficientFormerForImageClassification", "TFEfficientFormerForImageClassificationWithTeacher"), + ), + ("mobilevit", "TFMobileViTForImageClassification"), + ("regnet", "TFRegNetForImageClassification"), + ("resnet", "TFResNetForImageClassification"), + ("segformer", "TFSegformerForImageClassification"), + ("swin", "TFSwinForImageClassification"), + ("vit", "TFViTForImageClassification"), + ] +) + + +TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Zero Shot Image Classification mapping + ("blip", "TFBlipModel"), + ("clip", 
"TFCLIPModel"), + ] +) + + +TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Semantic Segmentation mapping + ("data2vec-vision", "TFData2VecVisionForSemanticSegmentation"), + ("mobilevit", "TFMobileViTForSemanticSegmentation"), + ("segformer", "TFSegformerForSemanticSegmentation"), + ] +) + +TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict( + [ + ("blip", "TFBlipForConditionalGeneration"), + ("vision-encoder-decoder", "TFVisionEncoderDecoderModel"), + ] +) + +TF_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict( + [ + # Model for Masked LM mapping + ("albert", "TFAlbertForMaskedLM"), + ("bert", "TFBertForMaskedLM"), + ("camembert", "TFCamembertForMaskedLM"), + ("convbert", "TFConvBertForMaskedLM"), + ("deberta", "TFDebertaForMaskedLM"), + ("deberta-v2", "TFDebertaV2ForMaskedLM"), + ("distilbert", "TFDistilBertForMaskedLM"), + ("electra", "TFElectraForMaskedLM"), + ("esm", "TFEsmForMaskedLM"), + ("flaubert", "TFFlaubertWithLMHeadModel"), + ("funnel", "TFFunnelForMaskedLM"), + ("layoutlm", "TFLayoutLMForMaskedLM"), + ("longformer", "TFLongformerForMaskedLM"), + ("mobilebert", "TFMobileBertForMaskedLM"), + ("mpnet", "TFMPNetForMaskedLM"), + ("rembert", "TFRemBertForMaskedLM"), + ("roberta", "TFRobertaForMaskedLM"), + ("roberta-prelayernorm", "TFRobertaPreLayerNormForMaskedLM"), + ("roformer", "TFRoFormerForMaskedLM"), + ("tapas", "TFTapasForMaskedLM"), + ("xlm", "TFXLMWithLMHeadModel"), + ("xlm-roberta", "TFXLMRobertaForMaskedLM"), + ] +) + +TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict( + [ + # Model for Seq2Seq Causal LM mapping + ("bart", "TFBartForConditionalGeneration"), + ("blenderbot", "TFBlenderbotForConditionalGeneration"), + ("blenderbot-small", "TFBlenderbotSmallForConditionalGeneration"), + ("encoder-decoder", "TFEncoderDecoderModel"), + ("led", "TFLEDForConditionalGeneration"), + ("marian", "TFMarianMTModel"), + ("mbart", "TFMBartForConditionalGeneration"), + ("mt5", "TFMT5ForConditionalGeneration"), + ("pegasus", "TFPegasusForConditionalGeneration"), + ("t5", "TFT5ForConditionalGeneration"), + ] +) + +TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict( + [ + ("speech_to_text", "TFSpeech2TextForConditionalGeneration"), + ("whisper", "TFWhisperForConditionalGeneration"), + ] +) + +TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Sequence Classification mapping + ("albert", "TFAlbertForSequenceClassification"), + ("bart", "TFBartForSequenceClassification"), + ("bert", "TFBertForSequenceClassification"), + ("camembert", "TFCamembertForSequenceClassification"), + ("convbert", "TFConvBertForSequenceClassification"), + ("ctrl", "TFCTRLForSequenceClassification"), + ("deberta", "TFDebertaForSequenceClassification"), + ("deberta-v2", "TFDebertaV2ForSequenceClassification"), + ("distilbert", "TFDistilBertForSequenceClassification"), + ("electra", "TFElectraForSequenceClassification"), + ("esm", "TFEsmForSequenceClassification"), + ("flaubert", "TFFlaubertForSequenceClassification"), + ("funnel", "TFFunnelForSequenceClassification"), + ("gpt-sw3", "TFGPT2ForSequenceClassification"), + ("gpt2", "TFGPT2ForSequenceClassification"), + ("gptj", "TFGPTJForSequenceClassification"), + ("layoutlm", "TFLayoutLMForSequenceClassification"), + ("layoutlmv3", "TFLayoutLMv3ForSequenceClassification"), + ("longformer", "TFLongformerForSequenceClassification"), + ("mobilebert", "TFMobileBertForSequenceClassification"), + ("mpnet", "TFMPNetForSequenceClassification"), + ("openai-gpt", 
"TFOpenAIGPTForSequenceClassification"), + ("rembert", "TFRemBertForSequenceClassification"), + ("roberta", "TFRobertaForSequenceClassification"), + ("roberta-prelayernorm", "TFRobertaPreLayerNormForSequenceClassification"), + ("roformer", "TFRoFormerForSequenceClassification"), + ("tapas", "TFTapasForSequenceClassification"), + ("transfo-xl", "TFTransfoXLForSequenceClassification"), + ("xlm", "TFXLMForSequenceClassification"), + ("xlm-roberta", "TFXLMRobertaForSequenceClassification"), + ("xlnet", "TFXLNetForSequenceClassification"), + ] +) + +TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( + [ + # Model for Question Answering mapping + ("albert", "TFAlbertForQuestionAnswering"), + ("bert", "TFBertForQuestionAnswering"), + ("camembert", "TFCamembertForQuestionAnswering"), + ("convbert", "TFConvBertForQuestionAnswering"), + ("deberta", "TFDebertaForQuestionAnswering"), + ("deberta-v2", "TFDebertaV2ForQuestionAnswering"), + ("distilbert", "TFDistilBertForQuestionAnswering"), + ("electra", "TFElectraForQuestionAnswering"), + ("flaubert", "TFFlaubertForQuestionAnsweringSimple"), + ("funnel", "TFFunnelForQuestionAnswering"), + ("gptj", "TFGPTJForQuestionAnswering"), + ("layoutlmv3", "TFLayoutLMv3ForQuestionAnswering"), + ("longformer", "TFLongformerForQuestionAnswering"), + ("mobilebert", "TFMobileBertForQuestionAnswering"), + ("mpnet", "TFMPNetForQuestionAnswering"), + ("rembert", "TFRemBertForQuestionAnswering"), + ("roberta", "TFRobertaForQuestionAnswering"), + ("roberta-prelayernorm", "TFRobertaPreLayerNormForQuestionAnswering"), + ("roformer", "TFRoFormerForQuestionAnswering"), + ("xlm", "TFXLMForQuestionAnsweringSimple"), + ("xlm-roberta", "TFXLMRobertaForQuestionAnswering"), + ("xlnet", "TFXLNetForQuestionAnsweringSimple"), + ] +) +TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict([("wav2vec2", "TFWav2Vec2ForSequenceClassification")]) + +TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( + [ + ("layoutlm", "TFLayoutLMForQuestionAnswering"), + ("layoutlmv3", "TFLayoutLMv3ForQuestionAnswering"), + ] +) + + +TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( + [ + # Model for Table Question Answering mapping + ("tapas", "TFTapasForQuestionAnswering"), + ] +) + +TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Token Classification mapping + ("albert", "TFAlbertForTokenClassification"), + ("bert", "TFBertForTokenClassification"), + ("camembert", "TFCamembertForTokenClassification"), + ("convbert", "TFConvBertForTokenClassification"), + ("deberta", "TFDebertaForTokenClassification"), + ("deberta-v2", "TFDebertaV2ForTokenClassification"), + ("distilbert", "TFDistilBertForTokenClassification"), + ("electra", "TFElectraForTokenClassification"), + ("esm", "TFEsmForTokenClassification"), + ("flaubert", "TFFlaubertForTokenClassification"), + ("funnel", "TFFunnelForTokenClassification"), + ("layoutlm", "TFLayoutLMForTokenClassification"), + ("layoutlmv3", "TFLayoutLMv3ForTokenClassification"), + ("longformer", "TFLongformerForTokenClassification"), + ("mobilebert", "TFMobileBertForTokenClassification"), + ("mpnet", "TFMPNetForTokenClassification"), + ("rembert", "TFRemBertForTokenClassification"), + ("roberta", "TFRobertaForTokenClassification"), + ("roberta-prelayernorm", "TFRobertaPreLayerNormForTokenClassification"), + ("roformer", "TFRoFormerForTokenClassification"), + ("xlm", "TFXLMForTokenClassification"), + ("xlm-roberta", "TFXLMRobertaForTokenClassification"), + ("xlnet", 
"TFXLNetForTokenClassification"), + ] +) + +TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict( + [ + # Model for Multiple Choice mapping + ("albert", "TFAlbertForMultipleChoice"), + ("bert", "TFBertForMultipleChoice"), + ("camembert", "TFCamembertForMultipleChoice"), + ("convbert", "TFConvBertForMultipleChoice"), + ("deberta-v2", "TFDebertaV2ForMultipleChoice"), + ("distilbert", "TFDistilBertForMultipleChoice"), + ("electra", "TFElectraForMultipleChoice"), + ("flaubert", "TFFlaubertForMultipleChoice"), + ("funnel", "TFFunnelForMultipleChoice"), + ("longformer", "TFLongformerForMultipleChoice"), + ("mobilebert", "TFMobileBertForMultipleChoice"), + ("mpnet", "TFMPNetForMultipleChoice"), + ("rembert", "TFRemBertForMultipleChoice"), + ("roberta", "TFRobertaForMultipleChoice"), + ("roberta-prelayernorm", "TFRobertaPreLayerNormForMultipleChoice"), + ("roformer", "TFRoFormerForMultipleChoice"), + ("xlm", "TFXLMForMultipleChoice"), + ("xlm-roberta", "TFXLMRobertaForMultipleChoice"), + ("xlnet", "TFXLNetForMultipleChoice"), + ] +) + +TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict( + [ + ("bert", "TFBertForNextSentencePrediction"), + ("mobilebert", "TFMobileBertForNextSentencePrediction"), + ] +) +TF_MODEL_FOR_MASK_GENERATION_MAPPING_NAMES = OrderedDict( + [ + ("sam", "TFSamModel"), + ] +) +TF_MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES = OrderedDict( + [ + ("albert", "TFAlbertModel"), + ("bert", "TFBertModel"), + ("convbert", "TFConvBertModel"), + ("deberta", "TFDebertaModel"), + ("deberta-v2", "TFDebertaV2Model"), + ("distilbert", "TFDistilBertModel"), + ("electra", "TFElectraModel"), + ("flaubert", "TFFlaubertModel"), + ("longformer", "TFLongformerModel"), + ("mobilebert", "TFMobileBertModel"), + ("mt5", "TFMT5EncoderModel"), + ("rembert", "TFRemBertModel"), + ("roberta", "TFRobertaModel"), + ("roberta-prelayernorm", "TFRobertaPreLayerNormModel"), + ("roformer", "TFRoFormerModel"), + ("t5", "TFT5EncoderModel"), + ("xlm", "TFXLMModel"), + ("xlm-roberta", "TFXLMRobertaModel"), + ] +) + +TF_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_MAPPING_NAMES) +TF_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_PRETRAINING_MAPPING_NAMES) +TF_MODEL_WITH_LM_HEAD_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_WITH_LM_HEAD_MAPPING_NAMES) +TF_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) +TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES +) +TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES +) +TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES +) +TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES +) +TF_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) +TF_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MASKED_LM_MAPPING_NAMES) +TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES +) +TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES +) 
+TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES +) +TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES +) +TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES +) +TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES +) +TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES +) +TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES +) +TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES +) +TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES +) + +TF_MODEL_FOR_MASK_GENERATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MASK_GENERATION_MAPPING_NAMES +) + +TF_MODEL_FOR_TEXT_ENCODING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES) + + +class TFAutoModelForMaskGeneration(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_MASK_GENERATION_MAPPING + + +class TFAutoModelForTextEncoding(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_TEXT_ENCODING_MAPPING + + +class TFAutoModel(_BaseAutoModelClass): + _model_mapping = TF_MODEL_MAPPING + + +TFAutoModel = auto_class_update(TFAutoModel) + + +class TFAutoModelForAudioClassification(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING + + +TFAutoModelForAudioClassification = auto_class_update( + TFAutoModelForAudioClassification, head_doc="audio classification" +) + + +class TFAutoModelForPreTraining(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_PRETRAINING_MAPPING + + +TFAutoModelForPreTraining = auto_class_update(TFAutoModelForPreTraining, head_doc="pretraining") + + +# Private on purpose, the public class will add the deprecation warnings. 
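+# (`TFAutoModelWithLMHead`, defined at the end of this file, subclasses it and raises
+# a `FutureWarning` from both `from_config` and `from_pretrained`.)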
+class _TFAutoModelWithLMHead(_BaseAutoModelClass): + _model_mapping = TF_MODEL_WITH_LM_HEAD_MAPPING + + +_TFAutoModelWithLMHead = auto_class_update(_TFAutoModelWithLMHead, head_doc="language modeling") + + +class TFAutoModelForCausalLM(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING + + +TFAutoModelForCausalLM = auto_class_update(TFAutoModelForCausalLM, head_doc="causal language modeling") + + +class TFAutoModelForMaskedImageModeling(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING + + +TFAutoModelForMaskedImageModeling = auto_class_update( + TFAutoModelForMaskedImageModeling, head_doc="masked image modeling" +) + + +class TFAutoModelForImageClassification(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING + + +TFAutoModelForImageClassification = auto_class_update( + TFAutoModelForImageClassification, head_doc="image classification" +) + + +class TFAutoModelForZeroShotImageClassification(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING + + +TFAutoModelForZeroShotImageClassification = auto_class_update( + TFAutoModelForZeroShotImageClassification, head_doc="zero-shot image classification" +) + + +class TFAutoModelForSemanticSegmentation(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING + + +TFAutoModelForSemanticSegmentation = auto_class_update( + TFAutoModelForSemanticSegmentation, head_doc="semantic segmentation" +) + + +class TFAutoModelForVision2Seq(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_VISION_2_SEQ_MAPPING + + +TFAutoModelForVision2Seq = auto_class_update(TFAutoModelForVision2Seq, head_doc="vision-to-text modeling") + + +class TFAutoModelForMaskedLM(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING + + +TFAutoModelForMaskedLM = auto_class_update(TFAutoModelForMaskedLM, head_doc="masked language modeling") + + +class TFAutoModelForSeq2SeqLM(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING + + +TFAutoModelForSeq2SeqLM = auto_class_update( + TFAutoModelForSeq2SeqLM, + head_doc="sequence-to-sequence language modeling", + checkpoint_for_example="google-t5/t5-base", +) + + +class TFAutoModelForSequenceClassification(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING + + +TFAutoModelForSequenceClassification = auto_class_update( + TFAutoModelForSequenceClassification, head_doc="sequence classification" +) + + +class TFAutoModelForQuestionAnswering(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING + + +TFAutoModelForQuestionAnswering = auto_class_update(TFAutoModelForQuestionAnswering, head_doc="question answering") + + +class TFAutoModelForDocumentQuestionAnswering(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING + + +TFAutoModelForDocumentQuestionAnswering = auto_class_update( + TFAutoModelForDocumentQuestionAnswering, + head_doc="document question answering", + checkpoint_for_example='impira/layoutlm-document-qa", revision="52e01b3', +) + + +class TFAutoModelForTableQuestionAnswering(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING + + +TFAutoModelForTableQuestionAnswering = auto_class_update( + TFAutoModelForTableQuestionAnswering, + head_doc="table question answering", + checkpoint_for_example="google/tapas-base-finetuned-wtq", +) + + +class TFAutoModelForTokenClassification(_BaseAutoModelClass): + 
_model_mapping = TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING + + +TFAutoModelForTokenClassification = auto_class_update( + TFAutoModelForTokenClassification, head_doc="token classification" +) + + +class TFAutoModelForMultipleChoice(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING + + +TFAutoModelForMultipleChoice = auto_class_update(TFAutoModelForMultipleChoice, head_doc="multiple choice") + + +class TFAutoModelForNextSentencePrediction(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING + + +TFAutoModelForNextSentencePrediction = auto_class_update( + TFAutoModelForNextSentencePrediction, head_doc="next sentence prediction" +) + + +class TFAutoModelForSpeechSeq2Seq(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING + + +TFAutoModelForSpeechSeq2Seq = auto_class_update( + TFAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling" +) + + +class TFAutoModelWithLMHead(_TFAutoModelWithLMHead): + @classmethod + def from_config(cls, config): + warnings.warn( + "The class `TFAutoModelWithLMHead` is deprecated and will be removed in a future version. Please use" + " `TFAutoModelForCausalLM` for causal language models, `TFAutoModelForMaskedLM` for masked language models" + " and `TFAutoModelForSeq2SeqLM` for encoder-decoder models.", + FutureWarning, + ) + return super().from_config(config) + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): + warnings.warn( + "The class `TFAutoModelWithLMHead` is deprecated and will be removed in a future version. Please use" + " `TFAutoModelForCausalLM` for causal language models, `TFAutoModelForMaskedLM` for masked language models" + " and `TFAutoModelForSeq2SeqLM` for encoder-decoder models.", + FutureWarning, + ) + return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/transformers/models/auto/processing_auto.py b/venv/lib/python3.10/site-packages/transformers/models/auto/processing_auto.py new file mode 100644 index 0000000000000000000000000000000000000000..a7134f26a7d60c899c7a6bf320031ba075241716 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/auto/processing_auto.py @@ -0,0 +1,358 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" AutoProcessor class.""" +import importlib +import inspect +import json +import os +import warnings +from collections import OrderedDict + +# Build the list of all feature extractors +from ...configuration_utils import PretrainedConfig +from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code +from ...feature_extraction_utils import FeatureExtractionMixin +from ...image_processing_utils import ImageProcessingMixin +from ...processing_utils import ProcessorMixin +from ...tokenization_utils import TOKENIZER_CONFIG_FILE +from ...utils import FEATURE_EXTRACTOR_NAME, PROCESSOR_NAME, get_file_from_repo, logging +from .auto_factory import _LazyAutoMapping +from .configuration_auto import ( + CONFIG_MAPPING_NAMES, + AutoConfig, + model_type_to_module_name, + replace_list_option_in_docstrings, +) +from .feature_extraction_auto import AutoFeatureExtractor +from .image_processing_auto import AutoImageProcessor +from .tokenization_auto import AutoTokenizer + + +logger = logging.get_logger(__name__) + +PROCESSOR_MAPPING_NAMES = OrderedDict( + [ + ("align", "AlignProcessor"), + ("altclip", "AltCLIPProcessor"), + ("bark", "BarkProcessor"), + ("blip", "BlipProcessor"), + ("blip-2", "Blip2Processor"), + ("bridgetower", "BridgeTowerProcessor"), + ("chinese_clip", "ChineseCLIPProcessor"), + ("clap", "ClapProcessor"), + ("clip", "CLIPProcessor"), + ("clipseg", "CLIPSegProcessor"), + ("clvp", "ClvpProcessor"), + ("flava", "FlavaProcessor"), + ("fuyu", "FuyuProcessor"), + ("git", "GitProcessor"), + ("groupvit", "CLIPProcessor"), + ("hubert", "Wav2Vec2Processor"), + ("idefics", "IdeficsProcessor"), + ("idefics2", "Idefics2Processor"), + ("instructblip", "InstructBlipProcessor"), + ("kosmos-2", "Kosmos2Processor"), + ("layoutlmv2", "LayoutLMv2Processor"), + ("layoutlmv3", "LayoutLMv3Processor"), + ("llava", "LlavaProcessor"), + ("llava_next", "LlavaNextProcessor"), + ("markuplm", "MarkupLMProcessor"), + ("mctct", "MCTCTProcessor"), + ("mgp-str", "MgpstrProcessor"), + ("oneformer", "OneFormerProcessor"), + ("owlv2", "Owlv2Processor"), + ("owlvit", "OwlViTProcessor"), + ("pix2struct", "Pix2StructProcessor"), + ("pop2piano", "Pop2PianoProcessor"), + ("sam", "SamProcessor"), + ("seamless_m4t", "SeamlessM4TProcessor"), + ("sew", "Wav2Vec2Processor"), + ("sew-d", "Wav2Vec2Processor"), + ("siglip", "SiglipProcessor"), + ("speech_to_text", "Speech2TextProcessor"), + ("speech_to_text_2", "Speech2Text2Processor"), + ("speecht5", "SpeechT5Processor"), + ("trocr", "TrOCRProcessor"), + ("tvlt", "TvltProcessor"), + ("tvp", "TvpProcessor"), + ("unispeech", "Wav2Vec2Processor"), + ("unispeech-sat", "Wav2Vec2Processor"), + ("vilt", "ViltProcessor"), + ("vipllava", "LlavaProcessor"), + ("vision-text-dual-encoder", "VisionTextDualEncoderProcessor"), + ("wav2vec2", "Wav2Vec2Processor"), + ("wav2vec2-bert", "Wav2Vec2Processor"), + ("wav2vec2-conformer", "Wav2Vec2Processor"), + ("wavlm", "Wav2Vec2Processor"), + ("whisper", "WhisperProcessor"), + ("xclip", "XCLIPProcessor"), + ] +) + +PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, PROCESSOR_MAPPING_NAMES) + + +def processor_class_from_name(class_name: str): + for module_name, processors in PROCESSOR_MAPPING_NAMES.items(): + if class_name in processors: + module_name = model_type_to_module_name(module_name) + + module = importlib.import_module(f".{module_name}", "transformers.models") + try: + return getattr(module, class_name) + except AttributeError: + continue + + for processor in PROCESSOR_MAPPING._extra_content.values(): + if 
getattr(processor, "__name__", None) == class_name: + return processor + + # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main + # init and we return the proper dummy to get an appropriate error message. + main_module = importlib.import_module("transformers") + if hasattr(main_module, class_name): + return getattr(main_module, class_name) + + return None + + +class AutoProcessor: + r""" + This is a generic processor class that will be instantiated as one of the processor classes of the library when + created with the [`AutoProcessor.from_pretrained`] class method. + + This class cannot be instantiated directly using `__init__()` (throws an error). + """ + + def __init__(self): + raise EnvironmentError( + "AutoProcessor is designed to be instantiated " + "using the `AutoProcessor.from_pretrained(pretrained_model_name_or_path)` method." + ) + + @classmethod + @replace_list_option_in_docstrings(PROCESSOR_MAPPING_NAMES) + def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): + r""" + Instantiate one of the processor classes of the library from a pretrained model vocabulary. + + The processor class to instantiate is selected based on the `model_type` property of the config object (either + passed as an argument or loaded from `pretrained_model_name_or_path` if possible): + + List options + + Params: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on + huggingface.co. + - a path to a *directory* containing a processor files saved using the `save_pretrained()` method, + e.g., `./my_model_directory/`. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model feature extractor should be cached if the + standard cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the feature extractor files and override the cached versions + if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received file. Attempts to resume the download if such a file + exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + return_unused_kwargs (`bool`, *optional*, defaults to `False`): + If `False`, then this function returns just the final feature extractor object. If `True`, then this + functions returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary + consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of + `kwargs` which has not been used to update `feature_extractor` and is otherwise ignored. 
+ trust_remote_code (`bool`, *optional*, defaults to `False`): + Whether or not to allow for custom models defined on the Hub in their own modeling files. This option + should only be set to `True` for repositories you trust and in which you have read the code, as it will + execute code present on the Hub on your local machine. + kwargs (`Dict[str, Any]`, *optional*): + The values in kwargs of any keys which are feature extractor attributes will be used to override the + loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is + controlled by the `return_unused_kwargs` keyword parameter. + + + + Passing `token=True` is required when you want to use a private model. + + + + Examples: + + ```python + >>> from transformers import AutoProcessor + + >>> # Download processor from huggingface.co and cache. + >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h") + + >>> # If processor files are in a directory (e.g. processor was saved using *save_pretrained('./test/saved_model/')*) + >>> # processor = AutoProcessor.from_pretrained("./test/saved_model/") + ```""" + use_auth_token = kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if kwargs.get("token", None) is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." + ) + kwargs["token"] = use_auth_token + + config = kwargs.pop("config", None) + trust_remote_code = kwargs.pop("trust_remote_code", None) + kwargs["_from_auto"] = True + + processor_class = None + processor_auto_map = None + + # First, let's see if we have a processor or preprocessor config. + # Filter the kwargs for `get_file_from_repo`. 
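+        # (Only the keys that `get_file_from_repo` declares as parameters -- cache_dir,
+        # force_download, proxies, token, revision, and so on -- are kept, so
+        # processor-specific kwargs are never forwarded to the file-lookup calls below.)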
+ get_file_from_repo_kwargs = { + key: kwargs[key] for key in inspect.signature(get_file_from_repo).parameters.keys() if key in kwargs + } + + # Let's start by checking whether the processor class is saved in a processor config + processor_config_file = get_file_from_repo( + pretrained_model_name_or_path, PROCESSOR_NAME, **get_file_from_repo_kwargs + ) + if processor_config_file is not None: + config_dict, _ = ProcessorMixin.get_processor_dict(pretrained_model_name_or_path, **kwargs) + processor_class = config_dict.get("processor_class", None) + if "AutoProcessor" in config_dict.get("auto_map", {}): + processor_auto_map = config_dict["auto_map"]["AutoProcessor"] + + if processor_class is None: + # If not found, let's check whether the processor class is saved in an image processor config + preprocessor_config_file = get_file_from_repo( + pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, **get_file_from_repo_kwargs + ) + if preprocessor_config_file is not None: + config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs) + processor_class = config_dict.get("processor_class", None) + if "AutoProcessor" in config_dict.get("auto_map", {}): + processor_auto_map = config_dict["auto_map"]["AutoProcessor"] + + # If not found, let's check whether the processor class is saved in a feature extractor config + if preprocessor_config_file is not None and processor_class is None: + config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict( + pretrained_model_name_or_path, **kwargs + ) + processor_class = config_dict.get("processor_class", None) + if "AutoProcessor" in config_dict.get("auto_map", {}): + processor_auto_map = config_dict["auto_map"]["AutoProcessor"] + + if processor_class is None: + # Next, let's check whether the processor class is saved in a tokenizer + tokenizer_config_file = get_file_from_repo( + pretrained_model_name_or_path, TOKENIZER_CONFIG_FILE, **get_file_from_repo_kwargs + ) + if tokenizer_config_file is not None: + with open(tokenizer_config_file, encoding="utf-8") as reader: + config_dict = json.load(reader) + + processor_class = config_dict.get("processor_class", None) + if "AutoProcessor" in config_dict.get("auto_map", {}): + processor_auto_map = config_dict["auto_map"]["AutoProcessor"] + + if processor_class is None: + # Otherwise, load config, if it can be loaded. + if not isinstance(config, PretrainedConfig): + config = AutoConfig.from_pretrained( + pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs + ) + + # And check if the config contains the processor class. 
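+            # (The model config is the last place a `processor_class` can be declared;
+            # the lookups above tried the processor config, the image processor /
+            # feature extractor config, and the tokenizer config, in that order.)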
+            processor_class = getattr(config, "processor_class", None)
+            if hasattr(config, "auto_map") and "AutoProcessor" in config.auto_map:
+                processor_auto_map = config.auto_map["AutoProcessor"]
+
+        if processor_class is not None:
+            processor_class = processor_class_from_name(processor_class)
+
+        has_remote_code = processor_auto_map is not None
+        has_local_code = processor_class is not None or type(config) in PROCESSOR_MAPPING
+        trust_remote_code = resolve_trust_remote_code(
+            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
+        )
+
+        if has_remote_code and trust_remote_code:
+            processor_class = get_class_from_dynamic_module(
+                processor_auto_map, pretrained_model_name_or_path, **kwargs
+            )
+            _ = kwargs.pop("code_revision", None)
+            if os.path.isdir(pretrained_model_name_or_path):
+                processor_class.register_for_auto_class()
+            return processor_class.from_pretrained(
+                pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
+            )
+        elif processor_class is not None:
+            return processor_class.from_pretrained(
+                pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
+            )
+        # Last try: we use the PROCESSOR_MAPPING.
+        elif type(config) in PROCESSOR_MAPPING:
+            return PROCESSOR_MAPPING[type(config)].from_pretrained(pretrained_model_name_or_path, **kwargs)
+
+        # At this stage, there doesn't seem to be a `Processor` class available for this model, so let's try a
+        # tokenizer.
+        try:
+            return AutoTokenizer.from_pretrained(
+                pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
+            )
+        except Exception:
+            try:
+                return AutoImageProcessor.from_pretrained(
+                    pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
+                )
+            except Exception:
+                pass
+
+            try:
+                return AutoFeatureExtractor.from_pretrained(
+                    pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
+                )
+            except Exception:
+                pass
+
+        raise ValueError(
+            f"Unrecognized processing class in {pretrained_model_name_or_path}. Can't instantiate a processor, a "
+            "tokenizer, an image processor or a feature extractor for this model. Make sure the repository contains "
+            "the files of at least one of those processing classes."
+        )
+
+    @staticmethod
+    def register(config_class, processor_class, exist_ok=False):
+        """
+        Register a new processor for this class.
+
+        Args:
+            config_class ([`PretrainedConfig`]):
+                The configuration corresponding to the model to register.
+            processor_class ([`ProcessorMixin`]): The processor to register.
+        """
+        PROCESSOR_MAPPING.register(config_class, processor_class, exist_ok=exist_ok)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py b/venv/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py
new file mode 100644
index 0000000000000000000000000000000000000000..99706afe1655e30164062c035b29ab20d8065ff6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py
@@ -0,0 +1,936 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +""" Auto Tokenizer class.""" + +import importlib +import json +import os +import warnings +from collections import OrderedDict +from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union + +from ...configuration_utils import PretrainedConfig +from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code +from ...tokenization_utils import PreTrainedTokenizer +from ...tokenization_utils_base import TOKENIZER_CONFIG_FILE +from ...utils import ( + cached_file, + extract_commit_hash, + is_g2p_en_available, + is_sentencepiece_available, + is_tokenizers_available, + logging, +) +from ..encoder_decoder import EncoderDecoderConfig +from .auto_factory import _LazyAutoMapping +from .configuration_auto import ( + CONFIG_MAPPING_NAMES, + AutoConfig, + config_class_to_model_type, + model_type_to_module_name, + replace_list_option_in_docstrings, +) + + +if is_tokenizers_available(): + from ...tokenization_utils_fast import PreTrainedTokenizerFast +else: + PreTrainedTokenizerFast = None + + +logger = logging.get_logger(__name__) + +if TYPE_CHECKING: + # This significantly improves completion suggestion performance when + # the transformers package is used with Microsoft's Pylance language server. + TOKENIZER_MAPPING_NAMES: OrderedDict[str, Tuple[Optional[str], Optional[str]]] = OrderedDict() +else: + TOKENIZER_MAPPING_NAMES = OrderedDict( + [ + ( + "albert", + ( + "AlbertTokenizer" if is_sentencepiece_available() else None, + "AlbertTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("align", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ("bark", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ("bart", ("BartTokenizer", "BartTokenizerFast")), + ( + "barthez", + ( + "BarthezTokenizer" if is_sentencepiece_available() else None, + "BarthezTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("bartpho", ("BartphoTokenizer", None)), + ("bert", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ("bert-generation", ("BertGenerationTokenizer" if is_sentencepiece_available() else None, None)), + ("bert-japanese", ("BertJapaneseTokenizer", None)), + ("bertweet", ("BertweetTokenizer", None)), + ( + "big_bird", + ( + "BigBirdTokenizer" if is_sentencepiece_available() else None, + "BigBirdTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("bigbird_pegasus", ("PegasusTokenizer", "PegasusTokenizerFast" if is_tokenizers_available() else None)), + ("biogpt", ("BioGptTokenizer", None)), + ("blenderbot", ("BlenderbotTokenizer", "BlenderbotTokenizerFast")), + ("blenderbot-small", ("BlenderbotSmallTokenizer", None)), + ("blip", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ("blip-2", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)), + ("bloom", (None, "BloomTokenizerFast" if is_tokenizers_available() else None)), + ("bridgetower", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)), + ("bros", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ("byt5", ("ByT5Tokenizer", None)), + ( + "camembert", + ( + "CamembertTokenizer" if is_sentencepiece_available() else None, + "CamembertTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("canine", ("CanineTokenizer", None)), + ("chinese_clip", 
("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ( + "clap", + ( + "RobertaTokenizer", + "RobertaTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "clip", + ( + "CLIPTokenizer", + "CLIPTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "clipseg", + ( + "CLIPTokenizer", + "CLIPTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("clvp", ("ClvpTokenizer", None)), + ( + "code_llama", + ( + "CodeLlamaTokenizer" if is_sentencepiece_available() else None, + "CodeLlamaTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("codegen", ("CodeGenTokenizer", "CodeGenTokenizerFast" if is_tokenizers_available() else None)), + ("cohere", (None, "CohereTokenizerFast" if is_tokenizers_available() else None)), + ("convbert", ("ConvBertTokenizer", "ConvBertTokenizerFast" if is_tokenizers_available() else None)), + ( + "cpm", + ( + "CpmTokenizer" if is_sentencepiece_available() else None, + "CpmTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("cpmant", ("CpmAntTokenizer", None)), + ("ctrl", ("CTRLTokenizer", None)), + ("data2vec-audio", ("Wav2Vec2CTCTokenizer", None)), + ("data2vec-text", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)), + ("dbrx", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)), + ("deberta", ("DebertaTokenizer", "DebertaTokenizerFast" if is_tokenizers_available() else None)), + ( + "deberta-v2", + ( + "DebertaV2Tokenizer" if is_sentencepiece_available() else None, + "DebertaV2TokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("distilbert", ("DistilBertTokenizer", "DistilBertTokenizerFast" if is_tokenizers_available() else None)), + ( + "dpr", + ( + "DPRQuestionEncoderTokenizer", + "DPRQuestionEncoderTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("electra", ("ElectraTokenizer", "ElectraTokenizerFast" if is_tokenizers_available() else None)), + ("ernie", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ("ernie_m", ("ErnieMTokenizer" if is_sentencepiece_available() else None, None)), + ("esm", ("EsmTokenizer", None)), + ("falcon", (None, "PreTrainedTokenizerFast" if is_tokenizers_available() else None)), + ( + "fastspeech2_conformer", + ("FastSpeech2ConformerTokenizer" if is_g2p_en_available() else None, None), + ), + ("flaubert", ("FlaubertTokenizer", None)), + ("fnet", ("FNetTokenizer", "FNetTokenizerFast" if is_tokenizers_available() else None)), + ("fsmt", ("FSMTTokenizer", None)), + ("funnel", ("FunnelTokenizer", "FunnelTokenizerFast" if is_tokenizers_available() else None)), + ( + "gemma", + ( + "GemmaTokenizer" if is_sentencepiece_available() else None, + "GemmaTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("git", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ("gpt-sw3", ("GPTSw3Tokenizer" if is_sentencepiece_available() else None, None)), + ("gpt2", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)), + ("gpt_bigcode", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)), + ("gpt_neo", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)), + ("gpt_neox", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)), + ("gpt_neox_japanese", ("GPTNeoXJapaneseTokenizer", None)), + ("gptj", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)), + ("gptsan-japanese", 
("GPTSanJapaneseTokenizer", None)), + ("grounding-dino", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ("groupvit", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)), + ("herbert", ("HerbertTokenizer", "HerbertTokenizerFast" if is_tokenizers_available() else None)), + ("hubert", ("Wav2Vec2CTCTokenizer", None)), + ("ibert", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)), + ("idefics", (None, "LlamaTokenizerFast" if is_tokenizers_available() else None)), + ("idefics2", ("LlamaTokenizer", "LlamaTokenizerFast" if is_tokenizers_available() else None)), + ("instructblip", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)), + ( + "jamba", + ( + "LlamaTokenizer" if is_sentencepiece_available() else None, + "LlamaTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("jukebox", ("JukeboxTokenizer", None)), + ( + "kosmos-2", + ( + "XLMRobertaTokenizer" if is_sentencepiece_available() else None, + "XLMRobertaTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("layoutlm", ("LayoutLMTokenizer", "LayoutLMTokenizerFast" if is_tokenizers_available() else None)), + ("layoutlmv2", ("LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" if is_tokenizers_available() else None)), + ("layoutlmv3", ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" if is_tokenizers_available() else None)), + ("layoutxlm", ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast" if is_tokenizers_available() else None)), + ("led", ("LEDTokenizer", "LEDTokenizerFast" if is_tokenizers_available() else None)), + ("lilt", ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" if is_tokenizers_available() else None)), + ( + "llama", + ( + "LlamaTokenizer" if is_sentencepiece_available() else None, + "LlamaTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("llava", ("LlamaTokenizer", "LlamaTokenizerFast" if is_tokenizers_available() else None)), + ("llava_next", ("LlamaTokenizer", "LlamaTokenizerFast" if is_tokenizers_available() else None)), + ("longformer", ("LongformerTokenizer", "LongformerTokenizerFast" if is_tokenizers_available() else None)), + ( + "longt5", + ( + "T5Tokenizer" if is_sentencepiece_available() else None, + "T5TokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("luke", ("LukeTokenizer", None)), + ("lxmert", ("LxmertTokenizer", "LxmertTokenizerFast" if is_tokenizers_available() else None)), + ("m2m_100", ("M2M100Tokenizer" if is_sentencepiece_available() else None, None)), + ("mamba", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)), + ("marian", ("MarianTokenizer" if is_sentencepiece_available() else None, None)), + ( + "mbart", + ( + "MBartTokenizer" if is_sentencepiece_available() else None, + "MBartTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "mbart50", + ( + "MBart50Tokenizer" if is_sentencepiece_available() else None, + "MBart50TokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("mega", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)), + ("megatron-bert", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ("mgp-str", ("MgpstrTokenizer", None)), + ( + "mistral", + ( + "LlamaTokenizer" if is_sentencepiece_available() else None, + "LlamaTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "mixtral", + ( + "LlamaTokenizer" if is_sentencepiece_available() else None, + "LlamaTokenizerFast" if 
is_tokenizers_available() else None, + ), + ), + ("mluke", ("MLukeTokenizer" if is_sentencepiece_available() else None, None)), + ("mobilebert", ("MobileBertTokenizer", "MobileBertTokenizerFast" if is_tokenizers_available() else None)), + ("mpnet", ("MPNetTokenizer", "MPNetTokenizerFast" if is_tokenizers_available() else None)), + ("mpt", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)), + ("mra", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)), + ( + "mt5", + ( + "MT5Tokenizer" if is_sentencepiece_available() else None, + "MT5TokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("musicgen", ("T5Tokenizer", "T5TokenizerFast" if is_tokenizers_available() else None)), + ("musicgen_melody", ("T5Tokenizer", "T5TokenizerFast" if is_tokenizers_available() else None)), + ("mvp", ("MvpTokenizer", "MvpTokenizerFast" if is_tokenizers_available() else None)), + ("nezha", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ( + "nllb", + ( + "NllbTokenizer" if is_sentencepiece_available() else None, + "NllbTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "nllb-moe", + ( + "NllbTokenizer" if is_sentencepiece_available() else None, + "NllbTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "nystromformer", + ( + "AlbertTokenizer" if is_sentencepiece_available() else None, + "AlbertTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("olmo", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)), + ("oneformer", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)), + ( + "openai-gpt", + ("OpenAIGPTTokenizer", "OpenAIGPTTokenizerFast" if is_tokenizers_available() else None), + ), + ("opt", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)), + ("owlv2", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)), + ("owlvit", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)), + ( + "pegasus", + ( + "PegasusTokenizer" if is_sentencepiece_available() else None, + "PegasusTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "pegasus_x", + ( + "PegasusTokenizer" if is_sentencepiece_available() else None, + "PegasusTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "perceiver", + ( + "PerceiverTokenizer", + None, + ), + ), + ( + "persimmon", + ( + "LlamaTokenizer" if is_sentencepiece_available() else None, + "LlamaTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("phi", ("CodeGenTokenizer", "CodeGenTokenizerFast" if is_tokenizers_available() else None)), + ("phobert", ("PhobertTokenizer", None)), + ("pix2struct", ("T5Tokenizer", "T5TokenizerFast" if is_tokenizers_available() else None)), + ("plbart", ("PLBartTokenizer" if is_sentencepiece_available() else None, None)), + ("prophetnet", ("ProphetNetTokenizer", None)), + ("qdqbert", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ( + "qwen2", + ( + "Qwen2Tokenizer", + "Qwen2TokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "qwen2_moe", + ( + "Qwen2Tokenizer", + "Qwen2TokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("rag", ("RagTokenizer", None)), + ("realm", ("RealmTokenizer", "RealmTokenizerFast" if is_tokenizers_available() else None)), + ( + "recurrent_gemma", + ( + "GemmaTokenizer" if is_sentencepiece_available() else None, + "GemmaTokenizerFast" if 
is_tokenizers_available() else None, + ), + ), + ( + "reformer", + ( + "ReformerTokenizer" if is_sentencepiece_available() else None, + "ReformerTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "rembert", + ( + "RemBertTokenizer" if is_sentencepiece_available() else None, + "RemBertTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("retribert", ("RetriBertTokenizer", "RetriBertTokenizerFast" if is_tokenizers_available() else None)), + ("roberta", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)), + ( + "roberta-prelayernorm", + ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None), + ), + ("roc_bert", ("RoCBertTokenizer", None)), + ("roformer", ("RoFormerTokenizer", "RoFormerTokenizerFast" if is_tokenizers_available() else None)), + ("rwkv", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)), + ( + "seamless_m4t", + ( + "SeamlessM4TTokenizer" if is_sentencepiece_available() else None, + "SeamlessM4TTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "seamless_m4t_v2", + ( + "SeamlessM4TTokenizer" if is_sentencepiece_available() else None, + "SeamlessM4TTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("siglip", ("SiglipTokenizer" if is_sentencepiece_available() else None, None)), + ("speech_to_text", ("Speech2TextTokenizer" if is_sentencepiece_available() else None, None)), + ("speech_to_text_2", ("Speech2Text2Tokenizer", None)), + ("speecht5", ("SpeechT5Tokenizer" if is_sentencepiece_available() else None, None)), + ("splinter", ("SplinterTokenizer", "SplinterTokenizerFast")), + ( + "squeezebert", + ("SqueezeBertTokenizer", "SqueezeBertTokenizerFast" if is_tokenizers_available() else None), + ), + ("stablelm", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)), + ("starcoder2", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)), + ( + "switch_transformers", + ( + "T5Tokenizer" if is_sentencepiece_available() else None, + "T5TokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "t5", + ( + "T5Tokenizer" if is_sentencepiece_available() else None, + "T5TokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("tapas", ("TapasTokenizer", None)), + ("tapex", ("TapexTokenizer", None)), + ("transfo-xl", ("TransfoXLTokenizer", None)), + ("tvp", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ( + "udop", + ( + "UdopTokenizer" if is_sentencepiece_available() else None, + "UdopTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "umt5", + ( + "T5Tokenizer" if is_sentencepiece_available() else None, + "T5TokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("vilt", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ("vipllava", ("LlamaTokenizer", "LlamaTokenizerFast" if is_tokenizers_available() else None)), + ("visual_bert", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), + ("vits", ("VitsTokenizer", None)), + ("wav2vec2", ("Wav2Vec2CTCTokenizer", None)), + ("wav2vec2-bert", ("Wav2Vec2CTCTokenizer", None)), + ("wav2vec2-conformer", ("Wav2Vec2CTCTokenizer", None)), + ("wav2vec2_phoneme", ("Wav2Vec2PhonemeCTCTokenizer", None)), + ("whisper", ("WhisperTokenizer", "WhisperTokenizerFast" if is_tokenizers_available() else None)), + ("xclip", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)), + ( + "xglm", + ( + "XGLMTokenizer" if is_sentencepiece_available() else None, + "XGLMTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ("xlm", ("XLMTokenizer", None)), + ("xlm-prophetnet", ("XLMProphetNetTokenizer" if is_sentencepiece_available() else None, None)), + ( + "xlm-roberta", + ( + "XLMRobertaTokenizer" if is_sentencepiece_available() else None, + "XLMRobertaTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "xlm-roberta-xl", + ( + "XLMRobertaTokenizer" if is_sentencepiece_available() else None, + "XLMRobertaTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "xlnet", + ( + "XLNetTokenizer" if is_sentencepiece_available() else None, + "XLNetTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "xmod", + ( + "XLMRobertaTokenizer" if is_sentencepiece_available() else None, + "XLMRobertaTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ( + "yoso", + ( + "AlbertTokenizer" if is_sentencepiece_available() else None, + "AlbertTokenizerFast" if is_tokenizers_available() else None, + ), + ), + ] + ) + +TOKENIZER_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TOKENIZER_MAPPING_NAMES) + +CONFIG_TO_TYPE = {v: k for k, v in CONFIG_MAPPING_NAMES.items()} + + +def tokenizer_class_from_name(class_name: str): + if class_name == "PreTrainedTokenizerFast": + return PreTrainedTokenizerFast + + for module_name, tokenizers in TOKENIZER_MAPPING_NAMES.items(): + if class_name in tokenizers: + module_name = model_type_to_module_name(module_name) + + module = importlib.import_module(f".{module_name}", "transformers.models") + try: + return getattr(module, class_name) + except AttributeError: + continue + + for config, tokenizers in TOKENIZER_MAPPING._extra_content.items(): + for tokenizer in tokenizers: + if getattr(tokenizer, "__name__", None) == class_name: + return tokenizer + + # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main + # init and we return the proper dummy to get an appropriate error message. + main_module = importlib.import_module("transformers") + if hasattr(main_module, class_name): + return getattr(main_module, class_name) + + return None + + +def get_tokenizer_config( + pretrained_model_name_or_path: Union[str, os.PathLike], + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + resume_download: bool = False, + proxies: Optional[Dict[str, str]] = None, + token: Optional[Union[bool, str]] = None, + revision: Optional[str] = None, + local_files_only: bool = False, + subfolder: str = "", + **kwargs, +): + """ + Loads the tokenizer configuration from a pretrained model tokenizer configuration. + + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained model configuration hosted inside a model repo on + huggingface.co. + - a path to a *directory* containing a configuration file saved using the + [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. + + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the standard + cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force a (re-)download of the configuration files and override the cached versions if they + exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received files.
Attempts to resume the download if such a file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, will only try to load the tokenizer configuration from local files. + subfolder (`str`, *optional*, defaults to `""`): + In case the tokenizer config is located inside a subfolder of the model repo on huggingface.co, you can + specify the folder name here. + + + + Passing `token=True` is required when you want to use a private model. + + + + Returns: + `Dict`: The configuration of the tokenizer. + + Examples: + + ```python + # Download configuration from huggingface.co and cache. + tokenizer_config = get_tokenizer_config("google-bert/bert-base-uncased") + # This model does not have a tokenizer config so the result will be an empty dict. + tokenizer_config = get_tokenizer_config("FacebookAI/xlm-roberta-base") + + # Save a pretrained tokenizer locally and you can reload its config + from transformers import AutoTokenizer + + tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased") + tokenizer.save_pretrained("tokenizer-test") + tokenizer_config = get_tokenizer_config("tokenizer-test") + ```""" + use_auth_token = kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") + token = use_auth_token + + commit_hash = kwargs.get("_commit_hash", None) + resolved_config_file = cached_file( + pretrained_model_name_or_path, + TOKENIZER_CONFIG_FILE, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + token=token, + revision=revision, + local_files_only=local_files_only, + subfolder=subfolder, + _raise_exceptions_for_gated_repo=False, + _raise_exceptions_for_missing_entries=False, + _raise_exceptions_for_connection_errors=False, + _commit_hash=commit_hash, + ) + if resolved_config_file is None: + logger.info("Could not locate the tokenizer configuration file, will try to use the model config instead.") + return {} + commit_hash = extract_commit_hash(resolved_config_file, commit_hash) + + with open(resolved_config_file, encoding="utf-8") as reader: + result = json.load(reader) + result["_commit_hash"] = commit_hash + return result + + +class AutoTokenizer: + r""" + This is a generic tokenizer class that will be instantiated as one of the tokenizer classes of the library when + created with the [`AutoTokenizer.from_pretrained`] class method. + + This class cannot be instantiated directly using `__init__()` (throws an error). 
+ """ + + def __init__(self): + raise EnvironmentError( + "AutoTokenizer is designed to be instantiated " + "using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method." + ) + + @classmethod + @replace_list_option_in_docstrings(TOKENIZER_MAPPING_NAMES) + def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs): + r""" + Instantiate one of the tokenizer classes of the library from a pretrained model vocabulary. + + The tokenizer class to instantiate is selected based on the `model_type` property of the config object (either + passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by + falling back to using pattern matching on `pretrained_model_name_or_path`: + + List options + + Params: + pretrained_model_name_or_path (`str` or `os.PathLike`): + Can be either: + + - A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co. + - A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved + using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. + - A path or URL to a single saved vocabulary file if and only if the tokenizer only requires a + single vocabulary file (like Bert or XLNet), e.g.: `./my_model_directory/vocab.txt`. (Not + applicable to all derived classes) + inputs (additional positional arguments, *optional*): + Will be passed along to the Tokenizer `__init__()` method. + config ([`PretrainedConfig`], *optional*): + The configuration object used to determine the tokenizer class to instantiate. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the + standard cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force a (re-)download of the model weights and configuration files and override the + cached versions if they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received files. Will attempt to resume the download if such a + file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + subfolder (`str`, *optional*): + In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for + facebook/rag-token-base), specify it here. + use_fast (`bool`, *optional*, defaults to `True`): + Use a [fast Rust-based tokenizer](https://huggingface.co/docs/tokenizers/index) if it is supported for + a given model. If a fast tokenizer is not available for a given model, a normal Python-based tokenizer + is returned instead. + tokenizer_type (`str`, *optional*): + Tokenizer type to be loaded. + trust_remote_code (`bool`, *optional*, defaults to `False`): + Whether or not to allow for custom models defined on the Hub in their own modeling files.
This option + should only be set to `True` for repositories you trust and in which you have read the code, as it will + execute code present on the Hub on your local machine. + kwargs (additional keyword arguments, *optional*): + Will be passed to the Tokenizer `__init__()` method. Can be used to set special tokens like + `bos_token`, `eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`, + `additional_special_tokens`. See parameters in the `__init__()` for more details. + + Examples: + + ```python + >>> from transformers import AutoTokenizer + + >>> # Download vocabulary from huggingface.co and cache. + >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased") + + >>> # Download vocabulary from huggingface.co (user-uploaded) and cache. + >>> tokenizer = AutoTokenizer.from_pretrained("dbmdz/bert-base-german-cased") + + >>> # If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*) + >>> # tokenizer = AutoTokenizer.from_pretrained("./test/bert_saved_model/") + + >>> # Download vocabulary from huggingface.co and define model-specific arguments + >>> tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base", add_prefix_space=True) + ```""" + use_auth_token = kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if kwargs.get("token", None) is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." + ) + kwargs["token"] = use_auth_token + + config = kwargs.pop("config", None) + kwargs["_from_auto"] = True + + use_fast = kwargs.pop("use_fast", True) + tokenizer_type = kwargs.pop("tokenizer_type", None) + trust_remote_code = kwargs.pop("trust_remote_code", None) + + # First, let's see whether the tokenizer_type is passed so that we can leverage it + if tokenizer_type is not None: + tokenizer_class = None + tokenizer_class_tuple = TOKENIZER_MAPPING_NAMES.get(tokenizer_type, None) + + if tokenizer_class_tuple is None: + raise ValueError( + f"Passed `tokenizer_type` {tokenizer_type} does not exist. `tokenizer_type` should be one of " + f"{', '.join(c for c in TOKENIZER_MAPPING_NAMES.keys())}." + ) + + tokenizer_class_name, tokenizer_fast_class_name = tokenizer_class_tuple + + if use_fast: + if tokenizer_fast_class_name is not None: + tokenizer_class = tokenizer_class_from_name(tokenizer_fast_class_name) + else: + logger.warning( + "`use_fast` is set to `True` but the tokenizer class does not have a fast version. " + " Falling back to the slow version." + ) + if tokenizer_class is None: + tokenizer_class = tokenizer_class_from_name(tokenizer_class_name) + + if tokenizer_class is None: + raise ValueError(f"Tokenizer class {tokenizer_class_name} is not currently imported.") + + return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) + + # Next, let's try to use the tokenizer_config file to get the tokenizer class. 
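+ # (Illustrative sketch, not upstream code: the tokenizer_config.json resolved below is a plain dict, e.g.
+ #     {"tokenizer_class": "BertTokenizer", "do_lower_case": True, "model_max_length": 512}
+ # The field values here are hypothetical; only "tokenizer_class" and "auto_map" are consumed by the logic
+ # that follows.)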
+ tokenizer_config = get_tokenizer_config(pretrained_model_name_or_path, **kwargs) + if "_commit_hash" in tokenizer_config: + kwargs["_commit_hash"] = tokenizer_config["_commit_hash"] + config_tokenizer_class = tokenizer_config.get("tokenizer_class") + tokenizer_auto_map = None + if "auto_map" in tokenizer_config: + if isinstance(tokenizer_config["auto_map"], (tuple, list)): + # Legacy format for dynamic tokenizers + tokenizer_auto_map = tokenizer_config["auto_map"] + else: + tokenizer_auto_map = tokenizer_config["auto_map"].get("AutoTokenizer", None) + + # If that did not work, let's try to use the config. + if config_tokenizer_class is None: + if not isinstance(config, PretrainedConfig): + config = AutoConfig.from_pretrained( + pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs + ) + config_tokenizer_class = config.tokenizer_class + if hasattr(config, "auto_map") and "AutoTokenizer" in config.auto_map: + tokenizer_auto_map = config.auto_map["AutoTokenizer"] + + has_remote_code = tokenizer_auto_map is not None + has_local_code = type(config) in TOKENIZER_MAPPING or ( + config_tokenizer_class is not None + and ( + tokenizer_class_from_name(config_tokenizer_class) is not None + or tokenizer_class_from_name(config_tokenizer_class + "Fast") is not None + ) + ) + trust_remote_code = resolve_trust_remote_code( + trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code + ) + + if has_remote_code and trust_remote_code: + if use_fast and tokenizer_auto_map[1] is not None: + class_ref = tokenizer_auto_map[1] + else: + class_ref = tokenizer_auto_map[0] + tokenizer_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, **kwargs) + _ = kwargs.pop("code_revision", None) + if os.path.isdir(pretrained_model_name_or_path): + tokenizer_class.register_for_auto_class() + return tokenizer_class.from_pretrained( + pretrained_model_name_or_path, *inputs, trust_remote_code=trust_remote_code, **kwargs + ) + elif config_tokenizer_class is not None: + tokenizer_class = None + if use_fast and not config_tokenizer_class.endswith("Fast"): + tokenizer_class_candidate = f"{config_tokenizer_class}Fast" + tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate) + if tokenizer_class is None: + tokenizer_class_candidate = config_tokenizer_class + tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate) + if tokenizer_class is None: + raise ValueError( + f"Tokenizer class {tokenizer_class_candidate} does not exist or is not currently imported." + ) + return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) + + # Otherwise we have to be creative. + # if model is an encoder decoder, the encoder tokenizer class is used by default + if isinstance(config, EncoderDecoderConfig): + if type(config.decoder) is not type(config.encoder): # noqa: E721 + logger.warning( + f"The encoder model config class: {config.encoder.__class__} is different from the decoder model " + f"config class: {config.decoder.__class__}. It is not recommended to use the " + "`AutoTokenizer.from_pretrained()` method in this case. Please use the encoder and decoder " + "specific tokenizer classes." 
+ ) + config = config.encoder + + model_type = config_class_to_model_type(type(config).__name__) + if model_type is not None: + tokenizer_class_py, tokenizer_class_fast = TOKENIZER_MAPPING[type(config)] + if tokenizer_class_fast and (use_fast or tokenizer_class_py is None): + return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) + else: + if tokenizer_class_py is not None: + return tokenizer_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) + else: + raise ValueError( + "This tokenizer cannot be instantiated. Please make sure you have `sentencepiece` installed " + "in order to use this tokenizer." + ) + + raise ValueError( + f"Unrecognized configuration class {config.__class__} to build an AutoTokenizer.\n" + f"Model type should be one of {', '.join(c.__name__ for c in TOKENIZER_MAPPING.keys())}." + ) + + @staticmethod + def register(config_class, slow_tokenizer_class=None, fast_tokenizer_class=None, exist_ok=False): + """ + Register a new tokenizer in this mapping. + + Args: + config_class ([`PretrainedConfig`]): + The configuration corresponding to the model to register. + slow_tokenizer_class ([`PreTrainedTokenizer`], *optional*): + The slow tokenizer to register. + fast_tokenizer_class ([`PreTrainedTokenizerFast`], *optional*): + The fast tokenizer to register. + exist_ok (`bool`, *optional*, defaults to `False`): + If `True`, allow overriding an existing entry for `config_class` instead of raising an error. + """ + if slow_tokenizer_class is None and fast_tokenizer_class is None: + raise ValueError("You need to pass either a `slow_tokenizer_class` or a `fast_tokenizer_class`") + if slow_tokenizer_class is not None and issubclass(slow_tokenizer_class, PreTrainedTokenizerFast): + raise ValueError("You passed a fast tokenizer in the `slow_tokenizer_class`.") + if fast_tokenizer_class is not None and issubclass(fast_tokenizer_class, PreTrainedTokenizer): + raise ValueError("You passed a slow tokenizer in the `fast_tokenizer_class`.") + + if ( + slow_tokenizer_class is not None + and fast_tokenizer_class is not None + and issubclass(fast_tokenizer_class, PreTrainedTokenizerFast) + and fast_tokenizer_class.slow_tokenizer_class != slow_tokenizer_class + ): + raise ValueError( + "The fast tokenizer class you are passing has a `slow_tokenizer_class` attribute that is not " + "consistent with the slow tokenizer class you passed (fast tokenizer has " + f"{fast_tokenizer_class.slow_tokenizer_class} and you passed {slow_tokenizer_class}). Fix one of those " + "so they match!" + ) + + # Avoid resetting a set slow/fast tokenizer if we are passing just the other ones. + if config_class in TOKENIZER_MAPPING._extra_content: + existing_slow, existing_fast = TOKENIZER_MAPPING[config_class] + if slow_tokenizer_class is None: + slow_tokenizer_class = existing_slow + if fast_tokenizer_class is None: + fast_tokenizer_class = existing_fast + + TOKENIZER_MAPPING.register(config_class, (slow_tokenizer_class, fast_tokenizer_class), exist_ok=exist_ok) diff --git a/venv/lib/python3.10/site-packages/transformers/models/lxmert/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/lxmert/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4f7e775431dd0a250dbbb5ca422f1a81be919225 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/lxmert/__init__.py @@ -0,0 +1,117 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_tf_available, + is_tokenizers_available, + is_torch_available, +) + + +_import_structure = { + "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"], + "tokenization_lxmert": ["LxmertTokenizer"], +} + +try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"] + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_lxmert"] = [ + "LxmertEncoder", + "LxmertForPreTraining", + "LxmertForQuestionAnswering", + "LxmertModel", + "LxmertPreTrainedModel", + "LxmertVisualFeatureEncoder", + "LxmertXLayer", + ] + +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tf_lxmert"] = [ + "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFLxmertForPreTraining", + "TFLxmertMainLayer", + "TFLxmertModel", + "TFLxmertPreTrainedModel", + "TFLxmertVisualFeatureEncoder", + ] + + +if TYPE_CHECKING: + from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig + from .tokenization_lxmert import LxmertTokenizer + + try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .tokenization_lxmert_fast import LxmertTokenizerFast + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_lxmert import ( + LxmertEncoder, + LxmertForPreTraining, + LxmertForQuestionAnswering, + LxmertModel, + LxmertPreTrainedModel, + LxmertVisualFeatureEncoder, + LxmertXLayer, + ) + + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tf_lxmert import ( + TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFLxmertForPreTraining, + TFLxmertMainLayer, + TFLxmertModel, + TFLxmertPreTrainedModel, + TFLxmertVisualFeatureEncoder, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64b219e25eca3f44c6fb9e8a82bb4822d38236a0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/configuration_lxmert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/configuration_lxmert.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..8fc35097d2f3989fd2534319a01e21de9743f7df Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/configuration_lxmert.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/convert_lxmert_original_tf_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/convert_lxmert_original_tf_checkpoint_to_pytorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c8bf8971411b96c8da8dc3e0156063c8c6f3c05 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/convert_lxmert_original_tf_checkpoint_to_pytorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_lxmert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_lxmert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..def89199ef7776725eca8397c7b0d3fc3b8fcad3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_lxmert.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_tf_lxmert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_tf_lxmert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73fe34fe7e5af16b9082acbc29b0abd4ec881bfe Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_tf_lxmert.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0651db5270dda7c1859fcd9baa15e0bca4fd65f8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert_fast.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert_fast.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3bf1ba834e8f48accfc3069326accd5994ed73f Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert_fast.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/lxmert/configuration_lxmert.py b/venv/lib/python3.10/site-packages/transformers/models/lxmert/configuration_lxmert.py new file mode 100644 index 0000000000000000000000000000000000000000..b79fb67908d27e8d36a19c30c3af296e2abe785a --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/lxmert/configuration_lxmert.py @@ -0,0 +1,170 @@ +# coding=utf-8 +# Copyright 2018, Hao Tan, Mohit Bansal +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" LXMERT model configuration""" + + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402 + + +class LxmertConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`LxmertModel`] or a [`TFLxmertModel`]. It is used + to instantiate a LXMERT model according to the specified arguments, defining the model architecture. Instantiating + a configuration with the defaults will yield a similar configuration to that of the Lxmert + [unc-nlp/lxmert-base-uncased](https://huggingface.co/unc-nlp/lxmert-base-uncased) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 30522): + Vocabulary size of the LXMERT model. Defines the number of different tokens that can be represented by the + `input_ids` passed when calling [`LxmertModel`] or [`TFLxmertModel`]. + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + num_qa_labels (`int`, *optional*, defaults to 9500): + This represents the total number of different question answering (QA) labels there are. If using more than + one dataset with QA, the user will need to account for the total number of labels across all of the + datasets. + num_object_labels (`int`, *optional*, defaults to 1600): + This represents the total number of semantically unique objects that lxmert will be able to classify a + pooled-object feature as belonging to. + num_attr_labels (`int`, *optional*, defaults to 400): + This represents the total number of semantically unique attributes that lxmert will be able to classify a + pooled-object feature as possessing. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + max_position_embeddings (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + type_vocab_size (`int`, *optional*, defaults to 2): + The vocabulary size of the *token_type_ids* passed into [`BertModel`].
+ initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + l_layers (`int`, *optional*, defaults to 9): + Number of hidden layers in the Transformer language encoder. + x_layers (`int`, *optional*, defaults to 5): + Number of hidden layers in the Transformer cross-modality encoder. + r_layers (`int`, *optional*, defaults to 5): + Number of hidden layers in the Transformer visual encoder. + visual_feat_dim (`int`, *optional*, defaults to 2048): + This represents the last dimension of the pooled-object features used as input for the model, representing + the size of each object feature itself. + visual_pos_dim (`int`, *optional*, defaults to 4): + This represents the number of spatial features that are mixed into the visual features. The default is set + to 4 because most commonly this will represent the location of a bounding box, i.e., (x, y, width, height). + visual_loss_normalizer (`float`, *optional*, defaults to 6.67): + This represents the scaling factor by which each visual loss is multiplied if, during pretraining, one + decides to train with multiple vision-based loss objectives. + task_matched (`bool`, *optional*, defaults to `True`): + This task is used for sentence-image matching. If the sentence correctly describes the image, the label will + be 1. If the sentence does not correctly describe the image, the label will be 0. + task_mask_lm (`bool`, *optional*, defaults to `True`): + Whether or not to add masked language modeling (as used in pretraining models such as BERT) to the loss + objective. + task_obj_predict (`bool`, *optional*, defaults to `True`): + Whether or not to add object prediction, attribute prediction and feature regression to the loss objective.
+ task_qa (`bool`, *optional*, defaults to `True`): + Whether or not to add the question-answering loss to the objective + visual_obj_loss (`bool`, *optional*, defaults to `True`): + Whether or not to calculate the object-prediction loss objective + visual_attr_loss (`bool`, *optional*, defaults to `True`): + Whether or not to calculate the attribute-prediction loss objective + visual_feat_loss (`bool`, *optional*, defaults to `True`): + Whether or not to calculate the feature-regression loss objective + """ + + model_type = "lxmert" + attribute_map = {} + + def __init__( + self, + vocab_size=30522, + hidden_size=768, + num_attention_heads=12, + num_qa_labels=9500, + num_object_labels=1600, + num_attr_labels=400, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=2, + initializer_range=0.02, + layer_norm_eps=1e-12, + l_layers=9, + x_layers=5, + r_layers=5, + visual_feat_dim=2048, + visual_pos_dim=4, + visual_loss_normalizer=6.67, + task_matched=True, + task_mask_lm=True, + task_obj_predict=True, + task_qa=True, + visual_obj_loss=True, + visual_attr_loss=True, + visual_feat_loss=True, + **kwargs, + ): + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.num_qa_labels = num_qa_labels + self.num_object_labels = num_object_labels + self.num_attr_labels = num_attr_labels + self.l_layers = l_layers + self.x_layers = x_layers + self.r_layers = r_layers + self.visual_feat_dim = visual_feat_dim + self.visual_pos_dim = visual_pos_dim + self.visual_loss_normalizer = visual_loss_normalizer + self.task_matched = task_matched + self.task_mask_lm = task_mask_lm + self.task_obj_predict = task_obj_predict + self.task_qa = task_qa + self.visual_obj_loss = visual_obj_loss + self.visual_attr_loss = visual_attr_loss + self.visual_feat_loss = visual_feat_loss + self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers} + super().__init__(**kwargs) diff --git a/venv/lib/python3.10/site-packages/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..f8eb86f1d1e48a1459154b647fb2f4178df338b0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert LXMERT checkpoint.""" + + +import argparse + +import torch + +from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert +from transformers.utils import logging + + +logging.set_verbosity_info() + + +def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path): + # Initialise PyTorch model + config = LxmertConfig.from_json_file(config_file) + print(f"Building PyTorch model from configuration: {config}") + model = LxmertForPreTraining(config) + + # Load weights from tf checkpoint + load_tf_weights_in_lxmert(model, config, tf_checkpoint_path) + + # Save pytorch-model + print(f"Save PyTorch model to {pytorch_dump_path}") + torch.save(model.state_dict(), pytorch_dump_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." + ) + parser.add_argument( + "--config_file", + default=None, + type=str, + required=True, + help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.", + ) + parser.add_argument( + "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." + ) + args = parser.parse_args() + convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) diff --git a/venv/lib/python3.10/site-packages/transformers/models/lxmert/modeling_lxmert.py b/venv/lib/python3.10/site-packages/transformers/models/lxmert/modeling_lxmert.py new file mode 100644 index 0000000000000000000000000000000000000000..6e2ae7d22e7cacccb7b8ab6213961c0a9c7ce68a --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/lxmert/modeling_lxmert.py @@ -0,0 +1,1434 @@ +# coding=utf-8 +# Copyright 2018 Hao Tan, Mohit Bansal, and the HuggingFace team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" PyTorch LXMERT model.""" + + +import math +import os +import warnings +from dataclasses import dataclass +from typing import Dict, Optional, Tuple, Union + +import torch +from torch import nn +from torch.nn import CrossEntropyLoss, SmoothL1Loss + +from ...activations import ACT2FN, gelu +from ...modeling_utils import PreTrainedModel +from ...utils import ( + ModelOutput, + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_lxmert import LxmertConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "unc-nlp/lxmert-base-uncased" +_CONFIG_FOR_DOC = "LxmertConfig" + + +class GeLU(nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return gelu(x) + + +@dataclass +class LxmertModelOutput(ModelOutput): + """ + Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language, + visual, and cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the "relationship" + encoder) + + + Args: + language_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the language encoder. + vision_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the visual encoder. + pooled_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): + Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed + by a Linear layer and a Tanh activation function. + language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of + shape `(batch_size, sequence_length, hidden_size)`. + vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of + shape `(batch_size, sequence_length, hidden_size)`. + language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`.
Attention weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + """ + + language_output: Optional[torch.FloatTensor] = None + vision_output: Optional[torch.FloatTensor] = None + pooled_output: Optional[torch.FloatTensor] = None + language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + language_attentions: Optional[Tuple[torch.FloatTensor]] = None + vision_attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class LxmertForQuestionAnsweringOutput(ModelOutput): + """ + Output type of [`LxmertForQuestionAnswering`]. + + Args: + loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): + Total loss as the sum of the masked language modeling loss and the next sequence prediction + (classification) loss. + question_answering_score (`torch.FloatTensor` of shape `(batch_size, n_qa_answers)`, *optional*): + Prediction scores of question answering objective (classification). + language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of + shape `(batch_size, sequence_length, hidden_size)`. + vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of + shape `(batch_size, sequence_length, hidden_size)`. + language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + """ + + loss: Optional[torch.FloatTensor] = None + question_answering_score: Optional[torch.FloatTensor] = None + language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + language_attentions: Optional[Tuple[torch.FloatTensor]] = None + vision_attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class LxmertForPreTrainingOutput(ModelOutput): + """ + Output type of [`LxmertForPreTraining`].
+ + Args: + loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): + Total loss as the sum of the masked language modeling loss and the next sequence prediction + (classification) loss. + prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + cross_relationship_score (`torch.FloatTensor` of shape `(batch_size, 2)`): + Prediction scores of the textual matching objective (classification) head (scores of True/False + continuation before SoftMax). + question_answering_score (`torch.FloatTensor` of shape `(batch_size, n_qa_answers)`): + Prediction scores of question answering objective (classification). + language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of + shape `(batch_size, sequence_length, hidden_size)`. + vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of + shape `(batch_size, sequence_length, hidden_size)`. + language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + + """ + + loss: Optional[torch.FloatTensor] = None + prediction_logits: Optional[torch.FloatTensor] = None + cross_relationship_score: Optional[torch.FloatTensor] = None + question_answering_score: Optional[torch.FloatTensor] = None + language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + language_attentions: Optional[Tuple[torch.FloatTensor]] = None + vision_attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + + +def load_tf_weights_in_lxmert(model, config, tf_checkpoint_path): + """Load TF checkpoints into a PyTorch model.""" + try: + import re + + import numpy as np + import tensorflow as tf + except ImportError: + logger.error( + "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed.
Please see " + "https://www.tensorflow.org/install/ for installation instructions." + ) + raise + tf_path = os.path.abspath(tf_checkpoint_path) + logger.info(f"Converting TensorFlow checkpoint from {tf_path}") + # Load weights from TF model + init_vars = tf.train.list_variables(tf_path) + names = [] + arrays = [] + for name, shape in init_vars: + logger.info(f"Loading TF weight {name} with shape {shape}") + array = tf.train.load_variable(tf_path, name) + names.append(name) + arrays.append(array) + + for name, array in zip(names, arrays): + name = name.split("/") + # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v, + # which are not required for using the pretrained model + if any( + n + in [ + "adam_v", + "adam_m", + "AdamWeightDecayOptimizer", + "AdamWeightDecayOptimizer_1", + "global_step", + ] + for n in name + ): + logger.info(f"Skipping {'/'.join(name)}") + continue + pointer = model + for m_name in name: + if re.fullmatch(r"[A-Za-z]+_\d+", m_name): + scope_names = re.split(r"_(\d+)", m_name) + else: + scope_names = [m_name] + if scope_names[0] == "kernel" or scope_names[0] == "gamma": + pointer = getattr(pointer, "weight") + elif scope_names[0] == "output_bias" or scope_names[0] == "beta": + pointer = getattr(pointer, "bias") + elif scope_names[0] == "output_weights": + pointer = getattr(pointer, "weight") + elif scope_names[0] == "squad": + pointer = getattr(pointer, "classifier") + else: + try: + pointer = getattr(pointer, scope_names[0]) + except AttributeError: + logger.info(f"Skipping {'/'.join(name)}") + continue + if len(scope_names) >= 2: + num = int(scope_names[1]) + pointer = pointer[num] + if m_name[-11:] == "_embeddings": + pointer = getattr(pointer, "weight") + elif m_name == "kernel": + array = np.transpose(array) + try: + assert pointer.shape == array.shape + except AssertionError as e: + e.args += (pointer.shape, array.shape) + raise + logger.info(f"Initialize PyTorch weight {name}") + pointer.data = torch.from_numpy(array) + return model + + +class LxmertEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=0) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size, padding_idx=0) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, input_ids, token_type_ids=None, inputs_embeds=None): + if input_ids is not None: + input_shape = input_ids.size() + device = input_ids.device + else: + input_shape = inputs_embeds.size()[:-1] + device = inputs_embeds.device + seq_length = input_shape[1] + + position_ids = torch.arange(seq_length, dtype=torch.long, device=device) + position_ids = position_ids.unsqueeze(0).expand(input_shape) + + if token_type_ids is None: + # The module has no position_ids buffer; use the locally computed device. + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + position_embeddings = self.position_embeddings(position_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + 
position_embeddings + token_type_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class LxmertAttention(nn.Module): + def __init__(self, config, ctx_dim=None): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.head_size = self.num_attention_heads * self.attention_head_size + + # visual_dim = 2048 + if ctx_dim is None: + ctx_dim = config.hidden_size + self.query = nn.Linear(config.hidden_size, self.head_size) + self.key = nn.Linear(ctx_dim, self.head_size) + self.value = nn.Linear(ctx_dim, self.head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + ( + self.num_attention_heads, + self.attention_head_size, + ) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward(self, hidden_states, context, attention_mask=None, output_attentions=False): + mixed_query_layer = self.query(hidden_states) + mixed_key_layer = self.key(context) + mixed_value_layer = self.value(context) + + query_layer = self.transpose_for_scores(mixed_query_layer) + key_layer = self.transpose_for_scores(mixed_key_layer) + value_layer = self.transpose_for_scores(mixed_value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + # Apply the attention mask (precomputed for all layers in the BertModel forward() function) + if attention_mask is not None: + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper.
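+ # (Illustrative note, assuming the shapes produced by transpose_for_scores above: attention_probs is
+ # (batch_size, num_heads, seq_len_q, seq_len_k), so zeroing one entry removes the weight a single query
+ # position places on a single key/value token.)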
+ attention_probs = self.dropout(attention_probs) + + context_layer = torch.matmul(attention_probs, value_layer) + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + return outputs + + +class LxmertAttentionOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class LxmertCrossAttentionLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.att = LxmertAttention(config) + self.output = LxmertAttentionOutput(config) + + def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None, output_attentions=False): + output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions=output_attentions) + if output_attentions: + attention_probs = output[1] + attention_output = self.output(output[0], input_tensor) + outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) + return outputs + + +class LxmertSelfAttentionLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.self = LxmertAttention(config) + self.output = LxmertAttentionOutput(config) + + def forward(self, input_tensor, attention_mask, output_attentions=False): + # Self attention attends to itself, thus keys and queries are the same (input_tensor). 
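+ # (Illustrative note: LxmertAttention.forward(hidden_states, context, ...) draws keys and values from
+ # `context`, so passing input_tensor for both arguments reduces the cross-attention module to plain
+ # self-attention over one sequence.)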
+ output = self.self( + input_tensor, + input_tensor, + attention_mask, + output_attentions=output_attentions, + ) + if output_attentions: + attention_probs = output[1] + attention_output = self.output(output[0], input_tensor) + outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) + return outputs + + +class LxmertIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + self.intermediate_act_fn = ACT2FN[config.hidden_act] + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class LxmertOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class LxmertLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.attention = LxmertSelfAttentionLayer(config) + self.intermediate = LxmertIntermediate(config) + self.output = LxmertOutput(config) + + def forward(self, hidden_states, attention_mask=None, output_attentions=False): + outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions) + attention_output = outputs[0] + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + outputs = (layer_output,) + outputs[1:] # add attentions if we output them + return outputs + + +class LxmertXLayer(nn.Module): + def __init__(self, config): + super().__init__() + # The cross-attention Layer + self.visual_attention = LxmertCrossAttentionLayer(config) + + # Self-attention Layers + self.lang_self_att = LxmertSelfAttentionLayer(config) + self.visn_self_att = LxmertSelfAttentionLayer(config) + + # Intermediate and Output Layers (FFNs) + self.lang_inter = LxmertIntermediate(config) + self.lang_output = LxmertOutput(config) + self.visn_inter = LxmertIntermediate(config) + self.visn_output = LxmertOutput(config) + + def cross_att( + self, + lang_input, + lang_attention_mask, + visual_input, + visual_attention_mask, + output_x_attentions=False, + ): + # Cross Attention + lang_att_output = self.visual_attention( + lang_input, + visual_input, + ctx_att_mask=visual_attention_mask, + output_attentions=output_x_attentions, + ) + visual_att_output = self.visual_attention( + visual_input, + lang_input, + ctx_att_mask=lang_attention_mask, + output_attentions=False, + ) + return lang_att_output, visual_att_output + + def self_att(self, lang_input, lang_attention_mask, visual_input, visual_attention_mask): + # Self Attention + lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions=False) + visual_att_output = self.visn_self_att(visual_input, visual_attention_mask, output_attentions=False) + return lang_att_output[0], visual_att_output[0] + + def output_fc(self, lang_input, visual_input): + # FC layers + lang_inter_output = self.lang_inter(lang_input) + visual_inter_output = self.visn_inter(visual_input) + + # Layer output + lang_output = self.lang_output(lang_inter_output, lang_input) + visual_output = 
self.visn_output(visual_inter_output, visual_input) + + return lang_output, visual_output + + def forward( + self, + lang_feats, + lang_attention_mask, + visual_feats, + visual_attention_mask, + output_attentions=False, + ): + lang_att_output, visual_att_output = self.cross_att( + lang_input=lang_feats, + lang_attention_mask=lang_attention_mask, + visual_input=visual_feats, + visual_attention_mask=visual_attention_mask, + output_x_attentions=output_attentions, + ) + attention_probs = lang_att_output[1:] + lang_att_output, visual_att_output = self.self_att( + lang_att_output[0], + lang_attention_mask, + visual_att_output[0], + visual_attention_mask, + ) + + lang_output, visual_output = self.output_fc(lang_att_output, visual_att_output) + return ( + ( + lang_output, + visual_output, + attention_probs[0], + ) + if output_attentions + else (lang_output, visual_output) + ) + + +class LxmertVisualFeatureEncoder(nn.Module): + def __init__(self, config): + super().__init__() + feat_dim = config.visual_feat_dim + pos_dim = config.visual_pos_dim + + # Object feature encoding + self.visn_fc = nn.Linear(feat_dim, config.hidden_size) + self.visn_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12) + + # Box position encoding + self.box_fc = nn.Linear(pos_dim, config.hidden_size) + self.box_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12) + + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, visual_feats, visual_pos): + x = self.visn_fc(visual_feats) + x = self.visn_layer_norm(x) + y = self.box_fc(visual_pos) + y = self.box_layer_norm(y) + output = (x + y) / 2 + + output = self.dropout(output) + return output + + +class LxmertEncoder(nn.Module): + def __init__(self, config): + super().__init__() + + # Obj-level image embedding layer + self.visn_fc = LxmertVisualFeatureEncoder(config) + self.config = config + + # Number of layers + self.num_l_layers = config.l_layers + self.num_x_layers = config.x_layers + self.num_r_layers = config.r_layers + + # Layers + # Using self.layer instead of self.l_layer to support loading BERT weights. 
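+        # The encoder first runs `l_layers` language-only blocks and `r_layers`
+        # vision-only blocks, then `x_layers` cross-modality blocks that update
+        # both streams (see forward() below).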
+ self.layer = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_l_layers)]) + self.x_layers = nn.ModuleList([LxmertXLayer(config) for _ in range(self.num_x_layers)]) + self.r_layers = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_r_layers)]) + + def forward( + self, + lang_feats, + lang_attention_mask, + visual_feats, + visual_pos, + visual_attention_mask=None, + output_attentions=None, + ): + vision_hidden_states = () + language_hidden_states = () + vision_attentions = () if output_attentions or self.config.output_attentions else None + language_attentions = () if output_attentions or self.config.output_attentions else None + cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None + + visual_feats = self.visn_fc(visual_feats, visual_pos) + + # Run language layers + for layer_module in self.layer: + l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions=output_attentions) + lang_feats = l_outputs[0] + language_hidden_states = language_hidden_states + (lang_feats,) + if language_attentions is not None: + language_attentions = language_attentions + (l_outputs[1],) + + # Run relational layers + for layer_module in self.r_layers: + v_outputs = layer_module(visual_feats, visual_attention_mask, output_attentions=output_attentions) + visual_feats = v_outputs[0] + vision_hidden_states = vision_hidden_states + (visual_feats,) + if vision_attentions is not None: + vision_attentions = vision_attentions + (v_outputs[1],) + + # Run cross-modality layers + for layer_module in self.x_layers: + x_outputs = layer_module( + lang_feats, + lang_attention_mask, + visual_feats, + visual_attention_mask, + output_attentions=output_attentions, + ) + lang_feats, visual_feats = x_outputs[:2] + vision_hidden_states = vision_hidden_states + (visual_feats,) + language_hidden_states = language_hidden_states + (lang_feats,) + if cross_encoder_attentions is not None: + cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],) + visual_encoder_outputs = ( + vision_hidden_states, + vision_attentions if output_attentions else None, + ) + lang_encoder_outputs = ( + language_hidden_states, + language_attentions if output_attentions else None, + ) + return ( + visual_encoder_outputs, + lang_encoder_outputs, + cross_encoder_attentions if output_attentions else None, + ) + + +class LxmertPooler(nn.Module): + def __init__(self, config): + super(LxmertPooler, self).__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. 
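+        # e.g. hidden_states of shape (batch_size, seq_len, hidden_size) ->
+        # pooled_output of shape (batch_size, hidden_size), taken at index 0
+        # (the [CLS] position of the language stream).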
+        first_token_tensor = hidden_states[:, 0]
+        pooled_output = self.dense(first_token_tensor)
+        pooled_output = self.activation(pooled_output)
+        return pooled_output
+
+
+class LxmertPredictionHeadTransform(nn.Module):
+    def __init__(self, config):
+        super(LxmertPredictionHeadTransform, self).__init__()
+        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+        self.transform_act_fn = ACT2FN[config.hidden_act]
+        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
+
+    def forward(self, hidden_states):
+        hidden_states = self.dense(hidden_states)
+        hidden_states = self.transform_act_fn(hidden_states)
+        hidden_states = self.LayerNorm(hidden_states)
+        return hidden_states
+
+
+class LxmertLMPredictionHead(nn.Module):
+    def __init__(self, config, lxmert_model_embedding_weights):
+        super(LxmertLMPredictionHead, self).__init__()
+        self.transform = LxmertPredictionHeadTransform(config)
+
+        # The output weights are the same as the input embeddings, but there is
+        # an output-only bias for each token.
+        self.decoder = nn.Linear(
+            lxmert_model_embedding_weights.size(1),
+            lxmert_model_embedding_weights.size(0),
+            bias=False,
+        )
+        self.decoder.weight = lxmert_model_embedding_weights
+        self.bias = nn.Parameter(torch.zeros(lxmert_model_embedding_weights.size(0)))
+
+    def forward(self, hidden_states):
+        hidden_states = self.transform(hidden_states)
+        hidden_states = self.decoder(hidden_states) + self.bias
+        return hidden_states
+
+
+class LxmertVisualAnswerHead(nn.Module):
+    def __init__(self, config, num_labels):
+        super().__init__()
+        hid_dim = config.hidden_size
+        self.logit_fc = nn.Sequential(
+            nn.Linear(hid_dim, hid_dim * 2),
+            GeLU(),
+            nn.LayerNorm(hid_dim * 2, eps=1e-12),
+            nn.Linear(hid_dim * 2, num_labels),
+        )
+
+    def forward(self, hidden_states):
+        return self.logit_fc(hidden_states)
+
+
+class LxmertVisualObjHead(nn.Module):
+    def __init__(self, config):
+        super().__init__()
+        self.transform = LxmertPredictionHeadTransform(config)
+        # Decide the use of visual losses
+        visual_losses = {}
+        if config.visual_obj_loss:
+            visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels}
+        if config.visual_attr_loss:
+            visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels}
+        if config.visual_feat_loss:
+            visual_losses["feat"] = {
+                "shape": (-1, config.visual_feat_dim),
+                "num": config.visual_feat_dim,
+            }
+        self.visual_losses = visual_losses
+
+        # One independent linear decoder (with bias) per enabled visual loss;
+        # unlike the LM head above, no weights are tied here.
+        self.decoder_dict = nn.ModuleDict(
+            {key: nn.Linear(config.hidden_size, self.visual_losses[key]["num"]) for key in self.visual_losses}
+        )
+
+    def forward(self, hidden_states):
+        hidden_states = self.transform(hidden_states)
+        output = {}
+        for key in self.visual_losses:
+            output[key] = self.decoder_dict[key](hidden_states)
+        return output
+
+
+class LxmertPreTrainingHeads(nn.Module):
+    def __init__(self, config, lxmert_model_embedding_weights):
+        super(LxmertPreTrainingHeads, self).__init__()
+        self.predictions = LxmertLMPredictionHead(config, lxmert_model_embedding_weights)
+        self.seq_relationship = nn.Linear(config.hidden_size, 2)
+
+    def forward(self, sequence_output, pooled_output):
+        prediction_scores = self.predictions(sequence_output)
+        seq_relationship_score = self.seq_relationship(pooled_output)
+        return prediction_scores, seq_relationship_score
+
+
+class LxmertPreTrainedModel(PreTrainedModel):
+    """
+    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+    models.
+    """
+
+    config_class = LxmertConfig
+    load_tf_weights = load_tf_weights_in_lxmert
+    base_model_prefix = "lxmert"
+
+    def _init_weights(self, module):
+        """Initialize the weights"""
+        if isinstance(module, nn.Linear):
+            # Slightly different from the TF version, which uses truncated_normal for initialization
+            # cf https://github.com/pytorch/pytorch/pull/5617
+            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+            if module.bias is not None:
+                module.bias.data.zero_()
+        elif isinstance(module, nn.Embedding):
+            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+            if module.padding_idx is not None:
+                module.weight.data[module.padding_idx].zero_()
+        elif isinstance(module, nn.LayerNorm):
+            module.bias.data.zero_()
+            module.weight.data.fill_(1.0)
+
+
+LXMERT_START_DOCSTRING = r"""
+
+    The LXMERT model was proposed in [LXMERT: Learning Cross-Modality Encoder Representations from
+    Transformers](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal. It's a vision and language transformer
+    model, pretrained on a variety of multi-modal datasets comprising GQA, VQAv2.0, MSCOCO captions, and Visual
+    Genome, using a combination of masked language modeling, region-of-interest feature regression, a cross-entropy
+    loss for question answering, attribute prediction, and object tag prediction.
+
+    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+    etc.)
+
+    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+    and behavior.
+
+    Parameters:
+        config ([`LxmertConfig`]): Model configuration class with all the parameters of the model.
+            Initializing with a config file does not load the weights associated with the model, only the
+            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+LXMERT_INPUTS_DOCSTRING = r"""
+
+    Args:
+        input_ids (`torch.LongTensor` of shape `({0})`):
+            Indices of input sequence tokens in the vocabulary.
+
+            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+            [`PreTrainedTokenizer.__call__`] for details.
+
+            [What are input IDs?](../glossary#input-ids)
+        visual_feats (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
+            This input represents visual features: ROI-pooled object features extracted from bounding boxes by a
+            Faster R-CNN model.
+
+            These are currently not provided by the transformers library.
+        visual_pos (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_pos_dim)`):
+            This input represents spatial features corresponding (by index) to the visual features. The
+            pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to
+            1.
+
+            These are currently not provided by the transformers library.
+        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+
+            [What are attention masks?](../glossary#attention-mask)
+        visual_attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+
+            [What are attention masks?](../glossary#attention-mask)
+        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+            1]`:
+
+            - 0 corresponds to a *sentence A* token,
+            - 1 corresponds to a *sentence B* token.
+
+            [What are token type IDs?](../glossary#token-type-ids)
+        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+            model's internal embedding lookup matrix.
+        output_attentions (`bool`, *optional*):
+            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+            tensors for more detail.
+        output_hidden_states (`bool`, *optional*):
+            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+            more detail.
+        return_dict (`bool`, *optional*):
+            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+""" + + +@add_start_docstrings( + "The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.", + LXMERT_START_DOCSTRING, +) +class LxmertModel(LxmertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.embeddings = LxmertEmbeddings(config) + self.encoder = LxmertEncoder(config) + self.pooler = LxmertPooler(config) + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, new_embeddings): + self.embeddings.word_embeddings = new_embeddings + + @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=LxmertModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + visual_feats: Optional[torch.FloatTensor] = None, + visual_pos: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + visual_attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[LxmertModelOutput, Tuple[torch.FloatTensor]]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if visual_feats is None: + raise ValueError("`visual_feats` cannot be `None`") + if visual_pos is None: + raise ValueError("`visual_pos` cannot be `None`") + + device = input_ids.device if input_ids is not None else inputs_embeds.device + + if attention_mask is None: + attention_mask = torch.ones(input_shape, device=device) + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + # We create a 3D attention mask from a 2D tensor mask. + # Sizes are [batch_size, 1, 1, to_seq_length] + # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] + # this attention mask is more simple than the triangular masking of causal attention + # used in OpenAI GPT, we just need to prepare the broadcast dimension here. + extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and the dtype's smallest value for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. 
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) + extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min + + # Process the visual attention mask + if visual_attention_mask is not None: + extended_visual_attention_mask = visual_attention_mask.unsqueeze(1).unsqueeze(2) + extended_visual_attention_mask = extended_visual_attention_mask.to(dtype=self.dtype) + extended_visual_attention_mask = (1.0 - extended_visual_attention_mask) * torch.finfo(self.dtype).min + else: + extended_visual_attention_mask = None + + # Positional Word Embeddings + embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds) + + # Run Lxmert encoder + encoder_outputs = self.encoder( + embedding_output, + extended_attention_mask, + visual_feats=visual_feats, + visual_pos=visual_pos, + visual_attention_mask=extended_visual_attention_mask, + output_attentions=output_attentions, + ) + + visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2] + vision_hidden_states = visual_encoder_outputs[0] + language_hidden_states = lang_encoder_outputs[0] + + all_attentions = () + if output_attentions: + language_attentions = lang_encoder_outputs[1] + vision_attentions = visual_encoder_outputs[1] + cross_encoder_attentions = encoder_outputs[2] + all_attentions = ( + language_attentions, + vision_attentions, + cross_encoder_attentions, + ) + + hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else () + + visual_output = vision_hidden_states[-1] + lang_output = language_hidden_states[-1] + pooled_output = self.pooler(lang_output) + + if not return_dict: + return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions + + return LxmertModelOutput( + pooled_output=pooled_output, + language_output=lang_output, + vision_output=visual_output, + language_hidden_states=language_hidden_states if output_hidden_states else None, + vision_hidden_states=vision_hidden_states if output_hidden_states else None, + language_attentions=language_attentions if output_attentions else None, + vision_attentions=vision_attentions if output_attentions else None, + cross_encoder_attentions=cross_encoder_attentions if output_attentions else None, + ) + + +@add_start_docstrings( + """Lxmert Model with a specified pretraining head on top.""", + LXMERT_START_DOCSTRING, +) +class LxmertForPreTraining(LxmertPreTrainedModel): + _tied_weights_keys = ["cls.predictions.decoder.weight"] + + def __init__(self, config): + super().__init__(config) + # Configuration + self.config = config + self.num_qa_labels = config.num_qa_labels + self.visual_loss_normalizer = config.visual_loss_normalizer + + # Use of pretraining tasks + self.task_mask_lm = config.task_mask_lm + self.task_obj_predict = config.task_obj_predict + self.task_matched = config.task_matched + self.task_qa = config.task_qa + + # Lxmert backbone + self.lxmert = LxmertModel(config) + + # Pre-training heads + self.cls = LxmertPreTrainingHeads(config, self.lxmert.embeddings.word_embeddings.weight) + if self.task_obj_predict: + self.obj_predict_head = LxmertVisualObjHead(config) + if self.task_qa: + self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels) + + # Weight initialization + # Initialize weights and apply final processing + self.post_init() + + # Loss functions + self.loss_fcts = { + "l2": SmoothL1Loss(reduction="none"), + "visual_ce": CrossEntropyLoss(reduction="none"), + "ce": CrossEntropyLoss(), + } + + visual_losses = {} + if config.visual_obj_loss: + 
visual_losses["obj"] = { + "shape": (-1,), + "num": config.num_object_labels, + "loss": "visual_ce", + } + if config.visual_attr_loss: + visual_losses["attr"] = { + "shape": (-1,), + "num": config.num_attr_labels, + "loss": "visual_ce", + } + if config.visual_feat_loss: + visual_losses["feat"] = { + "shape": (-1, config.visual_feat_dim), + "num": config.visual_feat_dim, + "loss": "l2", + } + self.visual_losses = visual_losses + + def resize_num_qa_labels(self, num_labels): + """ + Build a resized question answering linear layer Module from a provided new linear layer. Increasing the size + will add newly initialized weights. Reducing the size will remove weights from the end + + Args: + num_labels (`int`, *optional*): + New number of labels in the linear layer weight matrix. Increasing the size will add newly initialized + weights at the end. Reducing the size will remove weights from the end. If not provided or `None`, just + returns a pointer to the qa labels ``torch.nn.Linear``` module of the model without doing anything. + + Return: + `torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer + """ + + cur_qa_logit_layer = self.get_qa_logit_layer() + if num_labels is None or cur_qa_logit_layer is None: + return + new_qa_logit_layer = self._resize_qa_labels(num_labels) + self.config.num_qa_labels = num_labels + self.num_qa_labels = num_labels + + return new_qa_logit_layer + + def _resize_qa_labels(self, num_labels): + cur_qa_logit_layer = self.get_qa_logit_layer() + new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels) + self._set_qa_logit_layer(new_qa_logit_layer) + return self.get_qa_logit_layer() + + def get_qa_logit_layer(self) -> nn.Module: + """ + Returns the linear layer that produces question answering logits. + + Returns: + `nn.Module`: A torch module mapping the question answering prediction hidden states or `None` if LXMERT + does not have a visual answering head. 
+ """ + if hasattr(self, "answer_head"): + return self.answer_head.logit_fc[-1] + + def _set_qa_logit_layer(self, qa_logit_layer): + self.answer_head.logit_fc[-1] = qa_logit_layer + + def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels): + if num_labels is None: + return cur_qa_logit_layer + + cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size() + if cur_qa_labels == num_labels: + return cur_qa_logit_layer + + # Build new linear output + if getattr(cur_qa_logit_layer, "bias", None) is not None: + new_qa_logit_layer = nn.Linear(hidden_dim, num_labels) + else: + new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False) + + new_qa_logit_layer.to(cur_qa_logit_layer.weight.device) + + # initialize all new labels + self._init_weights(new_qa_logit_layer) + + # Copy labels from the previous weights + num_labels_to_copy = min(cur_qa_labels, num_labels) + new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :] + if getattr(cur_qa_logit_layer, "bias", None) is not None: + new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy] + + return new_qa_logit_layer + + @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=LxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + visual_feats: Optional[torch.FloatTensor] = None, + visual_pos: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + visual_attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + obj_labels: Optional[Dict[str, Tuple[torch.FloatTensor, torch.FloatTensor]]] = None, + matched_label: Optional[torch.LongTensor] = None, + ans: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **kwargs, + ) -> Union[LxmertForPreTrainingOutput, Tuple[torch.FloatTensor]]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the + loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + obj_labels (`Dict[Str: Tuple[Torch.FloatTensor, Torch.FloatTensor]]`, *optional*): + each key is named after each one of the visual losses and each element of the tuple is of the shape + `(batch_size, num_features)` and `(batch_size, num_features, visual_feature_dim)` for each the label id and + the label score respectively + matched_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the whether or not the text input matches the image (classification) loss. Input + should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`: + + - 0 indicates that the sentence does not match the image, + - 1 indicates that the sentence does match the image. 
+        ans (`torch.Tensor` of shape `(batch_size,)`, *optional*):
+            A one-hot representation of the correct answer.
+
+        Returns:
+        """
+
+        if "masked_lm_labels" in kwargs:
+            warnings.warn(
+                "The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels`"
+                " instead.",
+                FutureWarning,
+            )
+            labels = kwargs.pop("masked_lm_labels")
+
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        device = input_ids.device if input_ids is not None else inputs_embeds.device
+        lxmert_output = self.lxmert(
+            input_ids=input_ids,
+            visual_feats=visual_feats,
+            visual_pos=visual_pos,
+            token_type_ids=token_type_ids,
+            attention_mask=attention_mask,
+            visual_attention_mask=visual_attention_mask,
+            inputs_embeds=inputs_embeds,
+            output_hidden_states=output_hidden_states,
+            output_attentions=output_attentions,
+            return_dict=return_dict,
+        )
+
+        lang_output, visual_output, pooled_output = (
+            lxmert_output[0],
+            lxmert_output[1],
+            lxmert_output[2],
+        )
+        lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output)
+        if self.task_qa:
+            answer_score = self.answer_head(pooled_output)
+        else:
+            answer_score = pooled_output[0][0]
+
+        total_loss = (
+            None
+            if (labels is None and matched_label is None and obj_labels is None and ans is None)
+            else torch.tensor(0.0, device=device)
+        )
+        if labels is not None and self.task_mask_lm:
+            masked_lm_loss = self.loss_fcts["ce"](
+                lang_prediction_scores.view(-1, self.config.vocab_size),
+                labels.view(-1),
+            )
+            total_loss += masked_lm_loss
+        if matched_label is not None and self.task_matched:
+            matched_loss = self.loss_fcts["ce"](cross_relationship_score.view(-1, 2), matched_label.view(-1))
+            total_loss += matched_loss
+        if obj_labels is not None and self.task_obj_predict:
+            total_visual_loss = torch.tensor(0.0, device=input_ids.device)
+            visual_prediction_scores_dict = self.obj_predict_head(visual_output)
+            for key, key_info in self.visual_losses.items():
+                label, mask_conf = obj_labels[key]
+                output_dim = key_info["num"]
+                loss_fct_name = key_info["loss"]
+                label_shape = key_info["shape"]
+                weight = self.visual_loss_normalizer
+                visual_loss_fct = self.loss_fcts[loss_fct_name]
+                visual_prediction_scores = visual_prediction_scores_dict[key]
+                visual_loss = visual_loss_fct(
+                    visual_prediction_scores.view(-1, output_dim),
+                    label.view(label_shape),
+                )
+                if visual_loss.dim() > 1:  # Regression Losses
+                    visual_loss = visual_loss.mean(1)
+                visual_loss = (visual_loss * mask_conf.view(-1)).mean() * weight
+                total_visual_loss += visual_loss
+            total_loss += total_visual_loss
+        if ans is not None and self.task_qa:
+            answer_loss = self.loss_fcts["ce"](answer_score.view(-1, self.num_qa_labels), ans.view(-1))
+            total_loss += answer_loss
+
+        if not return_dict:
+            output = (
+                lang_prediction_scores,
+                cross_relationship_score,
+                answer_score,
+            ) + lxmert_output[3:]
+            return ((total_loss,) + output) if total_loss is not None else output
+
+        return LxmertForPreTrainingOutput(
+            loss=total_loss,
+            prediction_logits=lang_prediction_scores,
+            cross_relationship_score=cross_relationship_score,
+            question_answering_score=answer_score,
+            language_hidden_states=lxmert_output.language_hidden_states,
+            vision_hidden_states=lxmert_output.vision_hidden_states,
+            language_attentions=lxmert_output.language_attentions,
+            vision_attentions=lxmert_output.vision_attentions,
+            cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
+        )
+
+
+@add_start_docstrings(
+    """Lxmert Model with a visual-answering head on top for downstream QA tasks""",
+    LXMERT_START_DOCSTRING,
+)
+class LxmertForQuestionAnswering(LxmertPreTrainedModel):
+    def __init__(self, config):
+        super().__init__(config)
+        # Configuration
+        self.config = config
+        self.num_qa_labels = config.num_qa_labels
+        self.visual_loss_normalizer = config.visual_loss_normalizer
+
+        # Lxmert backbone
+        self.lxmert = LxmertModel(config)
+
+        self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels)
+
+        # Weight initialization
+        # Initialize weights and apply final processing
+        self.post_init()
+
+        # Loss function
+        self.loss = CrossEntropyLoss()
+
+    def resize_num_qa_labels(self, num_labels):
+        """
+        Build a resized question answering linear layer from the current one. Increasing the size will add newly
+        initialized weights. Reducing the size will remove weights from the end.
+
+        Args:
+            num_labels (`int`, *optional*):
+                New number of labels in the linear layer weight matrix. Increasing the size will add newly initialized
+                weights at the end. Reducing the size will remove weights from the end. If not provided or `None`, just
+                returns a pointer to the qa labels `torch.nn.Linear` module of the model without doing anything.
+
+        Return:
+            `torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer
+        """
+
+        cur_qa_logit_layer = self.get_qa_logit_layer()
+        if num_labels is None or cur_qa_logit_layer is None:
+            return
+        new_qa_logit_layer = self._resize_qa_labels(num_labels)
+        self.config.num_qa_labels = num_labels
+        self.num_qa_labels = num_labels
+
+        return new_qa_logit_layer
+
+    def _resize_qa_labels(self, num_labels):
+        cur_qa_logit_layer = self.get_qa_logit_layer()
+        new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels)
+        self._set_qa_logit_layer(new_qa_logit_layer)
+        return self.get_qa_logit_layer()
+
+    def get_qa_logit_layer(self) -> nn.Module:
+        """
+        Returns the linear layer that produces question answering logits.
+
+        Returns:
+            `nn.Module`: A torch module mapping the question answering prediction hidden states, or `None` if Lxmert
+            does not have the visual answering head.
+ """ + + if hasattr(self, "answer_head"): + return self.answer_head.logit_fc[-1] + + def _set_qa_logit_layer(self, qa_logit_layer): + self.answer_head.logit_fc[-1] = qa_logit_layer + + def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels): + if num_labels is None: + return cur_qa_logit_layer + + cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size() + if cur_qa_labels == num_labels: + return cur_qa_logit_layer + + # Build new linear output + if getattr(cur_qa_logit_layer, "bias", None) is not None: + new_qa_logit_layer = nn.Linear(hidden_dim, num_labels) + else: + new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False) + + new_qa_logit_layer.to(cur_qa_logit_layer.weight.device) + + # initialize all new labels + self._init_weights(new_qa_logit_layer) + + # Copy labels from the previous weights + num_labels_to_copy = min(cur_qa_labels, num_labels) + new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :] + if getattr(cur_qa_logit_layer, "bias", None) is not None: + new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy] + + return new_qa_logit_layer + + @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=LxmertForQuestionAnsweringOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + visual_feats: Optional[torch.FloatTensor] = None, + visual_pos: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + visual_attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[LxmertForQuestionAnsweringOutput, Tuple[torch.FloatTensor]]: + r""" + labels (`Torch.Tensor` of shape `(batch_size)`, *optional*): + A one-hot representation of the correct answer + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + lxmert_output = self.lxmert( + input_ids=input_ids, + visual_feats=visual_feats, + visual_pos=visual_pos, + token_type_ids=token_type_ids, + attention_mask=attention_mask, + visual_attention_mask=visual_attention_mask, + inputs_embeds=inputs_embeds, + output_hidden_states=output_hidden_states, + output_attentions=output_attentions, + return_dict=return_dict, + ) + + pooled_output = lxmert_output[2] + answer_score = self.answer_head(pooled_output) + loss = None + if labels is not None: + loss = self.loss(answer_score.view(-1, self.num_qa_labels), labels.view(-1)) + + if not return_dict: + output = (answer_score,) + lxmert_output[3:] + return (loss,) + output if loss is not None else output + + return LxmertForQuestionAnsweringOutput( + loss=loss, + question_answering_score=answer_score, + language_hidden_states=lxmert_output.language_hidden_states, + vision_hidden_states=lxmert_output.vision_hidden_states, + language_attentions=lxmert_output.language_attentions, + vision_attentions=lxmert_output.vision_attentions, + cross_encoder_attentions=lxmert_output.cross_encoder_attentions, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/lxmert/modeling_tf_lxmert.py 
b/venv/lib/python3.10/site-packages/transformers/models/lxmert/modeling_tf_lxmert.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4741196031a793c2e58e6c03392b3b52d76fc79
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/lxmert/modeling_tf_lxmert.py
@@ -0,0 +1,1656 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors, The HuggingFace Inc. team, and the
+# Lxmert Authors.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TF 2.0 LXMERT model."""
+
+
+from __future__ import annotations
+
+import warnings
+from dataclasses import dataclass
+from typing import Dict, Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_utils import (
+    TFModelInputType,
+    TFPreTrainedModel,
+    get_initializer,
+    keras,
+    keras_serializable,
+    shape_list,
+    unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, stable_softmax
+from ...utils import (
+    ModelOutput,
+    add_code_sample_docstrings,
+    add_start_docstrings,
+    add_start_docstrings_to_model_forward,
+    logging,
+    replace_return_docstrings,
+)
+from .configuration_lxmert import LxmertConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "unc-nlp/lxmert-base-uncased"
+_CONFIG_FOR_DOC = "LxmertConfig"
+
+
+from ..deprecated._archive_maps import TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402
+
+
+@dataclass
+class TFLxmertModelOutput(ModelOutput):
+    """
+    Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for the
+    language, visual, and cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the
+    "relationship" encoder)
+
+
+    Args:
+        language_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+            Sequence of hidden-states at the output of the last layer of the language encoder.
+        vision_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+            Sequence of hidden-states at the output of the last layer of the visual encoder.
+        pooled_output (`tf.Tensor` of shape `(batch_size, hidden_size)`):
+            Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed
+            by a Linear layer and a Tanh activation function.
+        language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape
+            `(batch_size, sequence_length, hidden_size)`.
+        vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape
+            `(batch_size, sequence_length, hidden_size)`.
+ language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + """ + + language_output: tf.Tensor | None = None + vision_output: tf.Tensor | None = None + pooled_output: tf.Tensor | None = None + language_hidden_states: Tuple[tf.Tensor] | None = None + vision_hidden_states: Tuple[tf.Tensor] | None = None + language_attentions: Tuple[tf.Tensor] | None = None + vision_attentions: Tuple[tf.Tensor] | None = None + cross_encoder_attentions: Tuple[tf.Tensor] | None = None + + +@dataclass +class TFLxmertForPreTrainingOutput(ModelOutput): + """ + Output type of [`LxmertForPreTraining`]. + + Args: + loss (*optional*, returned when `labels` is provided, `tf.Tensor` of shape `(1,)`): + Total loss as the sum of the masked language modeling loss and the next sequence prediction + (classification) loss. + prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + cross_relationship_score (`tf.Tensor` of shape `(batch_size, 2)`): + Prediction scores of the textual matching objective (classification) head (scores of True/False + continuation before SoftMax). + question_answering_score (`tf.Tensor` of shape `(batch_size, n_qa_answers)`): + Prediction scores of question answering objective (classification). + language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape + `(batch_size, sequence_length, hidden_size)`. + vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape + `(batch_size, sequence_length, hidden_size)`. + language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in + the self-attention heads. 
+ vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + + """ + + loss: tf.Tensor | None = None + prediction_logits: tf.Tensor | None = None + cross_relationship_score: tf.Tensor | None = None + question_answering_score: tf.Tensor | None = None + language_hidden_states: Tuple[tf.Tensor] | None = None + vision_hidden_states: Tuple[tf.Tensor] | None = None + language_attentions: Tuple[tf.Tensor] | None = None + vision_attentions: Tuple[tf.Tensor] | None = None + cross_encoder_attentions: Tuple[tf.Tensor] | None = None + + +class TFLxmertVisualFeatureEncoder(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + + # Object feature encoding + self.visn_fc = keras.layers.Dense( + config.hidden_size, + kernel_initializer=get_initializer(config.initializer_range), + name="visn_fc", + ) + self.visn_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="visn_layer_norm") + + # Box position encoding + self.box_fc = keras.layers.Dense( + config.hidden_size, + kernel_initializer=get_initializer(config.initializer_range), + name="box_fc", + ) + self.box_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="box_layer_norm") + + self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) + self.feat_dim = config.visual_feat_dim + self.pos_dim = config.visual_pos_dim + self.config = config + + def call(self, visn_input, training=False): + feats, boxes = visn_input + + x = self.visn_fc(feats) + x = self.visn_layer_norm(x) + y = self.box_fc(boxes) + y = self.box_layer_norm(y) + output = (x + y) / 2 + + output = self.dropout(output, training=training) + return output + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "visn_fc", None) is not None: + with tf.name_scope(self.visn_fc.name): + self.visn_fc.build([None, None, self.feat_dim]) + if getattr(self, "visn_layer_norm", None) is not None: + with tf.name_scope(self.visn_layer_norm.name): + self.visn_layer_norm.build([None, None, self.config.hidden_size]) + if getattr(self, "box_fc", None) is not None: + with tf.name_scope(self.box_fc.name): + self.box_fc.build([None, None, self.pos_dim]) + if getattr(self, "box_layer_norm", None) is not None: + with tf.name_scope(self.box_layer_norm.name): + self.box_layer_norm.build([None, None, self.config.hidden_size]) + + +class TFLxmertEmbeddings(keras.layers.Layer): + """Construct the embeddings from word, position and token_type embeddings.""" + + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + + self.config = config + self.hidden_size = config.hidden_size + self.max_position_embeddings = config.max_position_embeddings + self.initializer_range = config.initializer_range + self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, 
name="LayerNorm") + self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) + + def build(self, input_shape=None): + with tf.name_scope("word_embeddings"): + self.weight = self.add_weight( + name="weight", + shape=[self.config.vocab_size, self.hidden_size], + initializer=get_initializer(initializer_range=self.initializer_range), + ) + + with tf.name_scope("token_type_embeddings"): + self.token_type_embeddings = self.add_weight( + name="embeddings", + shape=[self.config.type_vocab_size, self.hidden_size], + initializer=get_initializer(initializer_range=self.initializer_range), + ) + + with tf.name_scope("position_embeddings"): + self.position_embeddings = self.add_weight( + name="embeddings", + shape=[self.max_position_embeddings, self.hidden_size], + initializer=get_initializer(initializer_range=self.initializer_range), + ) + + if self.built: + return + self.built = True + if getattr(self, "LayerNorm", None) is not None: + with tf.name_scope(self.LayerNorm.name): + self.LayerNorm.build([None, None, self.config.hidden_size]) + + def call(self, input_ids=None, token_type_ids=None, inputs_embeds=None, training=False): + """ + Applies embedding based on inputs tensor. + + Returns: + final_embeddings (`tf.Tensor`): output embedding tensor. + """ + assert not (input_ids is None and inputs_embeds is None) + + if input_ids is not None: + check_embeddings_within_bounds(input_ids, self.config.vocab_size) + inputs_embeds = tf.gather(params=self.weight, indices=input_ids) + + input_shape = shape_list(inputs_embeds)[:-1] + + if token_type_ids is None: + token_type_ids = tf.fill(dims=input_shape, value=0) + + position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) + position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) + token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) + final_embeddings = inputs_embeds + position_embeds + token_type_embeds + final_embeddings = self.LayerNorm(inputs=final_embeddings) + final_embeddings = self.dropout(inputs=final_embeddings, training=training) + + return final_embeddings + + +class TFLxmertAttention(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads}" + ) + + self.num_attention_heads = config.num_attention_heads + assert config.hidden_size % config.num_attention_heads == 0 + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = keras.layers.Dense( + self.all_head_size, + kernel_initializer=get_initializer(config.initializer_range), + name="query", + ) + self.key = keras.layers.Dense( + self.all_head_size, + kernel_initializer=get_initializer(config.initializer_range), + name="key", + ) + self.value = keras.layers.Dense( + self.all_head_size, + kernel_initializer=get_initializer(config.initializer_range), + name="value", + ) + + self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob) + self.ctx_dim = config.hidden_size + self.config = config + + def transpose_for_scores(self, x, batch_size): + # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] + x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, 
self.attention_head_size)) + return tf.transpose(x, perm=[0, 2, 1, 3]) + + def call(self, hidden_states, context, attention_mask, output_attentions, training=False): + batch_size = shape_list(hidden_states)[0] + mixed_query_layer = self.query(hidden_states) + mixed_key_layer = self.key(context) + mixed_value_layer = self.value(context) + + query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) + key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) + value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = tf.matmul( + query_layer, key_layer, transpose_b=True + ) # (batch size, num_heads, seq_len_q, seq_len_k) + dk = tf.cast(shape_list(key_layer)[-1], dtype=attention_scores.dtype) # scale attention_scores + attention_scores = attention_scores / tf.math.sqrt(dk) + + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in TFLxmertModel call() function) + attention_mask = tf.cast(attention_mask, dtype=attention_scores.dtype) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = stable_softmax(attention_scores, axis=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs, training=training) + context_layer = tf.matmul(attention_probs, value_layer) + + context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) + context_layer = tf.reshape( + context_layer, (batch_size, -1, self.all_head_size) + ) # (batch_size, seq_len_q, all_head_size) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + return outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "query", None) is not None: + with tf.name_scope(self.query.name): + self.query.build([None, None, self.config.hidden_size]) + if getattr(self, "key", None) is not None: + with tf.name_scope(self.key.name): + self.key.build([None, None, self.ctx_dim]) + if getattr(self, "value", None) is not None: + with tf.name_scope(self.value.name): + self.value.build([None, None, self.ctx_dim]) + + +class TFLxmertIntermediate(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + self.dense = keras.layers.Dense( + config.intermediate_size, + kernel_initializer=get_initializer(config.initializer_range), + name="dense", + ) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = get_tf_activation(config.hidden_act) + else: + self.intermediate_act_fn = config.hidden_act + self.config = config + + def call(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "dense", None) is not None: + with tf.name_scope(self.dense.name): + self.dense.build([None, None, self.config.hidden_size]) + + +class TFLxmertOutput(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + self.dense = keras.layers.Dense( + config.hidden_size, + kernel_initializer=get_initializer(config.initializer_range), + name="dense", + ) + + self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, 
name="LayerNorm") + self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) + self.config = config + + def call(self, hidden_states, input_tensor, training=False): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states, training) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "dense", None) is not None: + with tf.name_scope(self.dense.name): + self.dense.build([None, None, self.config.intermediate_size]) + if getattr(self, "LayerNorm", None) is not None: + with tf.name_scope(self.LayerNorm.name): + self.LayerNorm.build([None, None, self.config.hidden_size]) + + +class TFLxmertAttentionOutput(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + self.dense = keras.layers.Dense( + config.hidden_size, + kernel_initializer=get_initializer(config.initializer_range), + name="dense", + ) + self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") + self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) + self.config = config + + def call(self, hidden_states, input_tensor, training=False): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "dense", None) is not None: + with tf.name_scope(self.dense.name): + self.dense.build([None, None, self.config.hidden_size]) + if getattr(self, "LayerNorm", None) is not None: + with tf.name_scope(self.LayerNorm.name): + self.LayerNorm.build([None, None, self.config.hidden_size]) + + +class TFLxmertSelfAttentionLayer(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + self.self = TFLxmertAttention(config, name="self") + self.attention_output = TFLxmertAttentionOutput(config, name="output") + + def call(self, input_tensor, attention_mask, output_attentions, training=False): + # Self attention attends to itself, thus keys and queries are the same (input_tensor). 
+ self_output = self.self(input_tensor, input_tensor, attention_mask, output_attentions) + if output_attentions: + attention_probs = self_output[1] + attention_output = self.attention_output(self_output[0], input_tensor) + return (attention_output, attention_probs) if output_attentions else (attention_output,) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "self", None) is not None: + with tf.name_scope(self.self.name): + self.self.build(None) + if getattr(self, "attention_output", None) is not None: + with tf.name_scope(self.attention_output.name): + self.attention_output.build(None) + + +class TFLxmertCrossAttentionLayer(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + self.att = TFLxmertAttention(config, name="att") + self.attention_output = TFLxmertAttentionOutput(config, name="output") + + def call( + self, + input_tensor, + ctx_tensor, + ctx_att_mask, + output_attentions=False, + training=False, + ): + output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions, training=training) + if output_attentions: + attention_probs = output[1] + attention_output = self.attention_output(output[0], input_tensor, training=training) + outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) + return outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "att", None) is not None: + with tf.name_scope(self.att.name): + self.att.build(None) + if getattr(self, "attention_output", None) is not None: + with tf.name_scope(self.attention_output.name): + self.attention_output.build(None) + + +class TFLxmertLayer(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + self.attention = TFLxmertSelfAttentionLayer(config, name="attention") + self.intermediate = TFLxmertIntermediate(config, name="intermediate") + self.transformer_output = TFLxmertOutput(config, name="output") + + def call(self, hidden_states, attention_mask, output_attentions, training=False): + attention_outputs = self.attention(hidden_states, attention_mask, output_attentions, training=training) + attention_output = attention_outputs[0] + intermediate_output = self.intermediate(attention_output) + layer_output = self.transformer_output(intermediate_output, attention_output, training=training) + outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them + return outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "attention", None) is not None: + with tf.name_scope(self.attention.name): + self.attention.build(None) + if getattr(self, "intermediate", None) is not None: + with tf.name_scope(self.intermediate.name): + self.intermediate.build(None) + if getattr(self, "transformer_output", None) is not None: + with tf.name_scope(self.transformer_output.name): + self.transformer_output.build(None) + + +class TFLxmertXLayer(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + self.visual_attention = TFLxmertCrossAttentionLayer(config, name="visual_attention") + + # Self-attention Layers + self.lang_self_att = TFLxmertSelfAttentionLayer(config, name="lang_self_att") + self.visn_self_att = TFLxmertSelfAttentionLayer(config, name="visn_self_att") + + # Intermediate and Output Layers (FFNs) + self.lang_inter = TFLxmertIntermediate(config, name="lang_inter") + self.lang_output = 
TFLxmertOutput(config, name="lang_output") + self.visn_inter = TFLxmertIntermediate(config, name="visn_inter") + self.visn_output = TFLxmertOutput(config, name="visn_output") + + def cross_att( + self, + lang_input, + lang_attention_mask, + visn_input, + visn_attention_mask, + output_attentions, + training=False, + ): + # Cross Attention + + # Keras saving and loading model *does not work* with the same inputs for two layers. + lang_attention_lang_input = tf.identity(lang_input) + visn_attention_lang_input = tf.identity(lang_input) + lang_attention_visn_input = tf.identity(visn_input) + visn_attention_visn_input = tf.identity(visn_input) + + lang_att_output = self.visual_attention( + lang_attention_lang_input, + lang_attention_visn_input, + visn_attention_mask, + output_attentions=output_attentions, + training=training, + ) + visn_att_output = self.visual_attention( + visn_attention_visn_input, + visn_attention_lang_input, + lang_attention_mask, + output_attentions=output_attentions, + training=training, + ) + return lang_att_output, visn_att_output + + def self_att( + self, + lang_input, + lang_attention_mask, + visn_input, + visn_attention_mask, + training=False, + ): + # Self Attention + output_attentions = False + lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions, training=training) + visn_att_output = self.visn_self_att(visn_input, visn_attention_mask, output_attentions, training=training) + return lang_att_output[0], visn_att_output[0] + + def output_fc(self, lang_input, visn_input, training=False): + # FC layers + lang_inter_output = self.lang_inter(lang_input) + visn_inter_output = self.visn_inter(visn_input) + + # Layer output + lang_output = self.lang_output(lang_inter_output, lang_input, training) + visn_output = self.visn_output(visn_inter_output, visn_input, training) + return lang_output, visn_output + + def call( + self, + lang_feats, + lang_attention_mask, + visn_feats, + visn_attention_mask, + output_attentions, + training=False, + ): + lang_att_output = lang_feats + visn_att_output = visn_feats + + lang_att_output, visn_att_output = self.cross_att( + lang_att_output, + lang_attention_mask, + visn_att_output, + visn_attention_mask, + output_attentions, + training=training, + ) + attention_probs = lang_att_output[1:] + lang_att_output, visn_att_output = self.self_att( + lang_att_output[0], + lang_attention_mask, + visn_att_output[0], + visn_attention_mask, + training=training, + ) + lang_output, visn_output = self.output_fc(lang_att_output, visn_att_output, training=training) + + return (lang_output, visn_output, attention_probs[0]) if output_attentions else (lang_output, visn_output) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "visual_attention", None) is not None: + with tf.name_scope(self.visual_attention.name): + self.visual_attention.build(None) + if getattr(self, "lang_self_att", None) is not None: + with tf.name_scope(self.lang_self_att.name): + self.lang_self_att.build(None) + if getattr(self, "visn_self_att", None) is not None: + with tf.name_scope(self.visn_self_att.name): + self.visn_self_att.build(None) + if getattr(self, "lang_inter", None) is not None: + with tf.name_scope(self.lang_inter.name): + self.lang_inter.build(None) + if getattr(self, "lang_output", None) is not None: + with tf.name_scope(self.lang_output.name): + self.lang_output.build(None) + if getattr(self, "visn_inter", None) is not None: + with tf.name_scope(self.visn_inter.name): + 
self.visn_inter.build(None) + if getattr(self, "visn_output", None) is not None: + with tf.name_scope(self.visn_output.name): + self.visn_output.build(None) + + +class TFLxmertEncoder(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + + self.visn_fc = TFLxmertVisualFeatureEncoder(config, name="visn_fc") + + # Number of layers + self.num_l_layers = config.l_layers + self.num_x_layers = config.x_layers + self.num_r_layers = config.r_layers + + # Layers + # Using self.layer instead of self.l_layer to support loading BERT weights. + self.layer = [TFLxmertLayer(config, name=f"layer_._{i}") for i in range(self.num_l_layers)] + self.x_layers = [TFLxmertXLayer(config, name=f"x_layers_._{i}") for i in range(self.num_x_layers)] + self.r_layers = [TFLxmertLayer(config, name=f"r_layers_._{i}") for i in range(self.num_r_layers)] + self.config = config + + def call( + self, + lang_feats=None, + lang_attention_mask=None, + visual_feats=None, + visual_pos=None, + visual_attention_mask=None, + output_attentions=None, + training=False, + ): + vision_hidden_states = () + language_hidden_states = () + vision_attentions = () if output_attentions or self.config.output_attentions else None + language_attentions = () if output_attentions or self.config.output_attentions else None + cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None + + visual_feats = self.visn_fc([visual_feats, visual_pos], training=training) + + # Run language layers + for layer_module in self.layer: + l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions, training=training) + lang_feats = l_outputs[0] + language_hidden_states = language_hidden_states + (lang_feats,) + if language_attentions is not None: + language_attentions = language_attentions + (l_outputs[1],) + + # Run relational layers + for layer_module in self.r_layers: + v_outputs = layer_module( + visual_feats, + visual_attention_mask, + output_attentions, + training=training, + ) + visual_feats = v_outputs[0] + vision_hidden_states = vision_hidden_states + (visual_feats,) + if vision_attentions is not None: + vision_attentions = vision_attentions + (v_outputs[1],) + + # Run cross-modality layers + for layer_module in self.x_layers: + x_outputs = layer_module( + lang_feats, + lang_attention_mask, + visual_feats, + visual_attention_mask, + output_attentions, + training=training, + ) + lang_feats, visual_feats = x_outputs[:2] + vision_hidden_states = vision_hidden_states + (visual_feats,) + language_hidden_states = language_hidden_states + (lang_feats,) + if cross_encoder_attentions is not None: + cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],) + + visual_encoder_outputs = ( + vision_hidden_states, + vision_attentions if output_attentions else None, + ) + lang_encoder_outputs = ( + language_hidden_states, + language_attentions if output_attentions else None, + ) + + return ( + visual_encoder_outputs, + lang_encoder_outputs, + cross_encoder_attentions if output_attentions else None, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "visn_fc", None) is not None: + with tf.name_scope(self.visn_fc.name): + self.visn_fc.build(None) + if getattr(self, "layer", None) is not None: + for layer in self.layer: + with tf.name_scope(layer.name): + layer.build(None) + if getattr(self, "x_layers", None) is not None: + for layer in self.x_layers: + with tf.name_scope(layer.name): + layer.build(None) + if getattr(self, 
"r_layers", None) is not None: + for layer in self.r_layers: + with tf.name_scope(layer.name): + layer.build(None) + + +@keras_serializable +class TFLxmertMainLayer(keras.layers.Layer): + config_class = LxmertConfig + + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + + self.config = config + self.num_l_layers = config.l_layers + self.num_x_layers = config.x_layers + self.num_r_layers = config.r_layers + self.initializer_range = config.initializer_range + self.output_attentions = config.output_attentions + self.output_hidden_states = config.output_hidden_states + self.return_dict = config.use_return_dict + self.embeddings = TFLxmertEmbeddings(config, name="embeddings") + self.encoder = TFLxmertEncoder(config, name="encoder") + self.pooler = TFLxmertPooler(config, name="pooler") + self.config = config + + def get_input_embeddings(self): + return self.embeddings + + def set_input_embeddings(self, value): + self.embeddings.weight = value + self.embeddings.vocab_size = shape_list(value)[0] + + def _prune_heads(self, heads_to_prune): + raise NotImplementedError + + @unpack_inputs + def call( + self, + input_ids=None, + visual_feats=None, + visual_pos=None, + attention_mask=None, + visual_attention_mask=None, + token_type_ids=None, + inputs_embeds=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + training=False, + ): + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = shape_list(input_ids) + elif inputs_embeds is not None: + input_shape = shape_list(inputs_embeds)[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + if visual_pos is None or visual_feats is None: + raise ValueError("visual_feats and visual_pos cannot be `None` in LXMERT's `call` method.") + + if attention_mask is None: + attention_mask = tf.fill(input_shape, 1) + + if token_type_ids is None: + token_type_ids = tf.fill(input_shape, 0) + + # Positional Word Embeddings + embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds, training) + + # We create a 3D attention mask from a 2D tensor mask. + # Sizes are [batch_size, 1, 1, to_seq_length] + # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] + # this attention mask is more simple than the triangular masking of causal attention + # used in OpenAI GPT, we just need to prepare the broadcast dimension here. + extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1])) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. 
+ + extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) + one_cst = tf.constant(1.0, dtype=embedding_output.dtype) + ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) + extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) + + if visual_attention_mask is not None: + # Broadcast the 2D visual mask to [batch_size, 1, 1, num_visual_features]. + extended_visual_attention_mask = tf.expand_dims(tf.expand_dims(visual_attention_mask, axis=1), axis=1) + + extended_visual_attention_mask = tf.cast(extended_visual_attention_mask, dtype=embedding_output.dtype) + extended_visual_attention_mask = tf.multiply( + tf.subtract(one_cst, extended_visual_attention_mask), ten_thousand_cst + ) + else: + extended_visual_attention_mask = None + + # Run Lxmert encoder + encoder_outputs = self.encoder( + embedding_output, + extended_attention_mask, + visual_feats, + visual_pos, + extended_visual_attention_mask, + output_attentions, + training, + ) + visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2] + vision_hidden_states = visual_encoder_outputs[0] + language_hidden_states = lang_encoder_outputs[0] + + all_attentions = () + if output_attentions: + language_attentions = lang_encoder_outputs[1] + vision_attentions = visual_encoder_outputs[1] + cross_encoder_attentions = encoder_outputs[2] + all_attentions = ( + language_attentions, + vision_attentions, + cross_encoder_attentions, + ) + + hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else () + + visual_output = vision_hidden_states[-1] + lang_output = language_hidden_states[-1] + pooled_output = self.pooler(lang_output) + + if not return_dict: + return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions + + return TFLxmertModelOutput( + pooled_output=pooled_output, + language_output=lang_output, + vision_output=visual_output, + language_hidden_states=language_hidden_states if output_hidden_states else None, + vision_hidden_states=vision_hidden_states if output_hidden_states else None, + language_attentions=language_attentions if output_attentions else None, + vision_attentions=vision_attentions if output_attentions else None, + cross_encoder_attentions=cross_encoder_attentions if output_attentions else None, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "embeddings", None) is not None: + with tf.name_scope(self.embeddings.name): + self.embeddings.build(None) + if getattr(self, "encoder", None) is not None: + with tf.name_scope(self.encoder.name): + self.encoder.build(None) + if getattr(self, "pooler", None) is not None: + with tf.name_scope(self.pooler.name): + self.pooler.build(None) + + +class TFLxmertPreTrainedModel(TFPreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = LxmertConfig + base_model_prefix = "lxmert" + + @property + def dummy_inputs(self): + """ + Dummy inputs to build the network.
+ + Returns: + tf.Tensor with dummy inputs + """ + batch_size = 2 + num_visual_features = 10 + input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32) + visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim)) + visual_pos = tf.random.uniform((batch_size, num_visual_features, 4)) + + return { + "input_ids": input_ids, + "visual_feats": visual_feats, + "visual_pos": visual_pos, + } + + @property + def input_signature(self): + return { + "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), + "visual_feats": tf.TensorSpec((None, None, self.config.visual_feat_dim), tf.float32, name="visual_feats"), + "visual_pos": tf.TensorSpec((None, None, 4), tf.float32, name="visual_pos"), + "visual_attention_mask": tf.TensorSpec((None, None), tf.int32, name="visual_attention_mask"), + "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"), + } + + +LXMERT_START_DOCSTRING = r""" + + The LXMERT model was proposed in [LXMERT: Learning Cross-Modality Encoder Representations from + Transformers](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal. It's a vision and language transformer + model, pre-trained on a variety of multi-modal datasets comprising GQA, VQAv2.0, MSCOCO captions, and Visual + Genome, using a combination of masked language modeling, region of interest feature regression, and cross-entropy + losses for question answering, attribute prediction, and object tag prediction. + + This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it + as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and + behavior. + + + + TensorFlow models and layers in `transformers` accept two formats as input: + + - having all inputs as keyword arguments (like PyTorch models), or + - having all inputs as a list, tuple or dict in the first positional argument. + + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: + + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` + - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: + `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` + - a dictionary with one or several input Tensors associated to the input names given in the docstring: + `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + + + + Parameters: + config ([`LxmertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +LXMERT_INPUTS_DOCSTRING = r""" + Args: + input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and + [`PreTrainedTokenizer.encode`] for details. + + [What are input IDs?](../glossary#input-ids) + visual_feats (`tf.Tensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`): + This input represents visual features: ROI-pooled object features extracted from bounding boxes with a + Faster R-CNN model. + + These are currently not provided by the transformers library. + visual_pos (`tf.Tensor` of shape `(batch_size, num_visual_features, 4)`): + This input represents spatial features corresponding to their relative (via index) visual features. The + pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to + 1. + + These are currently not provided by the transformers library. + attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + visual_attention_mask (`tf.Tensor` of shape `(batch_size, num_visual_features)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the + config will be used instead. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. This argument can be used only in eager mode, in graph mode the value in the config will be + used instead. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in + eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`): + Whether or not to use the model in training mode (some modules like dropout modules have different + behaviors between training and evaluation). +""" + + +@add_start_docstrings( + "The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.", + LXMERT_START_DOCSTRING, +) +class TFLxmertModel(TFLxmertPreTrainedModel): + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.lxmert = TFLxmertMainLayer(config, name="lxmert") + + @unpack_inputs + @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFLxmertModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: TFModelInputType | None = None, + visual_feats: tf.Tensor | None = None, + visual_pos: tf.Tensor | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + visual_attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + inputs_embeds: np.ndarray | tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[Tuple, TFLxmertModelOutput]: + outputs = self.lxmert( + input_ids, + visual_feats, + visual_pos, + attention_mask, + visual_attention_mask, + token_type_ids, + inputs_embeds, + output_attentions, + output_hidden_states, + return_dict, + training, + ) + + return outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "lxmert", None) is not None: + with tf.name_scope(self.lxmert.name): + self.lxmert.build(None) + + +class TFLxmertPooler(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + self.dense = keras.layers.Dense( + config.hidden_size, + kernel_initializer=get_initializer(config.initializer_range), + activation="tanh", + name="dense", + ) + self.config = config + + def call(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. 
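A short hedged sketch of this pooling strategy with toy shapes (the names and sizes are illustrative):

import tensorflow as tf

hidden_states = tf.random.uniform((2, 5, 8))          # toy [batch_size, seq_length, hidden_size]
first_token = hidden_states[:, 0]                     # [batch_size, hidden_size]: the first ([CLS]) position
pooler = tf.keras.layers.Dense(8, activation="tanh")  # same shape-preserving tanh projection as below
pooled = pooler(first_token)                          # [batch_size, hidden_size]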
+ first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + return pooled_output + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "dense", None) is not None: + with tf.name_scope(self.dense.name): + self.dense.build([None, None, self.config.hidden_size]) + + +# Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->Lxmert +class TFLxmertPredictionHeadTransform(keras.layers.Layer): + def __init__(self, config: LxmertConfig, **kwargs): + super().__init__(**kwargs) + + self.dense = keras.layers.Dense( + units=config.hidden_size, + kernel_initializer=get_initializer(config.initializer_range), + name="dense", + ) + + if isinstance(config.hidden_act, str): + self.transform_act_fn = get_tf_activation(config.hidden_act) + else: + self.transform_act_fn = config.hidden_act + + self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") + self.config = config + + def call(self, hidden_states: tf.Tensor) -> tf.Tensor: + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(inputs=hidden_states) + + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "dense", None) is not None: + with tf.name_scope(self.dense.name): + self.dense.build([None, None, self.config.hidden_size]) + if getattr(self, "LayerNorm", None) is not None: + with tf.name_scope(self.LayerNorm.name): + self.LayerNorm.build([None, None, self.config.hidden_size]) + + +# Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->Lxmert +class TFLxmertLMPredictionHead(keras.layers.Layer): + def __init__(self, config: LxmertConfig, input_embeddings: keras.layers.Layer, **kwargs): + super().__init__(**kwargs) + + self.config = config + self.hidden_size = config.hidden_size + + self.transform = TFLxmertPredictionHeadTransform(config, name="transform") + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. 
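The comment above describes weight tying: output logits are computed against the transposed input embedding matrix, with only a per-token bias learned separately. A toy sketch of that computation (the shapes are illustrative):

import tensorflow as tf

vocab_size, hidden_size = 6, 4
embedding_matrix = tf.random.uniform((vocab_size, hidden_size))  # shared with the input embeddings
bias = tf.zeros((vocab_size,))                                   # the output-only bias
hidden_states = tf.random.uniform((3, hidden_size))              # three token positions
logits = tf.matmul(hidden_states, embedding_matrix, transpose_b=True) + bias  # [3, vocab_size]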
+ self.input_embeddings = input_embeddings + + def build(self, input_shape=None): + self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") + + if self.built: + return + self.built = True + if getattr(self, "transform", None) is not None: + with tf.name_scope(self.transform.name): + self.transform.build(None) + + def get_output_embeddings(self) -> keras.layers.Layer: + return self.input_embeddings + + def set_output_embeddings(self, value: tf.Variable): + self.input_embeddings.weight = value + self.input_embeddings.vocab_size = shape_list(value)[0] + + def get_bias(self) -> Dict[str, tf.Variable]: + return {"bias": self.bias} + + def set_bias(self, value: tf.Variable): + self.bias = value["bias"] + self.config.vocab_size = shape_list(value["bias"])[0] + + def call(self, hidden_states: tf.Tensor) -> tf.Tensor: + hidden_states = self.transform(hidden_states=hidden_states) + seq_length = shape_list(hidden_states)[1] + hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size]) + hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) + hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) + hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) + + return hidden_states + + +# Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->Lxmert +class TFLxmertMLMHead(keras.layers.Layer): + def __init__(self, config: LxmertConfig, input_embeddings: keras.layers.Layer, **kwargs): + super().__init__(**kwargs) + + self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions") + + def call(self, sequence_output: tf.Tensor) -> tf.Tensor: + prediction_scores = self.predictions(hidden_states=sequence_output) + + return prediction_scores + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "predictions", None) is not None: + with tf.name_scope(self.predictions.name): + self.predictions.build(None) + + +class TFLxmertPreTrainingHeads(keras.layers.Layer): + def __init__(self, config, input_embeddings, **kwargs): + super().__init__(**kwargs) + self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions") + + self.seq_relationship = keras.layers.Dense( + 2, + kernel_initializer=get_initializer(config.initializer_range), + name="seq_relationship", + ) + self.config = config + + def call(self, sequence_output, pooled_output): + prediction_scores = self.predictions(sequence_output) + seq_relationship_score = self.seq_relationship(pooled_output) + return prediction_scores, seq_relationship_score + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "predictions", None) is not None: + with tf.name_scope(self.predictions.name): + self.predictions.build(None) + if getattr(self, "seq_relationship", None) is not None: + with tf.name_scope(self.seq_relationship.name): + self.seq_relationship.build([None, None, self.config.hidden_size]) + + +class TFLxmertVisualAnswerHead(keras.layers.Layer): + def __init__(self, config, num_labels, **kwargs): + super().__init__(**kwargs) + hid_dim = config.hidden_size + self.dense = keras.layers.Dense( + hid_dim * 2, + kernel_initializer=get_initializer(config.initializer_range), + name="logit_fc_._0", + ) + self.activation = get_tf_activation("gelu") + self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, 
name="logit_fc_._2") + self.dense_1 = keras.layers.Dense( + num_labels, + kernel_initializer=get_initializer(config.initializer_range), + name="logit_fc_._3", + ) + self.hid_dim = hid_dim + + def call(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.activation(hidden_states) + hidden_states = self.layer_norm(hidden_states) + hidden_states = self.dense_1(hidden_states) + + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "dense", None) is not None: + with tf.name_scope(self.dense.name): + self.dense.build([None, None, self.hid_dim]) + if getattr(self, "layer_norm", None) is not None: + with tf.name_scope(self.layer_norm.name): + self.layer_norm.build([None, self.hid_dim * 2]) + if getattr(self, "dense_1", None) is not None: + with tf.name_scope(self.dense_1.name): + self.dense_1.build([None, None, self.hid_dim * 2]) + + +class TFLxmertVisualObjHead(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + self.transform = TFLxmertPredictionHeadTransform(config, name="transform") + + # Decide the use of visual losses + visual_losses = {} + if config.visual_obj_loss: + visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels} + if config.visual_attr_loss: + visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels} + if config.visual_feat_loss: + visual_losses["feat"] = {"shape": (-1, 2048), "num": config.visual_feat_dim} + self.visual_losses = visual_losses + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. + self.decoder_dict = { + key: keras.layers.Dense( + self.visual_losses[key]["num"], + kernel_initializer=get_initializer(config.initializer_range), + name=f"decoder_dict.{key}", + ) + for key in self.visual_losses + } + self.config = config + + def call(self, hidden_states): + hidden_states = self.transform(hidden_states) + output = {} + for key in self.visual_losses: + output[key] = self.decoder_dict[key](hidden_states) + return output + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "transform", None) is not None: + with tf.name_scope(self.transform.name): + self.transform.build(None) + if getattr(self, "decoder_dict", None) is not None: + for layer in self.decoder_dict.values(): + with tf.name_scope(layer.name): + layer.build([None, None, self.config.hidden_size]) + + +@add_start_docstrings("""Lxmert Model with a `language modeling` head on top.""", LXMERT_START_DOCSTRING) +class TFLxmertForPreTraining(TFLxmertPreTrainedModel): + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + + self.config = config + self.num_qa_labels = config.num_qa_labels + self.visual_loss_normalizer = config.visual_loss_normalizer + + # Use of pretraining tasks + self.task_mask_lm = config.task_mask_lm + self.task_obj_predict = config.task_obj_predict + self.task_matched = config.task_matched + self.task_qa = config.task_qa + + # Lxmert backbone + self.lxmert = TFLxmertMainLayer(config, name="lxmert") + + # Pre-training heads + self.cls = TFLxmertPreTrainingHeads(config, self.lxmert.embeddings, name="cls") + if self.task_obj_predict: + self.obj_predict_head = TFLxmertVisualObjHead(config, name="obj_predict_head") + if self.task_qa: + self.answer_head = TFLxmertVisualAnswerHead(config, self.num_qa_labels, name="answer_head") + + # Loss functions + self.loss_fcts = { + "l2": 
keras.losses.Huber(delta=1.0, name="huber_loss"), + "visn_ce": keras.losses.SparseCategoricalCrossentropy(from_logits=True), + "ce": keras.losses.SparseCategoricalCrossentropy(from_logits=True), + } + + visual_losses = {} + if config.visual_obj_loss: + visual_losses["obj"] = { + "shape": (-1,), + "num": config.num_object_labels, + "loss": "visn_ce", + } + if config.visual_attr_loss: + visual_losses["attr"] = { + "shape": (-1,), + "num": config.num_attr_labels, + "loss": "visn_ce", + } + if config.visual_feat_loss: + visual_losses["feat"] = { + "shape": (-1, config.visual_feat_dim), + "num": config.visual_feat_dim, + "loss": "l2", + } + self.visual_losses = visual_losses + + @property + def dummy_inputs(self): + """ + Dummy inputs to build the network. + + Returns: + tf.Tensor with dummy inputs + """ + batch_size = 2 + num_visual_features = 10 + input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32) + visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim)) + visual_pos = tf.random.uniform((batch_size, num_visual_features, 4)) + + if self.config.task_obj_predict: + obj_labels = {} + if self.config.visual_attr_loss and self.config.task_obj_predict: + obj_labels["attr"] = ( + tf.ones([batch_size, num_visual_features]), + tf.ones([batch_size, num_visual_features]), + ) + if self.config.visual_feat_loss and self.config.task_obj_predict: + obj_labels["feat"] = ( + tf.ones([batch_size, num_visual_features, self.config.visual_feat_dim]), + tf.ones([batch_size, num_visual_features]), + ) + if self.config.visual_obj_loss and self.config.task_obj_predict: + obj_labels["obj"] = ( + tf.ones([batch_size, num_visual_features]), + tf.ones([batch_size, num_visual_features]), + ) + + return { + **{ + "input_ids": input_ids, + "visual_feats": visual_feats, + "visual_pos": visual_pos, + }, + **({"obj_labels": obj_labels} if self.config.task_obj_predict else {}), + } + + def get_lm_head(self): + return self.cls.predictions + + def get_prefix_bias_name(self): + warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) + return self.name + "/" + self.cls.name + "/" + self.cls.predictions.name + + @unpack_inputs + @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=TFLxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) + def call( + self, + input_ids: TFModelInputType | None = None, + visual_feats: tf.Tensor | None = None, + visual_pos: tf.Tensor | None = None, + attention_mask: tf.Tensor | None = None, + visual_attention_mask: tf.Tensor | None = None, + token_type_ids: tf.Tensor | None = None, + inputs_embeds: tf.Tensor | None = None, + masked_lm_labels: tf.Tensor | None = None, + obj_labels: Dict[str, Tuple[tf.Tensor, tf.Tensor]] | None = None, + matched_label: tf.Tensor | None = None, + ans: tf.Tensor | None = None, + output_attentions: bool | None = None, + output_hidden_states: bool | None = None, + return_dict: bool | None = None, + training: bool = False, + ) -> Tuple[tf.Tensor] | TFLxmertForPreTrainingOutput: + r""" + masked_lm_labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the + loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + obj_labels (`Dict[str, Tuple[tf.Tensor, tf.Tensor]]`, *optional*, defaults to `None`): + each key is named after one of the visual losses, and each element of the tuple has shape + `(batch_size, num_features)` and `(batch_size, num_features, visual_feature_dim)` for the label ids and + the label scores, respectively + matched_label (`tf.Tensor` of shape `(batch_size,)`, *optional*): + Labels for computing whether or not the text input matches the image (classification loss). Input + should be a sequence pair (see `input_ids` docstring). Indices should be in `[0, 1]`: + + - 0 indicates that the sentence does not match the image, + - 1 indicates that the sentence does match the image. + ans (`tf.Tensor` of shape `(batch_size,)`, *optional*, defaults to `None`): + a one-hot representation of the correct answer + + Returns: + """ + + lxmert_output = self.lxmert( + input_ids, + visual_feats, + visual_pos, + attention_mask, + visual_attention_mask, + token_type_ids, + inputs_embeds, + output_attentions, + output_hidden_states, + return_dict, + training, + ) + + lang_output, visual_output, pooled_output = ( + lxmert_output[0], + lxmert_output[1], + lxmert_output[2], + ) + lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output) + if self.task_qa: + answer_score = self.answer_head(pooled_output) + else: + answer_score = pooled_output[0][0] + + total_loss = ( + None + if (masked_lm_labels is None and matched_label is None and obj_labels is None and ans is None) + else tf.constant(0.0) + ) + losses = () + if masked_lm_labels is not None and self.task_mask_lm: + masked_lm_loss = self.loss_fcts["ce"]( + tf.reshape(masked_lm_labels, [-1]), + tf.reshape(lang_prediction_scores, [-1, self.config.vocab_size]), + ) + total_loss += masked_lm_loss + losses += (masked_lm_loss,) + if matched_label is not None and self.task_matched: + matched_loss = self.loss_fcts["ce"]( + tf.reshape(matched_label, [-1]), + tf.reshape(cross_relationship_score, [-1, 2]), + ) + total_loss += matched_loss + losses += (matched_loss,) + if obj_labels is not None and self.task_obj_predict: + total_visn_loss = 0.0 + visn_prediction_scores_dict = self.obj_predict_head(visual_output) + for key, key_info in self.visual_losses.items(): + label, mask_conf = obj_labels[key] + output_dim = key_info["num"] + loss_fct_name = key_info["loss"] + label_shape = key_info["shape"] + weight = self.visual_loss_normalizer + visn_loss_fct = self.loss_fcts[loss_fct_name] + visn_prediction_scores = visn_prediction_scores_dict[key] + visn_loss = visn_loss_fct( + tf.reshape(label, label_shape), + tf.reshape(visn_prediction_scores, [-1, output_dim]), + ) + + if visn_loss.ndim > 1: # Regression Losses + visn_loss = tf.reduce_mean(visn_loss) + visn_loss = tf.reduce_mean(visn_loss * tf.cast(tf.reshape(mask_conf, [-1]), visn_loss.dtype)) * weight + total_visn_loss += visn_loss + losses += (visn_loss,) + total_loss += total_visn_loss + if ans is not None and self.task_qa: + answer_loss = self.loss_fcts["ce"]( + tf.reshape(ans, [-1]), tf.reshape(answer_score, [-1, self.num_qa_labels]) + ) + # exclude "*2" here to match the effect of QA losses. + # Previous: (loss *0) for 6 epochs, (loss *2) for 6 epochs.
(Used 10 instead of 6 in EMNLP paper) + # Now : (loss *1) for 12 epochs + # + # * 2 # Multiply by 2 because > half of the data will not have label + total_loss += answer_loss + losses += (answer_loss,) + # return total_loss, tf.stack(losses)[tf.new_axis, ...], answer_score.detach() + + if not return_dict: + output = ( + lang_prediction_scores, + cross_relationship_score, + answer_score, + ) + lxmert_output[3:] + return ((total_loss,) + output) if total_loss is not None else output + + return TFLxmertForPreTrainingOutput( + loss=total_loss, + prediction_logits=lang_prediction_scores, + cross_relationship_score=cross_relationship_score, + question_answering_score=answer_score, + language_hidden_states=lxmert_output.language_hidden_states, + vision_hidden_states=lxmert_output.vision_hidden_states, + language_attentions=lxmert_output.language_attentions, + vision_attentions=lxmert_output.vision_attentions, + cross_encoder_attentions=lxmert_output.cross_encoder_attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "lxmert", None) is not None: + with tf.name_scope(self.lxmert.name): + self.lxmert.build(None) + if getattr(self, "cls", None) is not None: + with tf.name_scope(self.cls.name): + self.cls.build(None) + if getattr(self, "obj_predict_head", None) is not None: + with tf.name_scope(self.obj_predict_head.name): + self.obj_predict_head.build(None) + if getattr(self, "answer_head", None) is not None: + with tf.name_scope(self.answer_head.name): + self.answer_head.build(None) diff --git a/venv/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert.py b/venv/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert.py new file mode 100644 index 0000000000000000000000000000000000000000..8d2fca9328ddc4b658760e5597d766d4b885c3b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert.py @@ -0,0 +1,503 @@ +# coding=utf-8 +# Copyright 2020 The Google AI Team, Stanford University and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
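Before the implementation, a brief hedged usage sketch of the tokenizer defined in this file (the checkpoint name is taken from the `Copied from` note below; exact ids depend on the vocabulary):

from transformers import LxmertTokenizer

tokenizer = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
encoded = tokenizer("who is eating the apple?")
# encoded["input_ids"] begins with the [CLS] id and ends with the [SEP] id,
# as assembled by build_inputs_with_special_tokens further down.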
+ +import collections +import os +import unicodedata +from typing import List, Optional, Tuple + +from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace +from ...utils import logging + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} + + +# Copied from transformers.models.bert.tokenization_bert.load_vocab +def load_vocab(vocab_file): + """Loads a vocabulary file into a dictionary.""" + vocab = collections.OrderedDict() + with open(vocab_file, "r", encoding="utf-8") as reader: + tokens = reader.readlines() + for index, token in enumerate(tokens): + token = token.rstrip("\n") + vocab[token] = index + return vocab + + +# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + text = text.strip() + if not text: + return [] + tokens = text.split() + return tokens + + +# Copied from transformers.models.bert.tokenization_bert.BertTokenizer with bert-base-cased->unc-nlp/lxmert-base-uncased, BERT->Lxmert, BertTokenizer->LxmertTokenizer +class LxmertTokenizer(PreTrainedTokenizer): + r""" + Construct a Lxmert tokenizer. Based on WordPiece. + + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to + this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + File containing the vocabulary. + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + do_basic_tokenize (`bool`, *optional*, defaults to `True`): + Whether or not to do basic tokenization before WordPiece. + never_split (`Iterable`, *optional*): + Collection of tokens which will never be split during tokenization. Only has an effect when + `do_basic_tokenize=True` + unk_token (`str`, *optional*, defaults to `"[UNK]"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + sep_token (`str`, *optional*, defaults to `"[SEP]"`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + pad_token (`str`, *optional*, defaults to `"[PAD]"`): + The token used for padding, for example when batching sequences of different lengths. + cls_token (`str`, *optional*, defaults to `"[CLS]"`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + mask_token (`str`, *optional*, defaults to `"[MASK]"`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. + + This should likely be deactivated for Japanese (see this + [issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original Lxmert). 
+ """ + + vocab_files_names = VOCAB_FILES_NAMES + + def __init__( + self, + vocab_file, + do_lower_case=True, + do_basic_tokenize=True, + never_split=None, + unk_token="[UNK]", + sep_token="[SEP]", + pad_token="[PAD]", + cls_token="[CLS]", + mask_token="[MASK]", + tokenize_chinese_chars=True, + strip_accents=None, + **kwargs, + ): + if not os.path.isfile(vocab_file): + raise ValueError( + f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" + " model use `tokenizer = LxmertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" + ) + self.vocab = load_vocab(vocab_file) + self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) + self.do_basic_tokenize = do_basic_tokenize + if do_basic_tokenize: + self.basic_tokenizer = BasicTokenizer( + do_lower_case=do_lower_case, + never_split=never_split, + tokenize_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + ) + + self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) + + super().__init__( + do_lower_case=do_lower_case, + do_basic_tokenize=do_basic_tokenize, + never_split=never_split, + unk_token=unk_token, + sep_token=sep_token, + pad_token=pad_token, + cls_token=cls_token, + mask_token=mask_token, + tokenize_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + **kwargs, + ) + + @property + def do_lower_case(self): + return self.basic_tokenizer.do_lower_case + + @property + def vocab_size(self): + return len(self.vocab) + + def get_vocab(self): + return dict(self.vocab, **self.added_tokens_encoder) + + def _tokenize(self, text, split_special_tokens=False): + split_tokens = [] + if self.do_basic_tokenize: + for token in self.basic_tokenizer.tokenize( + text, never_split=self.all_special_tokens if not split_special_tokens else None + ): + # If the token is part of the never_split set + if token in self.basic_tokenizer.never_split: + split_tokens.append(token) + else: + split_tokens += self.wordpiece_tokenizer.tokenize(token) + else: + split_tokens = self.wordpiece_tokenizer.tokenize(text) + return split_tokens + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.vocab.get(token, self.vocab.get(self.unk_token)) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.ids_to_tokens.get(index, self.unk_token) + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + out_string = " ".join(tokens).replace(" ##", "").strip() + return out_string + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A Lxmert sequence has the following format: + + - single sequence: `[CLS] X [SEP]` + - pair of sequences: `[CLS] A [SEP] B [SEP]` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
+ """ + if token_ids_1 is None: + return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + cls = [self.cls_token_id] + sep = [self.sep_token_id] + return cls + token_ids_0 + sep + token_ids_1 + sep + + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if token_ids_1 is not None: + return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1] + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Lxmert sequence + pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + index = 0 + if os.path.isdir(save_directory): + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + else: + vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory + with open(vocab_file, "w", encoding="utf-8") as writer: + for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." + " Please check that the vocabulary is not corrupted!" + ) + index = token_index + writer.write(token + "\n") + index += 1 + return (vocab_file,) + + +# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer +class BasicTokenizer(object): + """ + Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). + + Args: + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + never_split (`Iterable`, *optional*): + Collection of tokens which will never be split during tokenization. 
Only has an effect when + `do_basic_tokenize=True` + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. + + This should likely be deactivated for Japanese (see this + [issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original BERT). + do_split_on_punc (`bool`, *optional*, defaults to `True`): + In some instances we want to skip the basic punctuation splitting so that later tokenization can capture + the full context of the words, such as contractions. + """ + + def __init__( + self, + do_lower_case=True, + never_split=None, + tokenize_chinese_chars=True, + strip_accents=None, + do_split_on_punc=True, + ): + if never_split is None: + never_split = [] + self.do_lower_case = do_lower_case + self.never_split = set(never_split) + self.tokenize_chinese_chars = tokenize_chinese_chars + self.strip_accents = strip_accents + self.do_split_on_punc = do_split_on_punc + + def tokenize(self, text, never_split=None): + """ + Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer. + + Args: + never_split (`List[str]`, *optional*) + Kept for backward compatibility purposes. Now implemented directly at the base class level (see + [`PreTrainedTokenizer.tokenize`]) List of token not to split. + """ + # union() returns a new set by concatenating the two sets. + never_split = self.never_split.union(set(never_split)) if never_split else self.never_split + text = self._clean_text(text) + + # This was added on November 1st, 2018 for the multilingual and Chinese + # models. This is also applied to the English models now, but it doesn't + # matter since the English models were not trained on any Chinese data + # and generally don't have any Chinese data in them (there are Chinese + # characters in the vocabulary because Wikipedia does have some Chinese + # words in the English Wikipedia.). 
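A small runnable sketch of the behavior described above, covering only the basic CJK block (the real `_is_chinese_char` below checks several more ranges):

def pad_cjk(text):
    # Surround each basic-block CJK character with spaces so it becomes its own token.
    out = []
    for ch in text:
        if 0x4E00 <= ord(ch) <= 0x9FFF:
            out += [" ", ch, " "]
        else:
            out.append(ch)
    return "".join(out)

print(pad_cjk("play北京").split())  # ['play', '北', '京']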
+ if self.tokenize_chinese_chars: + text = self._tokenize_chinese_chars(text) + # prevents treating the same character with different unicode codepoints as different characters + unicode_normalized_text = unicodedata.normalize("NFC", text) + orig_tokens = whitespace_tokenize(unicode_normalized_text) + split_tokens = [] + for token in orig_tokens: + if token not in never_split: + if self.do_lower_case: + token = token.lower() + if self.strip_accents is not False: + token = self._run_strip_accents(token) + elif self.strip_accents: + token = self._run_strip_accents(token) + split_tokens.extend(self._run_split_on_punc(token, never_split)) + + output_tokens = whitespace_tokenize(" ".join(split_tokens)) + return output_tokens + + def _run_strip_accents(self, text): + """Strips accents from a piece of text.""" + text = unicodedata.normalize("NFD", text) + output = [] + for char in text: + cat = unicodedata.category(char) + if cat == "Mn": + continue + output.append(char) + return "".join(output) + + def _run_split_on_punc(self, text, never_split=None): + """Splits punctuation on a piece of text.""" + if not self.do_split_on_punc or (never_split is not None and text in never_split): + return [text] + chars = list(text) + i = 0 + start_new_word = True + output = [] + while i < len(chars): + char = chars[i] + if _is_punctuation(char): + output.append([char]) + start_new_word = True + else: + if start_new_word: + output.append([]) + start_new_word = False + output[-1].append(char) + i += 1 + + return ["".join(x) for x in output] + + def _tokenize_chinese_chars(self, text): + """Adds whitespace around any CJK character.""" + output = [] + for char in text: + cp = ord(char) + if self._is_chinese_char(cp): + output.append(" ") + output.append(char) + output.append(" ") + else: + output.append(char) + return "".join(output) + + def _is_chinese_char(self, cp): + """Checks whether CP is the codepoint of a CJK character.""" + # This defines a "chinese character" as anything in the CJK Unicode block: + # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) + # + # Note that the CJK Unicode block is NOT all Japanese and Korean characters, + # despite its name. The modern Korean Hangul alphabet is a different block, + # as is Japanese Hiragana and Katakana. Those alphabets are used to write + # space-separated words, so they are not treated specially and handled + # like the all of the other languages. + if ( + (cp >= 0x4E00 and cp <= 0x9FFF) + or (cp >= 0x3400 and cp <= 0x4DBF) # + or (cp >= 0x20000 and cp <= 0x2A6DF) # + or (cp >= 0x2A700 and cp <= 0x2B73F) # + or (cp >= 0x2B740 and cp <= 0x2B81F) # + or (cp >= 0x2B820 and cp <= 0x2CEAF) # + or (cp >= 0xF900 and cp <= 0xFAFF) + or (cp >= 0x2F800 and cp <= 0x2FA1F) # + ): # + return True + + return False + + def _clean_text(self, text): + """Performs invalid character removal and whitespace cleanup on text.""" + output = [] + for char in text: + cp = ord(char) + if cp == 0 or cp == 0xFFFD or _is_control(char): + continue + if _is_whitespace(char): + output.append(" ") + else: + output.append(char) + return "".join(output) + + +# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer +class WordpieceTokenizer(object): + """Runs WordPiece tokenization.""" + + def __init__(self, vocab, unk_token, max_input_chars_per_word=100): + self.vocab = vocab + self.unk_token = unk_token + self.max_input_chars_per_word = max_input_chars_per_word + + def tokenize(self, text): + """ + Tokenizes a piece of text into its word pieces. 
This uses a greedy longest-match-first algorithm to perform + tokenization using the given vocabulary. + + For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`. + + Args: + text: A single token or whitespace-separated tokens. This should have + already been passed through *BasicTokenizer*. + + Returns: + A list of wordpiece tokens. + """ + + output_tokens = [] + for token in whitespace_tokenize(text): + chars = list(token) + if len(chars) > self.max_input_chars_per_word: + output_tokens.append(self.unk_token) + continue + + is_bad = False + start = 0 + sub_tokens = [] + while start < len(chars): + end = len(chars) + cur_substr = None + while start < end: + substr = "".join(chars[start:end]) + if start > 0: + substr = "##" + substr + if substr in self.vocab: + cur_substr = substr + break + end -= 1 + if cur_substr is None: + is_bad = True + break + sub_tokens.append(cur_substr) + start = end + + if is_bad: + output_tokens.append(self.unk_token) + else: + output_tokens.extend(sub_tokens) + return output_tokens diff --git a/venv/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert_fast.py b/venv/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert_fast.py new file mode 100644 index 0000000000000000000000000000000000000000..e31fdbcf761d50b20615c91b5587279c5fdd266e --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert_fast.py @@ -0,0 +1,169 @@ +# coding=utf-8 +# Copyright 2020 The Google AI Team, Stanford University and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +from typing import List, Optional, Tuple + +from tokenizers import normalizers + +from ...tokenization_utils_fast import PreTrainedTokenizerFast +from .tokenization_lxmert import LxmertTokenizer + + +VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} + + +# Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast with bert-base-cased->unc-nlp/lxmert-base-uncased, BERT->Lxmert, Bert->Lxmert +class LxmertTokenizerFast(PreTrainedTokenizerFast): + r""" + Construct a "fast" Lxmert tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece. + + This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should + refer to this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + File containing the vocabulary. + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + unk_token (`str`, *optional*, defaults to `"[UNK]"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + sep_token (`str`, *optional*, defaults to `"[SEP]"`): + The separator token, which is used when building a sequence from multiple sequences, e.g.
two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + pad_token (`str`, *optional*, defaults to `"[PAD]"`): + The token used for padding, for example when batching sequences of different lengths. + cls_token (`str`, *optional*, defaults to `"[CLS]"`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + mask_token (`str`, *optional*, defaults to `"[MASK]"`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + clean_text (`bool`, *optional*, defaults to `True`): + Whether or not to clean the text before tokenization by removing any control characters and replacing all + whitespaces by the classic one. + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this + issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original Lxmert). + wordpieces_prefix (`str`, *optional*, defaults to `"##"`): + The prefix for subwords. + """ + + vocab_files_names = VOCAB_FILES_NAMES + slow_tokenizer_class = LxmertTokenizer + + def __init__( + self, + vocab_file=None, + tokenizer_file=None, + do_lower_case=True, + unk_token="[UNK]", + sep_token="[SEP]", + pad_token="[PAD]", + cls_token="[CLS]", + mask_token="[MASK]", + tokenize_chinese_chars=True, + strip_accents=None, + **kwargs, + ): + super().__init__( + vocab_file, + tokenizer_file=tokenizer_file, + do_lower_case=do_lower_case, + unk_token=unk_token, + sep_token=sep_token, + pad_token=pad_token, + cls_token=cls_token, + mask_token=mask_token, + tokenize_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + **kwargs, + ) + + normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) + if ( + normalizer_state.get("lowercase", do_lower_case) != do_lower_case + or normalizer_state.get("strip_accents", strip_accents) != strip_accents + or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars + ): + normalizer_class = getattr(normalizers, normalizer_state.pop("type")) + normalizer_state["lowercase"] = do_lower_case + normalizer_state["strip_accents"] = strip_accents + normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars + self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state) + + self.do_lower_case = do_lower_case + + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A Lxmert sequence has the following format: + + - single sequence: `[CLS] X [SEP]` + - pair of sequences: `[CLS] A [SEP] B [SEP]` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. 
+ + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + + if token_ids_1 is not None: + output += token_ids_1 + [self.sep_token_id] + + return output + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Lxmert sequence + pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + files = self._tokenizer.model.save(save_directory, name=filename_prefix) + return tuple(files) diff --git a/venv/lib/python3.10/site-packages/transformers/models/mistral/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/mistral/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..34727d98cf05afa29d49b392324fbbab38e8e468 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/mistral/__init__.py @@ -0,0 +1,82 @@ +# Copyright 2023 Mistral AI and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
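Before the Mistral sources begin, a brief hedged sketch of the special-token layout produced by the LXMERT tokenizer methods above (`build_inputs_with_special_tokens` and `create_token_type_ids_from_sequences`); the ids 101/102 are stand-ins for whatever `[CLS]`/`[SEP]` map to in a real vocabulary:

```python
# Stand-in ids: 101 = [CLS], 102 = [SEP] (actual values depend on the vocab).
cls_id, sep_id = 101, 102
seq_a = [7, 8, 9]   # ids of the first sequence
seq_b = [10, 11]    # ids of the second sequence

# [CLS] A [SEP] B [SEP] -- mirrors build_inputs_with_special_tokens above.
input_ids = [cls_id] + seq_a + [sep_id] + seq_b + [sep_id]
# 0s cover "[CLS] A [SEP]", 1s cover "B [SEP]" -- mirrors
# create_token_type_ids_from_sequences above.
token_type_ids = [0] * (len(seq_a) + 2) + [1] * (len(seq_b) + 1)

print(input_ids)       # [101, 7, 8, 9, 102, 10, 11, 102]
print(token_type_ids)  # [0, 0, 0, 0, 0, 1, 1, 1]
```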
+from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available + + +_import_structure = { + "configuration_mistral": ["MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP", "MistralConfig"], +} + + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_mistral"] = [ + "MistralForCausalLM", + "MistralModel", + "MistralPreTrainedModel", + "MistralForSequenceClassification", + ] + +try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_flax_mistral"] = [ + "FlaxMistralForCausalLM", + "FlaxMistralModel", + "FlaxMistralPreTrainedModel", + ] + + +if TYPE_CHECKING: + from .configuration_mistral import MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP, MistralConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_mistral import ( + MistralForCausalLM, + MistralForSequenceClassification, + MistralModel, + MistralPreTrainedModel, + ) + + try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_flax_mistral import ( + FlaxMistralForCausalLM, + FlaxMistralModel, + FlaxMistralPreTrainedModel, + ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66379c8d39dd92a06512265ce73cf8528e234e5f Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/configuration_mistral.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/configuration_mistral.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8641ff2f9a84b4889e6517f4ecf1e06109087a7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/configuration_mistral.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/convert_mistral_weights_to_hf.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/convert_mistral_weights_to_hf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad6ec8574441d9f292db42cb95934756695d798c Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/convert_mistral_weights_to_hf.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/modeling_flax_mistral.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/modeling_flax_mistral.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a79285214a005fea70adf85c77969fca52d93fe9 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/modeling_flax_mistral.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/modeling_mistral.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/modeling_mistral.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a877599f189278cb3c9fcd8950f3107ce6eca87 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mistral/__pycache__/modeling_mistral.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/mistral/configuration_mistral.py b/venv/lib/python3.10/site-packages/transformers/models/mistral/configuration_mistral.py new file mode 100644 index 0000000000000000000000000000000000000000..83dd0e7a621cfff132ff5e665de7a3d0d549f55a --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/mistral/configuration_mistral.py @@ -0,0 +1,150 @@ +# coding=utf-8 +# Copyright 2023 Mistral AI and the HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Mistral model configuration""" + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402 + + +class MistralConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`MistralModel`]. It is used to instantiate a + Mistral model according to the specified arguments, defining the model architecture. Instantiating a configuration + with the defaults will yield a similar configuration to that of the Mistral-7B-v0.1 or Mistral-7B-Instruct-v0.1. + + [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) + [mistralai/Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 32000): + Vocabulary size of the Mistral model. Defines the number of different tokens that can be represented by the + `input_ids` passed when calling [`MistralModel`]. + hidden_size (`int`, *optional*, defaults to 4096): + Dimension of the hidden representations. + intermediate_size (`int`, *optional*, defaults to 14336): + Dimension of the MLP representations. + num_hidden_layers (`int`, *optional*, defaults to 32): + Number of hidden layers in the Transformer decoder. + num_attention_heads (`int`, *optional*, defaults to 32): + Number of attention heads for each attention layer in the Transformer decoder. + num_key_value_heads (`int`, *optional*, defaults to 8): + This is the number of key_value heads that should be used to implement Grouped Query Attention.
If + `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if + `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When + converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed + by mean-pooling all the original heads within that group. For more details, check out [this + paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`. + hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): + The non-linear activation function (function or string) in the decoder. + max_position_embeddings (`int`, *optional*, defaults to `4096*32`): + The maximum sequence length that this model might ever be used with. Mistral's sliding window attention + allows sequences of up to 4096*32 tokens. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + rms_norm_eps (`float`, *optional*, defaults to 1e-06): + The epsilon used by the rms normalization layers. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + pad_token_id (`int`, *optional*): + The id of the padding token. + bos_token_id (`int`, *optional*, defaults to 1): + The id of the "beginning-of-sequence" token. + eos_token_id (`int`, *optional*, defaults to 2): + The id of the "end-of-sequence" token. + tie_word_embeddings (`bool`, *optional*, defaults to `False`): + Whether the model's input and output word embeddings should be tied. + rope_theta (`float`, *optional*, defaults to 10000.0): + The base period of the RoPE embeddings. + sliding_window (`int`, *optional*, defaults to 4096): + Sliding window attention window size. If not specified, will default to `4096`. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities.
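The mean-pooling recipe described for `num_key_value_heads` above can be shown concretely. A minimal sketch (shapes invented for illustration) collapsing an 8-head key projection into 2 key/value groups:

```python
import torch

# Invented toy shapes: 8 query heads, 2 kv groups, head_dim 4, hidden 32.
num_heads, num_kv_heads, head_dim, hidden = 8, 2, 4, 32
wk = torch.randn(num_heads * head_dim, hidden)  # MHA-style k_proj weight

# Group the 8 heads into 2 groups of 4 and average each group, as the
# GQA paper suggests for converting an MHA checkpoint.
grouped = wk.view(num_kv_heads, num_heads // num_kv_heads, head_dim, hidden)
wk_gqa = grouped.mean(dim=1).reshape(num_kv_heads * head_dim, hidden)

print(wk_gqa.shape)  # torch.Size([8, 32]): 2 kv heads x head_dim 4
```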
+ + ```python + >>> from transformers import MistralModel, MistralConfig + + >>> # Initializing a Mistral 7B style configuration + >>> configuration = MistralConfig() + + >>> # Initializing a model from the Mistral 7B style configuration + >>> model = MistralModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "mistral" + keys_to_ignore_at_inference = ["past_key_values"] + + def __init__( + self, + vocab_size=32000, + hidden_size=4096, + intermediate_size=14336, + num_hidden_layers=32, + num_attention_heads=32, + num_key_value_heads=8, + hidden_act="silu", + max_position_embeddings=4096 * 32, + initializer_range=0.02, + rms_norm_eps=1e-6, + use_cache=True, + pad_token_id=None, + bos_token_id=1, + eos_token_id=2, + tie_word_embeddings=False, + rope_theta=10000.0, + sliding_window=4096, + attention_dropout=0.0, + **kwargs, + ): + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.sliding_window = sliding_window + + # for backward compatibility + if num_key_value_heads is None: + num_key_value_heads = num_attention_heads + + self.num_key_value_heads = num_key_value_heads + self.hidden_act = hidden_act + self.initializer_range = initializer_range + self.rms_norm_eps = rms_norm_eps + self.use_cache = use_cache + self.rope_theta = rope_theta + self.attention_dropout = attention_dropout + + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + tie_word_embeddings=tie_word_embeddings, + **kwargs, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/mistral/convert_mistral_weights_to_hf.py b/venv/lib/python3.10/site-packages/transformers/models/mistral/convert_mistral_weights_to_hf.py new file mode 100644 index 0000000000000000000000000000000000000000..4ba6236ee8e249ef9bb0af4b873cf1cd7b1ee6c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/mistral/convert_mistral_weights_to_hf.py @@ -0,0 +1,276 @@ +# Copyright 2023 Mistral AI and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import gc +import json +import os +import shutil +import warnings + +import torch + +from transformers import ( + LlamaTokenizer, + MistralConfig, + MistralForCausalLM, +) + + +try: + from transformers import LlamaTokenizerFast + + tokenizer_class = LlamaTokenizerFast +except ImportError as e: + warnings.warn(e) + warnings.warn( + "The converted tokenizer will be the `slow` tokenizer. 
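A further hedged usage sketch for the configuration class above, beyond the default-construction example in its docstring; every value here is illustrative, not a released checkpoint:

```python
from transformers import MistralConfig

# A deliberately tiny config for smoke tests (illustrative values only).
tiny = MistralConfig(
    hidden_size=256,
    intermediate_size=896,
    num_hidden_layers=4,
    num_attention_heads=8,
    num_key_value_heads=2,  # GQA: every 4 query heads share one kv head
    sliding_window=512,
)

tiny.save_pretrained("./tiny-mistral-config")  # writes config.json
reloaded = MistralConfig.from_pretrained("./tiny-mistral-config")
assert reloaded.num_key_value_heads == 2
```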
To use the fast tokenizer, update your `tokenizers` library and re-run the tokenizer conversion." + ) + tokenizer_class = LlamaTokenizer + +""" +Sample usage: + +``` +python src/transformers/models/mistral/convert_mistral_weights_to_hf.py \ + --input_dir /path/to/downloaded/mistral/weights --model_size 7B --output_dir /output/path +``` + +Thereafter, models can be loaded via: + +```py +from transformers import MistralForCausalLM, LlamaTokenizer + +model = MistralForCausalLM.from_pretrained("/output/path") +tokenizer = LlamaTokenizer.from_pretrained("/output/path") +``` + +Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions +come in several checkpoints, they each contain a part of each weight of the model, so we need to load them all in RAM). +""" + +NUM_SHARDS = {"7B": 1} + + +def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256): + return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of) + + +def read_json(path): + with open(path, "r") as f: + return json.load(f) + + +def write_json(text, path): + with open(path, "w") as f: + json.dump(text, f) + + +def write_model(model_path, input_base_path, model_size, tokenizer_path=None, safe_serialization=True): + # for backward compatibility: previously the repo needed to be called `my_repo/model_size` + if not os.path.isfile(os.path.join(input_base_path, "params.json")): + input_base_path = os.path.join(input_base_path, model_size) + + os.makedirs(model_path, exist_ok=True) + tmp_model_path = os.path.join(model_path, "tmp") + os.makedirs(tmp_model_path, exist_ok=True) + + params = read_json(os.path.join(input_base_path, "params.json")) + num_shards = NUM_SHARDS[model_size] + + # For some reason this is a string in the params.json + sliding_window = int(params["sliding_window"]) + n_layers = params["n_layers"] + n_heads = params["n_heads"] + n_heads_per_shard = n_heads // num_shards + dim = params["dim"] + dims_per_head = dim // n_heads + base = params.get("rope_theta", 10000.0) + inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head)) + max_position_embeddings = 4096 * 8 + + if tokenizer_path is not None: + tokenizer = tokenizer_class(tokenizer_path) + tokenizer.save_pretrained(model_path) + vocab_size = tokenizer.vocab_size if tokenizer_path is not None else 32000 + + if "n_kv_heads" in params: + num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA + num_local_key_value_heads = num_key_value_heads // num_shards + key_value_dim = dims_per_head * num_local_key_value_heads + else:  # compatibility with other checkpoints + num_key_value_heads = n_heads + num_local_key_value_heads = n_heads_per_shard + key_value_dim = dim + + # permute for sliced rotary + def permute(w, n_heads=n_heads, dim1=dim, dim2=dim): + return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2) + + print(f"Fetching all parameters from the checkpoint at {input_base_path}.") + # Load weights + loaded = [ + torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu") + for i in range(num_shards) + ] + param_count = 0 + index_dict = {"weight_map": {}} + for layer_i in range(n_layers): + filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin" + + # Sharded + # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share + # the same storage object; saving attention_norm and ffn_norm will save other weights too, which is + #
redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. + + state_dict = { + f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][ + f"layers.{layer_i}.attention_norm.weight" + ].clone(), + f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][ + f"layers.{layer_i}.ffn_norm.weight" + ].clone(), + } + state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute( + torch.cat( + [ + loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim) + for i in range(num_shards) + ], + dim=0, + ).reshape(dim, dim) + ) + state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute( + torch.cat( + [ + loaded[i][f"layers.{layer_i}.attention.wk.weight"].view( + num_local_key_value_heads, dims_per_head, dim + ) + for i in range(num_shards) + ], + dim=0, + ).reshape(key_value_dim, dim), + num_key_value_heads, + key_value_dim, + dim, + ) + state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat( + [ + loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(num_local_key_value_heads, dims_per_head, dim) + for i in range(num_shards) + ], + dim=0, + ).reshape(key_value_dim, dim) + + state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat( + [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1 + ) + state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat( + [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0 + ) + state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat( + [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1 + ) + state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat( + [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0 + ) + + state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq + for k, v in state_dict.items(): + index_dict["weight_map"][k] = filename + param_count += v.numel() + torch.save(state_dict, os.path.join(tmp_model_path, filename)) + + filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin" + state_dict = { + "model.norm.weight": loaded[0]["norm.weight"], + "model.embed_tokens.weight": torch.cat([loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1), + "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0), + } + + for k, v in state_dict.items(): + index_dict["weight_map"][k] = filename + param_count += v.numel() + torch.save(state_dict, os.path.join(tmp_model_path, filename)) + + # Write configs + index_dict["metadata"] = {"total_size": param_count * 2} + write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json")) + config = MistralConfig( + hidden_size=dim, + intermediate_size=params["hidden_dim"], + num_attention_heads=params["n_heads"], + num_hidden_layers=params["n_layers"], + rms_norm_eps=params["norm_eps"], + num_key_value_heads=num_key_value_heads, + vocab_size=vocab_size, + rope_theta=base, + max_position_embeddings=max_position_embeddings, + sliding_window=sliding_window, + ) + config.save_pretrained(tmp_model_path) + + # Make space so we can load the model properly now. 
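+ # NOTE (illustrative, not in the original file): the `permute` helper above
+ # can be sanity-checked on a toy tensor: permute(torch.eye(4), n_heads=1,
+ # dim1=4, dim2=4) returns the identity rows in order 0, 2, 1, 3, i.e. it
+ # regroups interleaved rotary pairs (x0, y0, x1, y1) into halves
+ # (x0, x1, y0, y1), which is the layout the HF rotary code expects.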
+ del state_dict + del loaded + gc.collect() + + print("Loading the checkpoint in a Mistral model.") + model = MistralForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.bfloat16, low_cpu_mem_usage=True) + # Avoid saving this as part of the config. + del model.config._name_or_path + model.config.torch_dtype = torch.float16 + print("Saving in the Transformers format.") + model.save_pretrained(model_path, safe_serialization=safe_serialization) + shutil.rmtree(tmp_model_path) + + +def write_tokenizer(tokenizer_path, input_tokenizer_path): + # Initialize the tokenizer based on the `spm` model + print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.") + tokenizer = tokenizer_class(input_tokenizer_path) + tokenizer.save_pretrained(tokenizer_path) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--input_dir", + help="Location of Mistral weights, which contains tokenizer.model and model folders", + ) + parser.add_argument( + "--model_size", + choices=["7B", "tokenizer_only"], + help="Size of the model to convert ('7B'), or 'tokenizer_only' to convert only the tokenizer.", + ) + parser.add_argument( + "--output_dir", + help="Location to write HF model and tokenizer", + ) + parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.") + args = parser.parse_args() + spm_path = os.path.join(args.input_dir, "tokenizer.model") + if args.model_size != "tokenizer_only": + write_model( + model_path=args.output_dir, + input_base_path=args.input_dir, + model_size=args.model_size, + safe_serialization=args.safe_serialization, + tokenizer_path=spm_path, + ) + else: + write_tokenizer(args.output_dir, spm_path) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/transformers/models/mistral/modeling_flax_mistral.py b/venv/lib/python3.10/site-packages/transformers/models/mistral/modeling_flax_mistral.py new file mode 100644 index 0000000000000000000000000000000000000000..3480fc7214a84a69a401e511426a2517afb2e959 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/mistral/modeling_flax_mistral.py @@ -0,0 +1,741 @@ +# coding=utf-8 +# Copyright 2024 Mistral AI and the HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
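One pitfall worth flagging in the argument parser above: `type=bool` does not parse the string "False" as `False`, because any non-empty string is truthy in Python. A short demonstration of the behavior and the usual flag-style alternative:

```python
import argparse

p = argparse.ArgumentParser()
p.add_argument("--safe_serialization", type=bool)
args = p.parse_args(["--safe_serialization", "False"])
print(args.safe_serialization)  # True -- bool("False") is truthy!

# The usual alternative is a boolean flag:
p2 = argparse.ArgumentParser()
p2.add_argument("--safe_serialization", action="store_true")
print(p2.parse_args(["--safe_serialization"]).safe_serialization)  # True
print(p2.parse_args([]).safe_serialization)                        # False
```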
+""" Flax Mistral model.""" +from typing import Optional, Tuple + +import flax.linen as nn +import jax +import jax.numpy as jnp +import numpy as np +from flax.core.frozen_dict import FrozenDict, freeze, unfreeze +from flax.linen import combine_masks, make_causal_mask +from flax.linen.attention import dot_product_attention_weights +from flax.traverse_util import flatten_dict, unflatten_dict +from jax import lax + +from ...modeling_flax_outputs import ( + FlaxBaseModelOutput, + FlaxBaseModelOutputWithPast, + FlaxCausalLMOutput, + FlaxCausalLMOutputWithCrossAttentions, +) +from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, logging +from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward +from .configuration_mistral import MistralConfig + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "MistralConfig" +_REAL_CHECKPOINT_FOR_DOC = "mistralai/Mistral-7B-v0.1" +_CHECKPOINT_FOR_DOC = "ksmcg/Mistral-tiny" + +MISTRAL_START_DOCSTRING = r""" + + This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a Flax Linen + [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a + regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. + + Finally, this model supports inherent JAX features such as: + + - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) + - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) + - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) + - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) + + Parameters: + config ([`MistralConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. + dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): + The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16`, or + `jax.numpy.bfloat16`. + + This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If + specified all the computation will be performed with the given `dtype`. + + **Note that this only specifies the dtype of the computation and does not influence the dtype of model + parameters.** + + If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and + [`~FlaxPreTrainedModel.to_bf16`]. +""" + +MISTRAL_INPUTS_DOCSTRING = r""" + Args: + input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`): + Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast + auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
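A small hedged illustration of the `attention_mask`/`position_ids` conventions described above, using invented toy inputs (pad id 0 is an assumption, not a Mistral constant):

```python
import numpy as np

# Two prompts of length 3 and 5, left-padded to a common length of 5.
input_ids = np.array([[0, 0, 7, 8, 9],
                      [3, 4, 5, 6, 7]])
attention_mask = (input_ids != 0).astype("i4")  # 1 = real token, 0 = padding
# Positions count only the real tokens, as in the cumsum trick used for
# generation later in this file.
position_ids = np.clip(attention_mask.cumsum(-1) - 1, 0, None)
print(attention_mask)  # [[0 0 1 1 1], [1 1 1 1 1]]
print(position_ids)    # [[0 0 0 1 2], [0 1 2 3 4]]
```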
+""" + + +# Copied from transformers.models.llama.modeling_flax_llama.FlaxLlamaRMSNorm with Llama->Mistral +class FlaxMistralRMSNorm(nn.Module): + config: MistralConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.epsilon = self.config.rms_norm_eps + self.weight = self.param("weight", lambda _, shape: jnp.ones(shape), self.config.hidden_size) + + def __call__(self, hidden_states): + variance = jnp.asarray(hidden_states, dtype=jnp.float32) + variance = jnp.power(variance, 2) + variance = variance.mean(-1, keepdims=True) + # use `jax.numpy.sqrt` as `jax.lax.rsqrt` does not match `torch.rsqrt` + hidden_states = hidden_states / jnp.sqrt(variance + self.epsilon) + + return self.weight * jnp.asarray(hidden_states, dtype=self.dtype) + + +# Copied from transformers.models.llama.modeling_flax_llama.FlaxLlamaRotaryEmbedding with Llama->Mistral +class FlaxMistralRotaryEmbedding(nn.Module): + config: MistralConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + head_dim = self.config.hidden_size // self.config.num_attention_heads + self.sincos = create_sinusoidal_positions(self.config.max_position_embeddings, head_dim) + + def __call__(self, key, query, position_ids): + sincos = self.sincos[position_ids] + sin_pos, cos_pos = jnp.split(sincos, 2, axis=-1) + + key = apply_rotary_pos_emb(key, sin_pos, cos_pos) + query = apply_rotary_pos_emb(query, sin_pos, cos_pos) + + key = jnp.asarray(key, dtype=self.dtype) + query = jnp.asarray(query, dtype=self.dtype) + + return key, query + + +# Copied from transformers.models.llama.modeling_flax_llama.FlaxLlamaMLP with Llama->Mistral +class FlaxMistralMLP(nn.Module): + config: MistralConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + embed_dim = self.config.hidden_size + inner_dim = self.config.intermediate_size if self.config.intermediate_size is not None else 4 * embed_dim + + kernel_init = jax.nn.initializers.normal(self.config.initializer_range) + self.act = ACT2FN[self.config.hidden_act] + + self.gate_proj = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init) + self.down_proj = nn.Dense(embed_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init) + self.up_proj = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, kernel_init=kernel_init) + + def __call__(self, hidden_states): + up_proj_states = self.up_proj(hidden_states) + gate_states = self.act(self.gate_proj(hidden_states)) + + hidden_states = self.down_proj(up_proj_states * gate_states) + return hidden_states + + +# Copied from transformers.models.llama.modeling_flax_llama.apply_rotary_pos_emb +def apply_rotary_pos_emb(tensor, sin_pos, cos_pos): + return (tensor * cos_pos) + (rotate_half(tensor) * sin_pos) + + +# Copied from transformers.models.llama.modeling_flax_llama.create_sinusoidal_positions +def create_sinusoidal_positions(num_pos, dim): + inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim)) + freqs = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32") + + emb = np.concatenate((freqs, freqs), axis=-1) + out = np.concatenate((np.sin(emb)[:, None, :], np.cos(emb)[:, None, :]), axis=-1) + return jnp.array(out[:, :, :num_pos]) + + +# Copied from transformers.models.llama.modeling_flax_llama.rotate_half +def rotate_half(tensor): + """Rotates half the hidden dims of the input.""" + rotate_half_tensor = jnp.concatenate( + (-tensor[..., tensor.shape[-1] // 2 :], tensor[..., : tensor.shape[-1] // 2]), axis=-1 + ) + return rotate_half_tensor + + +class FlaxMistralAttention(nn.Module): + config: MistralConfig + 
dtype: jnp.dtype = jnp.float32 + + def setup(self): + config = self.config + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.num_key_value_heads = config.num_key_value_heads + self.num_key_value_groups = self.num_heads // self.num_key_value_heads + self.max_position_embeddings = config.max_position_embeddings + self.attention_softmax_in_fp32 = self.dtype is not jnp.float32 + self.rope_theta = config.rope_theta + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" + f" and `num_heads`: {self.num_heads})." + ) + self.q_proj = nn.Dense(self.num_heads * self.head_dim, use_bias=False, dtype=self.dtype) + self.k_proj = nn.Dense(self.num_key_value_heads * self.head_dim, use_bias=False, dtype=self.dtype) + self.v_proj = nn.Dense(self.num_key_value_heads * self.head_dim, use_bias=False, dtype=self.dtype) + self.o_proj = nn.Dense(self.hidden_size, use_bias=False, dtype=self.dtype) + causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype="bool"), dtype="bool") + self.causal_mask = jnp.triu(causal_mask, k=-config.sliding_window) + self.rotary_emb = FlaxMistralRotaryEmbedding(config, dtype=self.dtype) + + def _split_heads(self, hidden_states, num_heads): + return hidden_states.reshape(hidden_states.shape[:2] + (num_heads, self.head_dim)) + + def _merge_heads(self, hidden_states): + return hidden_states.reshape(hidden_states.shape[:2] + (self.hidden_size,)) + + @nn.compact + # Copied from transformers.models.gpt_neo.modeling_flax_gpt_neo.FlaxGPTNeoSelfAttention._concatenate_to_cache + def _concatenate_to_cache(self, key, value, query, attention_mask): + """ + This function takes projected key, value states from a single input token and concatenates the states to cached + states from previous steps. This function is slightly adapted from the official Flax repository: + https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 + """ + # detect if we're initializing by absence of existing cache data. + is_initialized = self.has_variable("cache", "cached_key") + cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) + cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) + cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) + + if is_initialized: + *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape + # update key, value caches with our new 1d spatial slices + cur_index = cache_index.value + indices = (0,) * len(batch_dims) + (cur_index, 0, 0) + key = lax.dynamic_update_slice(cached_key.value, key, indices) + value = lax.dynamic_update_slice(cached_value.value, value, indices) + cached_key.value = key + cached_value.value = value + num_updated_cache_vectors = query.shape[1] + cache_index.value = cache_index.value + num_updated_cache_vectors + # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
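+ # NOTE (illustrative, not in the original file): with max_length=6,
+ # cur_index=2 and one newly written token, jnp.arange(6) < 2 + 1 gives
+ # [True, True, True, False, False, False]: the query attends to the two
+ # cached positions and to itself, while untouched cache slots stay masked.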
+ pad_mask = jnp.broadcast_to( + jnp.arange(max_length) < cur_index + num_updated_cache_vectors, + tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), + ) + attention_mask = combine_masks(pad_mask, attention_mask) + return key, value, attention_mask + + def __call__( + self, + hidden_states: jnp.ndarray, + attention_mask: Optional[jnp.ndarray] = None, + position_ids: Optional[jnp.ndarray] = None, + deterministic: bool = True, + output_attentions: bool = False, + init_cache: bool = False, + ) -> Tuple[jnp.ndarray, jnp.ndarray]: + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = self._split_heads(query_states, self.num_heads) + key_states = self._split_heads(key_states, self.num_key_value_heads) + value_states = self._split_heads(value_states, self.num_key_value_heads) + + key_states, query_states = self.rotary_emb(key_states, query_states, position_ids) + query_length, key_length = query_states.shape[1], key_states.shape[1] + if self.has_variable("cache", "cached_key"): + mask_shift = self.variables["cache"]["cache_index"] + max_decoder_length = self.variables["cache"]["cached_key"].shape[1] + causal_mask = lax.dynamic_slice( + self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) + ) + else: + causal_mask = self.causal_mask[:, :, :query_length, :key_length] + + batch_size = hidden_states.shape[0] + causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) + attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) + attention_mask = combine_masks(attention_mask, causal_mask) + + if self.has_variable("cache", "cached_key") or init_cache: + key_states, value_states, attention_mask = self._concatenate_to_cache( + key_states, value_states, query_states, attention_mask + ) + key_states = jnp.repeat(key_states, self.num_key_value_groups, axis=2) + value_states = jnp.repeat(value_states, self.num_key_value_groups, axis=2) + + attention_bias = lax.select( + attention_mask > 0, + jnp.full(attention_mask.shape, 0.0).astype(self.dtype), + jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), + ) + + # usual dot product attention + attention_dtype = jnp.float32 if self.attention_softmax_in_fp32 else self.dtype + attn_weights = dot_product_attention_weights( + query_states, + key_states, + bias=attention_bias, + deterministic=deterministic, + dropout_rate=self.config.attention_dropout, + dtype=attention_dtype, + ) + + if self.attention_softmax_in_fp32: + attn_weights = attn_weights.astype(self.dtype) + + attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) + attn_output = self._merge_heads(attn_output) + attn_output = self.o_proj(attn_output) + + outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) + return outputs + + +# Copied from transformers.models.llama.modeling_flax_llama.FlaxLlamaDecoderLayer with Llama->Mistral +class FlaxMistralDecoderLayer(nn.Module): + config: MistralConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.input_layernorm = FlaxMistralRMSNorm(self.config, dtype=self.dtype) + self.self_attn = FlaxMistralAttention(self.config, dtype=self.dtype) + self.post_attention_layernorm = FlaxMistralRMSNorm(self.config, dtype=self.dtype) + self.mlp = FlaxMistralMLP(self.config, dtype=self.dtype) + + def __call__( + self, + hidden_states, + attention_mask=None, + position_ids=None, + deterministic: 
bool = True, + init_cache: bool = False, + output_attentions: bool = False, + ): + residual = hidden_states + hidden_states = self.input_layernorm(hidden_states) + outputs = self.self_attn( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + ) + # residual connection + attn_output = outputs[0] + hidden_states = residual + attn_output + + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + # residual connection + hidden_states = residual + hidden_states + + return (hidden_states,) + outputs[1:] + + +# Copied from transformers.models.gpt_neo.modeling_flax_gpt_neo.FlaxGPTNeoPreTrainedModel with GPTNeo->Mistral, GPT_NEO->MISTRAL, transformer->model +class FlaxMistralPreTrainedModel(FlaxPreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = MistralConfig + base_model_prefix = "model" + module_class: nn.Module = None + + def __init__( + self, + config: MistralConfig, + input_shape: Tuple = (1, 1), + seed: int = 0, + dtype: jnp.dtype = jnp.float32, + _do_init: bool = True, + **kwargs, + ): + module = self.module_class(config=config, dtype=dtype, **kwargs) + super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) + + def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: + # init input tensors + input_ids = jnp.zeros(input_shape, dtype="i4") + attention_mask = jnp.ones_like(input_ids) + position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) + params_rng, dropout_rng = jax.random.split(rng) + rngs = {"params": params_rng, "dropout": dropout_rng} + + random_params = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)["params"] + + if params is not None: + random_params = flatten_dict(unfreeze(random_params)) + params = flatten_dict(unfreeze(params)) + for missing_key in self._missing_keys: + params[missing_key] = random_params[missing_key] + self._missing_keys = set() + return freeze(unflatten_dict(params)) + else: + return random_params + + def init_cache(self, batch_size, max_length): + r""" + Args: + batch_size (`int`): + batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. + max_length (`int`): + maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized + cache. 
+ """ + # init input variables to retrieve cache + input_ids = jnp.ones((batch_size, max_length)) + attention_mask = jnp.ones_like(input_ids) + position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) + + init_variables = self.module.init( + jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True + ) + return unfreeze(init_variables["cache"]) + + @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING) + def __call__( + self, + input_ids, + attention_mask=None, + position_ids=None, + params: dict = None, + past_key_values: dict = None, + dropout_rng: jax.random.PRNGKey = None, + train: bool = False, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.return_dict + + batch_size, sequence_length = input_ids.shape + + if position_ids is None: + if past_key_values is not None: + raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.") + + position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) + + if attention_mask is None: + attention_mask = jnp.ones((batch_size, sequence_length)) + + # Handle any PRNG if needed + rngs = {} + if dropout_rng is not None: + rngs["dropout"] = dropout_rng + + inputs = {"params": params or self.params} + + # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that it can be changed by FlaxMistralAttention module + if past_key_values: + inputs["cache"] = past_key_values + mutable = ["cache"] + else: + mutable = False + + outputs = self.module.apply( + inputs, + jnp.array(input_ids, dtype="i4"), + jnp.array(attention_mask, dtype="i4"), + jnp.array(position_ids, dtype="i4"), + not train, + False, + output_attentions, + output_hidden_states, + return_dict, + rngs=rngs, + mutable=mutable, + ) + + # add updated cache to model output + if past_key_values is not None and return_dict: + outputs, past_key_values = outputs + outputs["past_key_values"] = unfreeze(past_key_values["cache"]) + return outputs + elif past_key_values is not None and not return_dict: + outputs, past_key_values = outputs + outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:] + + return outputs + + +# Copied from transformers.models.llama.modeling_flax_llama.FlaxLlamaLayerCollection with Llama->Mistral +class FlaxMistralLayerCollection(nn.Module): + config: MistralConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.blocks = [ + FlaxMistralDecoderLayer(self.config, dtype=self.dtype, name=str(i)) + for i in range(self.config.num_hidden_layers) + ] + + def __call__( + self, + hidden_states, + attention_mask=None, + position_ids=None, + deterministic: bool = True, + init_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = False, + ): + all_attentions = () if output_attentions else None + all_hidden_states = () if output_hidden_states else None + + for block in self.blocks: + if output_hidden_states: + all_hidden_states += (hidden_states,) + layer_outputs = block( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + ) + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions += (layer_outputs[1],) + + # this contains possible `None` values - `FlaxMistralModule` will filter them out + outputs = (hidden_states, all_hidden_states, all_attentions) + + return outputs + + +# Copied from transformers.models.llama.modeling_flax_llama.FlaxLlamaModule with Llama->Mistral +class FlaxMistralModule(nn.Module): + config: MistralConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.hidden_size = self.config.hidden_size + embedding_init = jax.nn.initializers.normal(stddev=self.config.initializer_range) + self.embed_tokens = nn.Embed( + self.config.vocab_size, + self.hidden_size, + embedding_init=embedding_init, + dtype=self.dtype, + ) + self.layers = FlaxMistralLayerCollection(self.config, dtype=self.dtype) + self.norm = FlaxMistralRMSNorm(self.config, dtype=self.dtype) + + def __call__( + self, + input_ids, + attention_mask=None, + position_ids=None, + deterministic=True, + init_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + input_embeds = self.embed_tokens(input_ids.astype("i4")) + + outputs = self.layers( + input_embeds, + position_ids=position_ids, + attention_mask=attention_mask, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + hidden_states = self.norm(hidden_states) + + if output_hidden_states: + all_hidden_states = outputs[1] + (hidden_states,) + 
outputs = (hidden_states, all_hidden_states) + outputs[2:] + else: + outputs = (hidden_states,) + outputs[1:] + + if not return_dict: + return tuple(v for v in outputs if v is not None) + + return FlaxBaseModelOutput( + last_hidden_state=hidden_states, + hidden_states=outputs[1], + attentions=outputs[-1], + ) + + +@add_start_docstrings( + "The bare Mistral Model transformer outputting raw hidden-states without any specific head on top.", + MISTRAL_START_DOCSTRING, +) +class FlaxMistralModel(FlaxMistralPreTrainedModel): + module_class = FlaxMistralModule + + +append_call_sample_docstring( + FlaxMistralModel, + _CHECKPOINT_FOR_DOC, + FlaxBaseModelOutputWithPast, + _CONFIG_FOR_DOC, + real_checkpoint=_REAL_CHECKPOINT_FOR_DOC, +) + + +# Copied from transformers.models.llama.modeling_flax_llama.FlaxLlamaForCausalLMModule with Llama->Mistral +class FlaxMistralForCausalLMModule(nn.Module): + config: MistralConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.model = FlaxMistralModule(self.config, dtype=self.dtype) + self.lm_head = nn.Dense( + self.config.vocab_size, + use_bias=False, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), + ) + + def __call__( + self, + input_ids, + attention_mask=None, + position_ids=None, + deterministic: bool = True, + init_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + outputs = self.model( + input_ids, + position_ids=position_ids, + attention_mask=attention_mask, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + lm_logits = self.lm_head(hidden_states) + + if not return_dict: + return (lm_logits,) + outputs[1:] + + return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) + + +@add_start_docstrings( + """ + The Mistral Model transformer with a language modeling head (linear layer) on top. + """, + MISTRAL_START_DOCSTRING, +) + +# Copied from transformers.models.gptj.modeling_flax_gptj.FlaxGPTJForCausalLM with GPTJ->Mistral +class FlaxMistralForCausalLM(FlaxMistralPreTrainedModel): + module_class = FlaxMistralForCausalLMModule + + def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None): + # initializing the cache + batch_size, seq_length = input_ids.shape + + past_key_values = self.init_cache(batch_size, max_length) + # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. + # But since Mistral uses a causal mask, those positions are masked anyways. 
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation + extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") + if attention_mask is not None: + position_ids = attention_mask.cumsum(axis=-1) - 1 + extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) + else: + position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) + + return { + "past_key_values": past_key_values, + "attention_mask": extended_attention_mask, + "position_ids": position_ids, + } + + def update_inputs_for_generation(self, model_outputs, model_kwargs): + model_kwargs["past_key_values"] = model_outputs.past_key_values + model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1 + return model_kwargs + + +append_call_sample_docstring( + FlaxMistralForCausalLM, + _CHECKPOINT_FOR_DOC, + FlaxCausalLMOutputWithCrossAttentions, + _CONFIG_FOR_DOC, + real_checkpoint=_REAL_CHECKPOINT_FOR_DOC, +) diff --git a/venv/lib/python3.10/site-packages/transformers/models/mistral/modeling_mistral.py b/venv/lib/python3.10/site-packages/transformers/models/mistral/modeling_mistral.py new file mode 100644 index 0000000000000000000000000000000000000000..c013967c78f1161823ceea420af10da73d22487d --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/mistral/modeling_mistral.py @@ -0,0 +1,1387 @@ +# coding=utf-8 +# Copyright 2023 Mistral AI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
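Before the PyTorch implementation begins, a quick aside on the Flax generation helpers above: `prepare_inputs_for_generation` derives position ids from the padding mask and copies the mask into one static, `max_length`-sized buffer so the compiled decoding step never changes shape. A minimal sketch of that bookkeeping (toy sizes; the values are illustrative, not from the source):

```python
import jax.numpy as jnp
from jax import lax

batch_size, max_length = 1, 8
attention_mask = jnp.array([[0, 0, 1, 1]], dtype="i4")  # a left-padded prompt

# positions count only non-pad tokens (cumsum - 1), so pad positions get -1
position_ids = attention_mask.cumsum(axis=-1) - 1
# position_ids == [[-1, -1, 0, 1]]

# one static mask of length max_length, with the real mask copied into its prefix
extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
# extended_attention_mask == [[0, 0, 1, 1, 1, 1, 1, 1]]
```

Because the mask keeps a fixed shape across decoding steps, XLA compiles the generation step once instead of re-tracing it for every sequence length.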
+""" PyTorch Mistral model.""" +import inspect +import math +import warnings +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...cache_utils import Cache, DynamicCache +from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa +from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast +from ...modeling_utils import PreTrainedModel +from ...utils import ( + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_flash_attn_2_available, + is_flash_attn_greater_or_equal_2_10, + logging, + replace_return_docstrings, +) +from .configuration_mistral import MistralConfig + + +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters) + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "MistralConfig" + + +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + +# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Mistral +class MistralRMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + MistralRMSNorm is equivalent to T5LayerNorm + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + return self.weight * hidden_states.to(input_dtype) + + +# copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Mistral +# TODO @Arthur no longer copied from LLama after static cache +class MistralRotaryEmbedding(nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + super().__init__() + + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + # Build here to make `torch.jit.trace` work. 
+ self._set_cos_sin_cache( + seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() + ) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq) + + freqs = torch.outer(t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if seq_len > self.max_seq_len_cached: + self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) + + return ( + self.cos_cached[:seq_len].to(dtype=x.dtype), + self.sin_cached[:seq_len].to(dtype=x.dtype), + ) + + +# Copied from transformers.models.llama.modeling_llama.rotate_half +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +# copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb +# TODO @Arthur no longer copied from LLama after static cache +def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`): + The position indices of the tokens corresponding to the query and key tensors. For example, this can be + used to pass offsetted position ids when working with a KV-cache. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
+ """ + cos = cos[position_ids].unsqueeze(unsqueeze_dim) + sin = sin[position_ids].unsqueeze(unsqueeze_dim) + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +class MistralMLP(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.intermediate_size = config.intermediate_size + self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) + self.act_fn = ACT2FN[config.hidden_act] + + def forward(self, x): + return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) + + +# Copied from transformers.models.llama.modeling_llama.repeat_kv +def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + """ + This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, + num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) + """ + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep == 1: + return hidden_states + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + +class MistralAttention(nn.Module): + """ + Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer + and "Generating Long Sequences with Sparse Transformers". + """ + + def __init__(self, config: MistralConfig, layer_idx: Optional[int] = None): + super().__init__() + self.config = config + self.layer_idx = layer_idx + if layer_idx is None: + logger.warning_once( + f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " + "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " + "when creating this class." + ) + + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.num_key_value_heads = config.num_key_value_heads + self.num_key_value_groups = self.num_heads // self.num_key_value_heads + self.max_position_embeddings = config.max_position_embeddings + self.rope_theta = config.rope_theta + self.is_causal = True + self.attention_dropout = config.attention_dropout + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" + f" and `num_heads`: {self.num_heads})." 
+ ) + self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) + self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) + self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) + self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) + + self.rotary_emb = MistralRotaryEmbedding( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + base=self.rope_theta, + ) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + **kwargs, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" + ) + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + if self.layer_idx is None: + raise ValueError( + f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} " + "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " + "with a layer index." 
+ ) + kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + + if past_key_value is not None: + cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + # repeat k/v heads if n_kv_heads < n_heads + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): + raise ValueError( + f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" + ) + + attn_weights = attn_weights + attention_mask + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class MistralFlashAttention2(MistralAttention): + """ + Mistral flash attention module. This module inherits from `MistralAttention` as the weights of the module stays + untouched. The only required change would be on the forward pass where it needs to correctly call the public API of + flash attention and deal with padding tokens in case the input contains any of them. + """ + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. + # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. + # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). + self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + **kwargs, + ): + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. 
Please make sure to use `attention_mask` instead."
+            )
+
+            # overwrite attention_mask with padding_mask
+            attention_mask = kwargs.pop("padding_mask")
+        bsz, q_len, _ = hidden_states.size()
+
+        query_states = self.q_proj(hidden_states)
+        key_states = self.k_proj(hidden_states)
+        value_states = self.v_proj(hidden_states)
+
+        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+        kv_seq_len = key_states.shape[-2]
+        if past_key_value is not None:
+            if self.layer_idx is None:
+                raise ValueError(
+                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
+                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
+                    "with a layer index."
+                )
+            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+
+        # Because the input can be padded, the absolute sequence length depends on the max position id.
+        rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
+        cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)
+
+        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+
+        use_sliding_windows = (
+            _flash_supports_window_size
+            and getattr(self.config, "sliding_window", None) is not None
+            and kv_seq_len > self.config.sliding_window
+        )
+
+        if not _flash_supports_window_size:
+            logger.warning_once(
+                "The current flash attention version does not support sliding window attention; for a more"
+                " memory-efficient implementation, make sure to upgrade the flash-attn library."
+            )
+
+        if past_key_value is not None:
+            # Only activate the slicing cache if the config has a `sliding_window` attribute
+            cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
+            if (
+                getattr(self.config, "sliding_window", None) is not None
+                and kv_seq_len > self.config.sliding_window
+                and cache_has_contents
+            ):
+                slicing_tokens = 1 - self.config.sliding_window
+
+                past_key = past_key_value[self.layer_idx][0]
+                past_value = past_key_value[self.layer_idx][1]
+
+                past_key = past_key[:, :, slicing_tokens:, :].contiguous()
+                past_value = past_value[:, :, slicing_tokens:, :].contiguous()
+
+                if past_key.shape[-2] != self.config.sliding_window - 1:
+                    raise ValueError(
+                        f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
+                        f" {past_key.shape}"
+                    )
+
+                if attention_mask is not None:
+                    attention_mask = attention_mask[:, slicing_tokens:]
+                    attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
+
+            cache_kwargs = {"sin": sin, "cos": cos}  # Specific to RoPE models
+            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+        # repeat k/v heads if n_kv_heads < n_heads
+        key_states = repeat_kv(key_states, self.num_key_value_groups)
+        value_states = repeat_kv(value_states, self.num_key_value_groups)
+        dropout_rate = 0.0 if not self.training else self.attention_dropout
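A note on the `repeat_kv` calls just above: with grouped-query attention, fewer key/value heads are stored than query heads, and each is broadcast to serve a group of queries. A minimal sketch of the expand-and-reshape (hypothetical toy sizes):

```python
import torch

batch, n_kv_heads, n_rep, slen, head_dim = 1, 2, 3, 4, 8
kv = torch.randn(batch, n_kv_heads, slen, head_dim)

# expand each kv head n_rep times along a new axis, then fold into the head axis
expanded = kv[:, :, None, :, :].expand(batch, n_kv_heads, n_rep, slen, head_dim)
repeated = expanded.reshape(batch, n_kv_heads * n_rep, slen, head_dim)

assert repeated.shape == (1, 6, 4, 8)
assert torch.equal(repeated[0, 0], repeated[0, 1])  # consecutive heads in a group are copies
```

The second assert shows that output heads within a group are copies of the same key/value head, which is what lets the query heads share a smaller KV cache.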
+        # In PEFT, the layer norms are usually cast to float32 for training stability,
+        # so the input hidden states may get silently cast to float32. We therefore
+        # cast them back to float16 to make sure everything works as expected.
+        input_dtype = query_states.dtype
+        if input_dtype == torch.float32:
+            if torch.is_autocast_enabled():
+                target_dtype = torch.get_autocast_gpu_dtype()
+            # Handle the case where the model is quantized
+            elif hasattr(self.config, "_pre_quantization_dtype"):
+                target_dtype = self.config._pre_quantization_dtype
+            else:
+                target_dtype = self.q_proj.weight.dtype
+
+            logger.warning_once(
+                f"The input hidden states seem to have been silently cast to float32; this might be because"
+                f" you have upcast embedding or layer norm layers to float32. We will cast the input back to"
+                f" {target_dtype}."
+            )
+
+            query_states = query_states.to(target_dtype)
+            key_states = key_states.to(target_dtype)
+            value_states = value_states.to(target_dtype)
+
+        # Reshape to the expected shape for Flash Attention
+        query_states = query_states.transpose(1, 2)
+        key_states = key_states.transpose(1, 2)
+        value_states = value_states.transpose(1, 2)
+
+        attn_output = self._flash_attention_forward(
+            query_states,
+            key_states,
+            value_states,
+            attention_mask,
+            q_len,
+            dropout=dropout_rate,
+            use_sliding_windows=use_sliding_windows,
+        )
+
+        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
+        attn_output = self.o_proj(attn_output)
+
+        if not output_attentions:
+            attn_weights = None
+
+        return attn_output, attn_weights, past_key_value
+
+    def _flash_attention_forward(
+        self,
+        query_states,
+        key_states,
+        value_states,
+        attention_mask,
+        query_length,
+        dropout=0.0,
+        softmax_scale=None,
+        use_sliding_windows=False,
+    ):
+        """
+        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
+        first unpad the input, then compute the attention scores, and finally pad the attention scores back.
+
+        Args:
+            query_states (`torch.Tensor`):
+                Input query states to be passed to the Flash Attention API
+            key_states (`torch.Tensor`):
+                Input key states to be passed to the Flash Attention API
+            value_states (`torch.Tensor`):
+                Input value states to be passed to the Flash Attention API
+            attention_mask (`torch.Tensor`):
+                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+                position of padding tokens and 1 for the position of non-padding tokens.
+            dropout (`float`):
+                Attention dropout
+            softmax_scale (`float`, *optional*):
+                The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
+            use_sliding_windows (`bool`, *optional*):
+                Whether to activate sliding window attention.
+        """
+        if not self._flash_attn_uses_top_left_mask:
+            causal = self.is_causal
+        else:
+            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1.
+            # For details, please see the comment in LlamaFlashAttention2 __init__.
+ causal = self.is_causal and query_length != 1 + + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + if not use_sliding_windows: + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=causal, + ) + else: + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=causal, + window_size=(self.config.sliding_window, self.config.sliding_window), + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + if not use_sliding_windows: + attn_output = flash_attn_func( + query_states, + key_states, + value_states, + dropout, + softmax_scale=softmax_scale, + causal=causal, + ) + else: + attn_output = flash_attn_func( + query_states, + key_states, + value_states, + dropout, + softmax_scale=softmax_scale, + causal=causal, + window_size=(self.config.sliding_window, self.config.sliding_window), + ) + + return attn_output + + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape + + # On the first iteration we need to properly re-create the padding mask + # by slicing it on the proper place + if kv_seq_len != attention_mask.shape[-1]: + attention_mask_num_tokens = attention_mask.shape[-1] + attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :] + + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + + key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k) + value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k) + + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. 
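The slice below keeps only the trailing `query_length` columns of the mask. As background for this varlen path, a sketch of what `_get_unpad_data` (defined near the top of this file) computes for a small padded batch; the values in the comments are illustrative:

```python
import torch
import torch.nn.functional as F

# 1 = real token, 0 = padding; left-padded sequences of lengths 2 and 3
attention_mask = torch.tensor([[0, 1, 1], [1, 1, 1]], dtype=torch.int32)

seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)  # tensor([2, 3])
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()  # tensor([1, 2, 3, 4, 5])
max_seqlen_in_batch = seqlens_in_batch.max().item()  # 3
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
# tensor([0, 2, 5]): cumulative offsets into the packed (unpadded) batch
```

`cu_seqlens` is the offsets array the flash-attn varlen kernels use to locate each sequence inside the packed batch, and `indices` is what `index_first_axis` uses to gather the real tokens.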
+ attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + +# copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with Llama->Mistral +# TODO @Arthur no longer copied from LLama after static cache +class MistralSdpaAttention(MistralAttention): + """ + Mistral attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from + `MistralAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to + SDPA API. + """ + + # Adapted from MistralAttention.forward + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if output_attentions: + # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. + logger.warning_once( + "MistralModel is using MistralSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " + 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' + ) + return super().forward( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + + if past_key_value is not None: + cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" + ) + + # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, + # Reference: https://github.com/pytorch/pytorch/issues/112577. 
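The contiguity workaround follows below. As a minimal standalone sketch of the SDPA call this class delegates to (toy shapes; assumes torch >= 2.0):

```python
import torch
import torch.nn.functional as F

bsz, n_heads, q_len, head_dim = 1, 2, 5, 8
q = torch.randn(bsz, n_heads, q_len, head_dim)
k = torch.randn(bsz, n_heads, q_len, head_dim)
v = torch.randn(bsz, n_heads, q_len, head_dim)

# with no explicit mask, is_causal=True lets the kernel apply the causal mask itself;
# during single-token decoding (q_len == 1) no mask is needed at all
out = F.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=True)
assert out.shape == (bsz, n_heads, q_len, head_dim)
```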
+ if query_states.device.type == "cuda" and attention_mask is not None: + query_states = query_states.contiguous() + key_states = key_states.contiguous() + value_states = value_states.contiguous() + + attn_output = torch.nn.functional.scaled_dot_product_attention( + query_states, + key_states, + value_states, + attn_mask=attention_mask, + dropout_p=self.attention_dropout if self.training else 0.0, + # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1. + is_causal=self.is_causal and attention_mask is None and q_len > 1, + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.view(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + return attn_output, None, past_key_value + + +MISTRAL_ATTENTION_CLASSES = { + "eager": MistralAttention, + "flash_attention_2": MistralFlashAttention2, + "sdpa": MistralSdpaAttention, +} + + +class MistralDecoderLayer(nn.Module): + def __init__(self, config: MistralConfig, layer_idx: int): + super().__init__() + self.hidden_size = config.hidden_size + + self.self_attn = MISTRAL_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx) + + self.mlp = MistralMLP(config) + self.input_layernorm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.post_attention_layernorm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + **kwargs, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" + ) + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): attention mask of size + `(batch, sequence_length)` where padding elements are indicated by 0. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). 
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + """ + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +MISTRAL_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`MistralConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +@add_start_docstrings( + "The bare Mistral Model outputting raw hidden-states without any specific head on top.", + MISTRAL_START_DOCSTRING, +) +class MistralPreTrainedModel(PreTrainedModel): + config_class = MistralConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _no_split_modules = ["MistralDecoderLayer"] + _skip_keys_device_placement = "past_key_values" + _supports_flash_attn_2 = True + _supports_sdpa = True + _supports_cache_class = True + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + +MISTRAL_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. 
+ + If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*): + Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values` + returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. + + Two formats are allowed: + - a [`~cache_utils.Cache`] instance; + - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy + cache format. + + The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the + legacy cache format will be returned. + + If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't + have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` + of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare Mistral Model outputting raw hidden-states without any specific head on top.", + MISTRAL_START_DOCSTRING, +) +class MistralModel(MistralPreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`MistralDecoderLayer`] + + Args: + config: MistralConfig + """ + + def __init__(self, config: MistralConfig): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + self.layers = nn.ModuleList( + [MistralDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] + ) + self._attn_implementation = config._attn_implementation + self.norm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + elif input_ids is not None: + batch_size, seq_length = input_ids.shape + elif inputs_embeds is not None: + batch_size, seq_length, _ = inputs_embeds.shape + else: + raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + past_key_values_length = 0 + + if use_cache: + use_legacy_cache = not isinstance(past_key_values, Cache) + if use_legacy_cache: + past_key_values = DynamicCache.from_legacy_cache(past_key_values) + past_key_values_length = past_key_values.get_usable_length(seq_length) + + if position_ids is None: + device = input_ids.device if input_ids is not None else inputs_embeds.device + position_ids = torch.arange( + past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device + ) + position_ids = position_ids.unsqueeze(0).view(-1, seq_length) + else: + position_ids = position_ids.view(-1, seq_length).long() + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache: + is_padding_right = attention_mask[:, -1].sum().item() != batch_size + if is_padding_right: + raise ValueError( + "You are attempting to perform batched generation with padding_side='right'" + " this may lead to unexpected behaviour for Flash Attention version of Mistral. Make sure to " + " call `tokenizer.padding_side = 'left'` before tokenizing the input. " + ) + + if self._attn_implementation == "flash_attention_2": + # 2d mask is passed through the layers + attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None + elif self._attn_implementation == "sdpa" and not output_attentions: + # output_attentions=True can not be supported when using SDPA, and we fall back on + # the manual implementation that requires a 4D causal mask in all cases. + attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( + attention_mask, + (batch_size, seq_length), + inputs_embeds, + past_key_values_length, + sliding_window=self.config.sliding_window, + ) + else: + # 4d mask is passed through the layers + attention_mask = _prepare_4d_causal_attention_mask( + attention_mask, + (batch_size, seq_length), + inputs_embeds, + past_key_values_length, + sliding_window=self.config.sliding_window, + ) + + hidden_states = inputs_embeds + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = None + + for decoder_layer in self.layers: + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + attention_mask, + position_ids, + past_key_values, + output_attentions, + use_cache, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_values, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache = layer_outputs[2 if output_attentions else 1] + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = None + if use_cache: + next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache + + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + 
past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + +class MistralForCausalLM(MistralPreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.model = MistralModel(config) + self.vocab_size = config.vocab_size + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model = decoder + + def get_decoder(self): + return self.model + + @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, MistralForCausalLM + + >>> model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1") + >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1") + + >>> prompt = "Hey, are you conscious? Can you talk to me?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." 
+ ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + logits = self.lm_head(hidden_states) + logits = logits.float() + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Ensure tensors are on the same device + shift_labels = shift_labels.to(shift_logits.device) + loss_fct = CrossEntropyLoss() + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs + ): + # Omit tokens covered by past_key_values + if past_key_values is not None: + if isinstance(past_key_values, Cache): + cache_length = past_key_values.get_seq_length() + past_length = past_key_values.seen_tokens + max_cache_length = past_key_values.get_max_length() + else: + cache_length = past_length = past_key_values[0][0].shape[2] + max_cache_length = None + + # Keep only the unprocessed tokens: + # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where + # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as + # input) + if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]: + input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :] + # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard + # input_ids based on the past_length. + elif past_length < input_ids.shape[1]: + input_ids = input_ids[:, past_length:] + # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens. + + # If we are about to go beyond the maximum cache length, we need to crop the input attention mask. 
+ if ( + max_cache_length is not None + and attention_mask is not None + and cache_length + input_ids.shape[1] > max_cache_length + ): + attention_mask = attention_mask[:, -max_cache_length:] + + position_ids = kwargs.get("position_ids", None) + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1] :] + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs.update( + { + "position_ids": position_ids, + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "attention_mask": attention_mask, + } + ) + return model_inputs + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past + + +@add_start_docstrings( + """ + The Mistral Model transformer with a sequence classification head on top (linear layer). + + [`MistralForSequenceClassification`] uses the last token in order to do the classification, as other causal models + (e.g. GPT-2) do. + + Since it does classification on the last token, it requires to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). + """, + MISTRAL_START_DOCSTRING, +) +# Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Mistral, LLAMA->MISTRAL +class MistralForSequenceClassification(MistralPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.model = MistralModel(config) + self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.model( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size = input_ids.shape[0] + else: + batch_size = inputs_embeds.shape[0] + + if self.config.pad_token_id is None and batch_size != 1: + raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility + sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 + sequence_lengths = sequence_lengths % input_ids.shape[-1] + sequence_lengths = sequence_lengths.to(logits.device) + else: + sequence_lengths = -1 + + pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] + + loss = None + if labels is not None: + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(pooled_logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(pooled_logits, labels) + if not return_dict: + output = (pooled_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/roberta/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/roberta/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..774179f5f6f445565de0da790c12f9e759c7301a --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/roberta/__init__.py @@ -0,0 +1,164 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_flax_available, + is_tf_available, + is_tokenizers_available, + is_torch_available, +) + + +_import_structure = { + "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"], + "tokenization_roberta": ["RobertaTokenizer"], +} + +try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"] + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_roberta"] = [ + "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", + "RobertaForCausalLM", + "RobertaForMaskedLM", + "RobertaForMultipleChoice", + "RobertaForQuestionAnswering", + "RobertaForSequenceClassification", + "RobertaForTokenClassification", + "RobertaModel", + "RobertaPreTrainedModel", + ] + +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tf_roberta"] = [ + "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFRobertaForCausalLM", + "TFRobertaForMaskedLM", + "TFRobertaForMultipleChoice", + "TFRobertaForQuestionAnswering", + "TFRobertaForSequenceClassification", + "TFRobertaForTokenClassification", + "TFRobertaMainLayer", + "TFRobertaModel", + "TFRobertaPreTrainedModel", + ] + +try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_flax_roberta"] = [ + "FlaxRobertaForCausalLM", + "FlaxRobertaForMaskedLM", + "FlaxRobertaForMultipleChoice", + "FlaxRobertaForQuestionAnswering", + "FlaxRobertaForSequenceClassification", + "FlaxRobertaForTokenClassification", + "FlaxRobertaModel", + "FlaxRobertaPreTrainedModel", + ] + + +if TYPE_CHECKING: + from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig + from .tokenization_roberta import RobertaTokenizer + + try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .tokenization_roberta_fast import RobertaTokenizerFast + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_roberta import ( + ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, + RobertaForCausalLM, + RobertaForMaskedLM, + RobertaForMultipleChoice, + RobertaForQuestionAnswering, + RobertaForSequenceClassification, + RobertaForTokenClassification, + RobertaModel, + RobertaPreTrainedModel, + ) + + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tf_roberta import ( + TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, + TFRobertaForCausalLM, + TFRobertaForMaskedLM, + TFRobertaForMultipleChoice, + 
TFRobertaForQuestionAnswering, + TFRobertaForSequenceClassification, + TFRobertaForTokenClassification, + TFRobertaMainLayer, + TFRobertaModel, + TFRobertaPreTrainedModel, + ) + + try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_flax_roberta import ( + FlaxRobertaForCausalLM, + FlaxRobertaForMaskedLM, + FlaxRobertaForMultipleChoice, + FlaxRobertaForQuestionAnswering, + FlaxRobertaForSequenceClassification, + FlaxRobertaForTokenClassification, + FlaxRobertaModel, + FlaxRobertaPreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9743139e389bfc043d6b1dc7221850e4bd692e06 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/configuration_roberta.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/configuration_roberta.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b61608470ad6aa92ba1fb09daff6554d5b3fac24 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/configuration_roberta.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/convert_roberta_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/convert_roberta_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3910b01021d369a5293d9f343072a0a26fa4ae8c Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/convert_roberta_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_flax_roberta.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_flax_roberta.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2622196c875f8e0e2a67f577951745fb58118234 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_flax_roberta.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_roberta.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_roberta.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da48c629877cb2b6b0a6cd82f17ad9ea3342b74d Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_roberta.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_tf_roberta.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_tf_roberta.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..f883688d25b47361353a3afe8513692e7cb110be Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/modeling_tf_roberta.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/tokenization_roberta.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/tokenization_roberta.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1429d71e8268cae190b32ec4edfcd3166abdf320 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/tokenization_roberta.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/tokenization_roberta_fast.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/tokenization_roberta_fast.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29f35b24aa379bf36a5bddf35ccafd70144cff0a Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/roberta/__pycache__/tokenization_roberta_fast.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/roberta/configuration_roberta.py b/venv/lib/python3.10/site-packages/transformers/models/roberta/configuration_roberta.py new file mode 100644 index 0000000000000000000000000000000000000000..aa549556d949fda5c9e4dfbb02be27a245a6e3b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/roberta/configuration_roberta.py @@ -0,0 +1,154 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" RoBERTa configuration""" +from collections import OrderedDict +from typing import Mapping + +from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class RobertaConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`RobertaModel`] or a [`TFRobertaModel`]. It is + used to instantiate a RoBERTa model according to the specified arguments, defining the model architecture. + Instantiating a configuration with the defaults will yield a similar configuration to that of the RoBERTa + [FacebookAI/roberta-base](https://huggingface.co/FacebookAI/roberta-base) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 50265): + Vocabulary size of the RoBERTa model. 
Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`RobertaModel`] or [`TFRobertaModel`]. + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + max_position_embeddings (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + type_vocab_size (`int`, *optional*, defaults to 2): + The vocabulary size of the `token_type_ids` passed when calling [`RobertaModel`] or [`TFRobertaModel`]. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + position_embedding_type (`str`, *optional*, defaults to `"absolute"`): + Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For + positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to + [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). + For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models + with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). + is_decoder (`bool`, *optional*, defaults to `False`): + Whether the model is used as a decoder or not. If `False`, the model is used as an encoder. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + classifier_dropout (`float`, *optional*): + The dropout ratio for the classification head. 
+ + Examples: + + ```python + >>> from transformers import RobertaConfig, RobertaModel + + >>> # Initializing a RoBERTa configuration + >>> configuration = RobertaConfig() + + >>> # Initializing a model (with random weights) from the configuration + >>> model = RobertaModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "roberta" + + def __init__( + self, + vocab_size=50265, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=2, + initializer_range=0.02, + layer_norm_eps=1e-12, + pad_token_id=1, + bos_token_id=0, + eos_token_id=2, + position_embedding_type="absolute", + use_cache=True, + classifier_dropout=None, + **kwargs, + ): + super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.position_embedding_type = position_embedding_type + self.use_cache = use_cache + self.classifier_dropout = classifier_dropout + + +class RobertaOnnxConfig(OnnxConfig): + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + if self.task == "multiple-choice": + dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} + else: + dynamic_axis = {0: "batch", 1: "sequence"} + return OrderedDict( + [ + ("input_ids", dynamic_axis), + ("attention_mask", dynamic_axis), + ] + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..e4d95354ff93978754e76809914f0d48d8461787 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py @@ -0,0 +1,178 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
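(For reference, the `RobertaOnnxConfig.inputs` property defined above only declares which axes of each input are dynamic for ONNX export. A quick sketch of what it yields for the default task; this instantiation is illustrative, not taken from the diff:)

from transformers import RobertaConfig
from transformers.models.roberta.configuration_roberta import RobertaOnnxConfig

onnx_config = RobertaOnnxConfig(RobertaConfig())
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])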
+"""Convert RoBERTa checkpoint.""" + + +import argparse +import pathlib + +import fairseq +import torch +from fairseq.models.roberta import RobertaModel as FairseqRobertaModel +from fairseq.modules import TransformerSentenceEncoderLayer +from packaging import version + +from transformers import RobertaConfig, RobertaForMaskedLM, RobertaForSequenceClassification +from transformers.models.bert.modeling_bert import ( + BertIntermediate, + BertLayer, + BertOutput, + BertSelfAttention, + BertSelfOutput, +) +from transformers.utils import logging + + +if version.parse(fairseq.__version__) < version.parse("0.9.0"): + raise Exception("requires fairseq >= 0.9.0") + + +logging.set_verbosity_info() +logger = logging.get_logger(__name__) + +SAMPLE_TEXT = "Hello world! cécé herlolip" + + +def convert_roberta_checkpoint_to_pytorch( + roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool +): + """ + Copy/paste/tweak roberta's weights to our BERT structure. + """ + roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path) + roberta.eval() # disable dropout + roberta_sent_encoder = roberta.model.encoder.sentence_encoder + config = RobertaConfig( + vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings, + hidden_size=roberta.args.encoder_embed_dim, + num_hidden_layers=roberta.args.encoder_layers, + num_attention_heads=roberta.args.encoder_attention_heads, + intermediate_size=roberta.args.encoder_ffn_embed_dim, + max_position_embeddings=514, + type_vocab_size=1, + layer_norm_eps=1e-5, # PyTorch default used in fairseq + ) + if classification_head: + config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0] + print("Our BERT config:", config) + + model = RobertaForSequenceClassification(config) if classification_head else RobertaForMaskedLM(config) + model.eval() + + # Now let's copy all the weights. + # Embeddings + model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight + model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight + model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like( + model.roberta.embeddings.token_type_embeddings.weight + ) # just zero them out b/c RoBERTa doesn't use them. 
+ model.roberta.embeddings.LayerNorm.weight = roberta_sent_encoder.emb_layer_norm.weight + model.roberta.embeddings.LayerNorm.bias = roberta_sent_encoder.emb_layer_norm.bias + + for i in range(config.num_hidden_layers): + # Encoder: start of layer + layer: BertLayer = model.roberta.encoder.layer[i] + roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i] + + # self attention + self_attn: BertSelfAttention = layer.attention.self + assert ( + roberta_layer.self_attn.k_proj.weight.data.shape + == roberta_layer.self_attn.q_proj.weight.data.shape + == roberta_layer.self_attn.v_proj.weight.data.shape + == torch.Size((config.hidden_size, config.hidden_size)) + ) + + self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight + self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias + self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight + self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias + self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight + self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias + + # self-attention output + self_output: BertSelfOutput = layer.attention.output + assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape + self_output.dense.weight = roberta_layer.self_attn.out_proj.weight + self_output.dense.bias = roberta_layer.self_attn.out_proj.bias + self_output.LayerNorm.weight = roberta_layer.self_attn_layer_norm.weight + self_output.LayerNorm.bias = roberta_layer.self_attn_layer_norm.bias + + # intermediate + intermediate: BertIntermediate = layer.intermediate + assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape + intermediate.dense.weight = roberta_layer.fc1.weight + intermediate.dense.bias = roberta_layer.fc1.bias + + # output + bert_output: BertOutput = layer.output + assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape + bert_output.dense.weight = roberta_layer.fc2.weight + bert_output.dense.bias = roberta_layer.fc2.bias + bert_output.LayerNorm.weight = roberta_layer.final_layer_norm.weight + bert_output.LayerNorm.bias = roberta_layer.final_layer_norm.bias + # end of layer + + if classification_head: + model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight + model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias + model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight + model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias + else: + # LM Head + model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight + model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias + model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight + model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias + model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight + model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias + + # Let's check that we get the same results. 
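+ # (encode one sample with both models and compare outputs elementwise; the max
+ # absolute difference is expected to be ~1e-7, well inside the atol=1e-3 check below)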
+ input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1 + + our_output = model(input_ids)[0] + if classification_head: + their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids)) + else: + their_output = roberta.model(input_ids)[0] + print(our_output.shape, their_output.shape) + max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item() + print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7 + success = torch.allclose(our_output, their_output, atol=1e-3) + print("Do both models output the same tensors?", "🔥" if success else "💩") + if not success: + raise Exception("Something went wRoNg") + + pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True) + print(f"Saving model to {pytorch_dump_folder_path}") + model.save_pretrained(pytorch_dump_folder_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." + ) + parser.add_argument( + "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." + ) + parser.add_argument( + "--classification_head", action="store_true", help="Whether to convert a final classification head." + ) + args = parser.parse_args() + convert_roberta_checkpoint_to_pytorch( + args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/roberta/modeling_flax_roberta.py b/venv/lib/python3.10/site-packages/transformers/models/roberta/modeling_flax_roberta.py new file mode 100644 index 0000000000000000000000000000000000000000..ecdd31386b21eb620c35a7299fea8cfd3319aecc --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/roberta/modeling_flax_roberta.py @@ -0,0 +1,1488 @@ +# coding=utf-8 +# Copyright 2021 The Google Flax Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
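(The conversion entry point above can also be called directly from Python rather than through argparse; a sketch with placeholder paths, assuming fairseq >= 0.9.0 is installed as the script requires:)

from transformers.models.roberta.convert_roberta_original_pytorch_checkpoint_to_pytorch import (
    convert_roberta_checkpoint_to_pytorch,
)

convert_roberta_checkpoint_to_pytorch(
    roberta_checkpoint_path="/path/to/fairseq/roberta.base",  # placeholder path
    pytorch_dump_folder_path="/path/to/hf_dump",              # placeholder path
    classification_head=False,
)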
+from typing import Callable, Optional, Tuple + +import flax.linen as nn +import jax +import jax.numpy as jnp +import numpy as np +from flax.core.frozen_dict import FrozenDict, freeze, unfreeze +from flax.linen import combine_masks, make_causal_mask +from flax.linen import partitioning as nn_partitioning +from flax.linen.attention import dot_product_attention_weights +from flax.traverse_util import flatten_dict, unflatten_dict +from jax import lax + +from ...modeling_flax_outputs import ( + FlaxBaseModelOutputWithPastAndCrossAttentions, + FlaxBaseModelOutputWithPooling, + FlaxBaseModelOutputWithPoolingAndCrossAttentions, + FlaxCausalLMOutputWithCrossAttentions, + FlaxMaskedLMOutput, + FlaxMultipleChoiceModelOutput, + FlaxQuestionAnsweringModelOutput, + FlaxSequenceClassifierOutput, + FlaxTokenClassifierOutput, +) +from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, overwrite_call_docstring +from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging +from .configuration_roberta import RobertaConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "FacebookAI/roberta-base" +_CONFIG_FOR_DOC = "RobertaConfig" + +remat = nn_partitioning.remat + + +def create_position_ids_from_input_ids(input_ids, padding_idx): + """ + Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols + are ignored. This is modified from fairseq's `utils.make_positions`. + + Args: + input_ids: jnp.ndarray + padding_idx: int + + Returns: jnp.ndarray + """ + # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. + mask = (input_ids != padding_idx).astype("i4") + + if mask.ndim > 2: + mask = mask.reshape((-1, mask.shape[-1])) + incremental_indices = jnp.cumsum(mask, axis=1).astype("i4") * mask + incremental_indices = incremental_indices.reshape(input_ids.shape) + else: + incremental_indices = jnp.cumsum(mask, axis=1).astype("i4") * mask + + return incremental_indices.astype("i4") + padding_idx + + +ROBERTA_START_DOCSTRING = r""" + + This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading, saving and converting weights from PyTorch models) + + This model is also a + [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as + a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and + behavior. + + Finally, this model supports inherent JAX features such as: + + - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) + - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) + - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) + - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) + + Parameters: + config ([`RobertaConfig`]): Model configuration class with all the parameters of the + model. Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. +""" + +ROBERTA_INPUTS_DOCSTRING = r""" + Args: + input_ids (`numpy.ndarray` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. 
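(To make the position-numbering convention of `create_position_ids_from_input_ids` above concrete, a small worked example with RoBERTa's padding_idx of 1, assuming the function is in scope:)

import jax.numpy as jnp

input_ids = jnp.array([[0, 5, 7, 1, 1]])  # 1 is the padding index
# mask = [1, 1, 1, 0, 0]; cumsum * mask = [1, 2, 3, 0, 0]; + padding_idx = [2, 3, 4, 1, 1]
print(create_position_ids_from_input_ids(input_ids, padding_idx=1))
# -> [[2 3 4 1 1]]: real tokens count up from padding_idx + 1, pads stay at padding_idx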
+ + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`numpy.ndarray` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`numpy.ndarray` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + head_mask (`numpy.ndarray` of shape `({0})`, `optional): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEmbeddings with Bert->Roberta +class FlaxRobertaEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings.""" + + config: RobertaConfig + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + + def setup(self): + self.word_embeddings = nn.Embed( + self.config.vocab_size, + self.config.hidden_size, + embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), + dtype=self.dtype, + ) + self.position_embeddings = nn.Embed( + self.config.max_position_embeddings, + self.config.hidden_size, + embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), + dtype=self.dtype, + ) + self.token_type_embeddings = nn.Embed( + self.config.type_vocab_size, + self.config.hidden_size, + embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), + dtype=self.dtype, + ) + self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) + self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) + + def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True): + # Embed + inputs_embeds = self.word_embeddings(input_ids.astype("i4")) + position_embeds = self.position_embeddings(position_ids.astype("i4")) + token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4")) + + # Sum all embeddings + hidden_states = inputs_embeds + token_type_embeddings + position_embeds + + # Layer Norm + hidden_states = self.LayerNorm(hidden_states) + hidden_states = self.dropout(hidden_states, deterministic=deterministic) + return hidden_states + + +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfAttention with Bert->Roberta +class FlaxRobertaSelfAttention(nn.Module): + config: RobertaConfig + causal: bool = False + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + + def setup(self): + self.head_dim = self.config.hidden_size // self.config.num_attention_heads + if self.config.hidden_size % self.config.num_attention_heads != 0: + raise ValueError( + "`config.hidden_size`: 
{self.config.hidden_size} has to be a multiple of `config.num_attention_heads` " + " : {self.config.num_attention_heads}" + ) + + self.query = nn.Dense( + self.config.hidden_size, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + ) + self.key = nn.Dense( + self.config.hidden_size, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + ) + self.value = nn.Dense( + self.config.hidden_size, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + ) + + if self.causal: + self.causal_mask = make_causal_mask( + jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool" + ) + + def _split_heads(self, hidden_states): + return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim)) + + def _merge_heads(self, hidden_states): + return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,)) + + @nn.compact + # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention._concatenate_to_cache + def _concatenate_to_cache(self, key, value, query, attention_mask): + """ + This function takes projected key, value states from a single input token and concatenates the states to cached + states from previous steps. This function is slighly adapted from the official Flax repository: + https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 + """ + # detect if we're initializing by absence of existing cache data. + is_initialized = self.has_variable("cache", "cached_key") + cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) + cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) + cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) + + if is_initialized: + *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape + # update key, value caches with our new 1d spatial slices + cur_index = cache_index.value + indices = (0,) * len(batch_dims) + (cur_index, 0, 0) + key = lax.dynamic_update_slice(cached_key.value, key, indices) + value = lax.dynamic_update_slice(cached_value.value, value, indices) + cached_key.value = key + cached_value.value = value + num_updated_cache_vectors = query.shape[1] + cache_index.value = cache_index.value + num_updated_cache_vectors + # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements. 
+ pad_mask = jnp.broadcast_to( + jnp.arange(max_length) < cur_index + num_updated_cache_vectors, + tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), + ) + attention_mask = combine_masks(pad_mask, attention_mask) + return key, value, attention_mask + + def __call__( + self, + hidden_states, + attention_mask, + layer_head_mask, + key_value_states: Optional[jnp.ndarray] = None, + init_cache: bool = False, + deterministic=True, + output_attentions: bool = False, + ): + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + batch_size = hidden_states.shape[0] + + # get query proj + query_states = self.query(hidden_states) + # get key, value proj + if is_cross_attention: + # cross_attentions + key_states = self.key(key_value_states) + value_states = self.value(key_value_states) + else: + # self_attention + key_states = self.key(hidden_states) + value_states = self.value(hidden_states) + + query_states = self._split_heads(query_states) + key_states = self._split_heads(key_states) + value_states = self._split_heads(value_states) + + # handle cache prepare causal attention mask + if self.causal: + query_length, key_length = query_states.shape[1], key_states.shape[1] + if self.has_variable("cache", "cached_key"): + mask_shift = self.variables["cache"]["cache_index"] + max_decoder_length = self.variables["cache"]["cached_key"].shape[1] + causal_mask = lax.dynamic_slice( + self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) + ) + else: + causal_mask = self.causal_mask[:, :, :query_length, :key_length] + causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) + + # combine masks if needed + if attention_mask is not None and self.causal: + attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) + attention_mask = combine_masks(attention_mask, causal_mask) + elif self.causal: + attention_mask = causal_mask + elif attention_mask is not None: + attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) + + # During fast autoregressive decoding, we feed one position at a time, + # and cache the keys and values step by step. + if self.causal and (self.has_variable("cache", "cached_key") or init_cache): + key_states, value_states, attention_mask = self._concatenate_to_cache( + key_states, value_states, query_states, attention_mask + ) + + # Convert the boolean attention mask to an attention bias. 
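+ # (attendable positions receive a bias of 0.0 while masked positions receive the
+ # most negative finite value of the dtype, so they vanish after the softmax)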
+ if attention_mask is not None: + # attention mask in the form of attention bias + attention_bias = lax.select( + attention_mask > 0, + jnp.full(attention_mask.shape, 0.0).astype(self.dtype), + jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), + ) + else: + attention_bias = None + + dropout_rng = None + if not deterministic and self.config.attention_probs_dropout_prob > 0.0: + dropout_rng = self.make_rng("dropout") + + attn_weights = dot_product_attention_weights( + query_states, + key_states, + bias=attention_bias, + dropout_rng=dropout_rng, + dropout_rate=self.config.attention_probs_dropout_prob, + broadcast_dropout=True, + deterministic=deterministic, + dtype=self.dtype, + precision=None, + ) + + # Mask heads if we want to + if layer_head_mask is not None: + attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask) + + attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) + attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,)) + + outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) + return outputs + + +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfOutput with Bert->Roberta +class FlaxRobertaSelfOutput(nn.Module): + config: RobertaConfig + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + + def setup(self): + self.dense = nn.Dense( + self.config.hidden_size, + kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + dtype=self.dtype, + ) + self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) + self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) + + def __call__(self, hidden_states, input_tensor, deterministic: bool = True): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states, deterministic=deterministic) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertAttention with Bert->Roberta +class FlaxRobertaAttention(nn.Module): + config: RobertaConfig + causal: bool = False + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.self = FlaxRobertaSelfAttention(self.config, causal=self.causal, dtype=self.dtype) + self.output = FlaxRobertaSelfOutput(self.config, dtype=self.dtype) + + def __call__( + self, + hidden_states, + attention_mask, + layer_head_mask, + key_value_states=None, + init_cache=False, + deterministic=True, + output_attentions: bool = False, + ): + # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length) + # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable + # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length) + attn_outputs = self.self( + hidden_states, + attention_mask, + layer_head_mask=layer_head_mask, + key_value_states=key_value_states, + init_cache=init_cache, + deterministic=deterministic, + output_attentions=output_attentions, + ) + attn_output = attn_outputs[0] + hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_outputs[1],) + + return outputs + + +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertIntermediate with Bert->Roberta +class FlaxRobertaIntermediate(nn.Module): + config: RobertaConfig + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + + def setup(self): + self.dense = 
nn.Dense( + self.config.intermediate_size, + kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + dtype=self.dtype, + ) + self.activation = ACT2FN[self.config.hidden_act] + + def __call__(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.activation(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertOutput with Bert->Roberta +class FlaxRobertaOutput(nn.Module): + config: RobertaConfig + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + + def setup(self): + self.dense = nn.Dense( + self.config.hidden_size, + kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + dtype=self.dtype, + ) + self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) + self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) + + def __call__(self, hidden_states, attention_output, deterministic: bool = True): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states, deterministic=deterministic) + hidden_states = self.LayerNorm(hidden_states + attention_output) + return hidden_states + + +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayer with Bert->Roberta +class FlaxRobertaLayer(nn.Module): + config: RobertaConfig + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + + def setup(self): + self.attention = FlaxRobertaAttention(self.config, causal=self.config.is_decoder, dtype=self.dtype) + self.intermediate = FlaxRobertaIntermediate(self.config, dtype=self.dtype) + self.output = FlaxRobertaOutput(self.config, dtype=self.dtype) + if self.config.add_cross_attention: + self.crossattention = FlaxRobertaAttention(self.config, causal=False, dtype=self.dtype) + + def __call__( + self, + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states: Optional[jnp.ndarray] = None, + encoder_attention_mask: Optional[jnp.ndarray] = None, + init_cache: bool = False, + deterministic: bool = True, + output_attentions: bool = False, + ): + # Self Attention + attention_outputs = self.attention( + hidden_states, + attention_mask, + layer_head_mask=layer_head_mask, + init_cache=init_cache, + deterministic=deterministic, + output_attentions=output_attentions, + ) + attention_output = attention_outputs[0] + + # Cross-Attention Block + if encoder_hidden_states is not None: + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask=encoder_attention_mask, + layer_head_mask=layer_head_mask, + key_value_states=encoder_hidden_states, + deterministic=deterministic, + output_attentions=output_attentions, + ) + attention_output = cross_attention_outputs[0] + + hidden_states = self.intermediate(attention_output) + hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attention_outputs[1],) + if encoder_hidden_states is not None: + outputs += (cross_attention_outputs[1],) + return outputs + + +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayerCollection with Bert->Roberta +class FlaxRobertaLayerCollection(nn.Module): + config: RobertaConfig + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + gradient_checkpointing: bool = False + + def setup(self): + if self.gradient_checkpointing: + FlaxRobertaCheckpointLayer = remat(FlaxRobertaLayer, static_argnums=(5, 6, 7)) + self.layers = [ + FlaxRobertaCheckpointLayer(self.config, 
name=str(i), dtype=self.dtype) + for i in range(self.config.num_hidden_layers) + ] + else: + self.layers = [ + FlaxRobertaLayer(self.config, name=str(i), dtype=self.dtype) + for i in range(self.config.num_hidden_layers) + ] + + def __call__( + self, + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states: Optional[jnp.ndarray] = None, + encoder_attention_mask: Optional[jnp.ndarray] = None, + init_cache: bool = False, + deterministic: bool = True, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + all_attentions = () if output_attentions else None + all_hidden_states = () if output_hidden_states else None + all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None + + # Check if head_mask has a correct number of layers specified if desired + if head_mask is not None: + if head_mask.shape[0] != (len(self.layers)): + raise ValueError( + f"The head_mask should be specified for {len(self.layers)} layers, but it is for " + f" {head_mask.shape[0]}." + ) + + for i, layer in enumerate(self.layers): + if output_hidden_states: + all_hidden_states += (hidden_states,) + + layer_outputs = layer( + hidden_states, + attention_mask, + head_mask[i] if head_mask is not None else None, + encoder_hidden_states, + encoder_attention_mask, + init_cache, + deterministic, + output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions += (layer_outputs[1],) + + if encoder_hidden_states is not None: + all_cross_attentions += (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states += (hidden_states,) + + outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions) + + if not return_dict: + return tuple(v for v in outputs if v is not None) + + return FlaxBaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_attentions, + cross_attentions=all_cross_attentions, + ) + + +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEncoder with Bert->Roberta +class FlaxRobertaEncoder(nn.Module): + config: RobertaConfig + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + gradient_checkpointing: bool = False + + def setup(self): + self.layer = FlaxRobertaLayerCollection( + self.config, + dtype=self.dtype, + gradient_checkpointing=self.gradient_checkpointing, + ) + + def __call__( + self, + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states: Optional[jnp.ndarray] = None, + encoder_attention_mask: Optional[jnp.ndarray] = None, + init_cache: bool = False, + deterministic: bool = True, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + return self.layer( + hidden_states, + attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + init_cache=init_cache, + deterministic=deterministic, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPooler with Bert->Roberta +class FlaxRobertaPooler(nn.Module): + config: RobertaConfig + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + + def setup(self): + self.dense = nn.Dense( + self.config.hidden_size, + kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + dtype=self.dtype, + ) + + def 
__call__(self, hidden_states): + cls_hidden_state = hidden_states[:, 0] + cls_hidden_state = self.dense(cls_hidden_state) + return nn.tanh(cls_hidden_state) + + +class FlaxRobertaLMHead(nn.Module): + config: RobertaConfig + dtype: jnp.dtype = jnp.float32 + bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros + + def setup(self): + self.dense = nn.Dense( + self.config.hidden_size, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + ) + self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) + self.decoder = nn.Dense( + self.config.vocab_size, + dtype=self.dtype, + use_bias=False, + kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + ) + self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,)) + + def __call__(self, hidden_states, shared_embedding=None): + hidden_states = self.dense(hidden_states) + hidden_states = ACT2FN["gelu"](hidden_states) + hidden_states = self.layer_norm(hidden_states) + + if shared_embedding is not None: + hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) + else: + hidden_states = self.decoder(hidden_states) + + bias = jnp.asarray(self.bias, self.dtype) + hidden_states += bias + return hidden_states + + +class FlaxRobertaClassificationHead(nn.Module): + config: RobertaConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.dense = nn.Dense( + self.config.hidden_size, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + ) + classifier_dropout = ( + self.config.classifier_dropout + if self.config.classifier_dropout is not None + else self.config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(rate=classifier_dropout) + self.out_proj = nn.Dense( + self.config.num_labels, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + ) + + def __call__(self, hidden_states, deterministic=True): + hidden_states = hidden_states[:, 0, :] # take token (equiv. to [CLS]) + hidden_states = self.dropout(hidden_states, deterministic=deterministic) + hidden_states = self.dense(hidden_states) + hidden_states = nn.tanh(hidden_states) + hidden_states = self.dropout(hidden_states, deterministic=deterministic) + hidden_states = self.out_proj(hidden_states) + return hidden_states + + +class FlaxRobertaPreTrainedModel(FlaxPreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
+ """ + + config_class = RobertaConfig + base_model_prefix = "roberta" + + module_class: nn.Module = None + + def __init__( + self, + config: RobertaConfig, + input_shape: Tuple = (1, 1), + seed: int = 0, + dtype: jnp.dtype = jnp.float32, + _do_init: bool = True, + gradient_checkpointing: bool = False, + **kwargs, + ): + module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs) + super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) + + # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.enable_gradient_checkpointing + def enable_gradient_checkpointing(self): + self._module = self.module_class( + config=self.config, + dtype=self.dtype, + gradient_checkpointing=True, + ) + + def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: + # init input tensors + input_ids = jnp.zeros(input_shape, dtype="i4") + token_type_ids = jnp.ones_like(input_ids) + position_ids = create_position_ids_from_input_ids(input_ids, self.config.pad_token_id) + attention_mask = jnp.ones_like(input_ids) + head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) + + params_rng, dropout_rng = jax.random.split(rng) + rngs = {"params": params_rng, "dropout": dropout_rng} + + if self.config.add_cross_attention: + encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,)) + encoder_attention_mask = attention_mask + module_init_outputs = self.module.init( + rngs, + input_ids, + attention_mask, + token_type_ids, + position_ids, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + return_dict=False, + ) + else: + module_init_outputs = self.module.init( + rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False + ) + + random_params = module_init_outputs["params"] + + if params is not None: + random_params = flatten_dict(unfreeze(random_params)) + params = flatten_dict(unfreeze(params)) + for missing_key in self._missing_keys: + params[missing_key] = random_params[missing_key] + self._missing_keys = set() + return freeze(unflatten_dict(params)) + else: + return random_params + + # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderPreTrainedModel.init_cache + def init_cache(self, batch_size, max_length): + r""" + Args: + batch_size (`int`): + batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. + max_length (`int`): + maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized + cache. 
+ """ + # init input variables to retrieve cache + input_ids = jnp.ones((batch_size, max_length), dtype="i4") + attention_mask = jnp.ones_like(input_ids, dtype="i4") + position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) + + init_variables = self.module.init( + jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True + ) + return unfreeze(init_variables["cache"]) + + @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + def __call__( + self, + input_ids, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + params: dict = None, + dropout_rng: jax.random.PRNGKey = None, + train: bool = False, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + past_key_values: dict = None, + ): + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.return_dict + + # init input tensors if not passed + if token_type_ids is None: + token_type_ids = jnp.zeros_like(input_ids) + + if position_ids is None: + position_ids = create_position_ids_from_input_ids(input_ids, self.config.pad_token_id) + + if attention_mask is None: + attention_mask = jnp.ones_like(input_ids) + + if head_mask is None: + head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) + + # Handle any PRNG if needed + rngs = {} + if dropout_rng is not None: + rngs["dropout"] = dropout_rng + + inputs = {"params": params or self.params} + + if self.config.add_cross_attention: + # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed + # down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that it can be + # changed by FlaxRobertaAttention module + if past_key_values: + inputs["cache"] = past_key_values + mutable = ["cache"] + else: + mutable = False + + outputs = self.module.apply( + inputs, + jnp.array(input_ids, dtype="i4"), + jnp.array(attention_mask, dtype="i4"), + token_type_ids=jnp.array(token_type_ids, dtype="i4"), + position_ids=jnp.array(position_ids, dtype="i4"), + head_mask=jnp.array(head_mask, dtype="i4"), + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + deterministic=not train, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + rngs=rngs, + mutable=mutable, + ) + + # add updated cache to model output + if past_key_values is not None and return_dict: + outputs, past_key_values = outputs + outputs["past_key_values"] = unfreeze(past_key_values["cache"]) + return outputs + elif past_key_values is not None and not return_dict: + outputs, past_key_values = outputs + outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:] + + else: + outputs = self.module.apply( + inputs, + jnp.array(input_ids, dtype="i4"), + jnp.array(attention_mask, dtype="i4"), + token_type_ids=jnp.array(token_type_ids, dtype="i4"), + position_ids=jnp.array(position_ids, dtype="i4"), + head_mask=jnp.array(head_mask, dtype="i4"), + deterministic=not train, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + rngs=rngs, + ) + + return outputs + + +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertModule with Bert->Roberta +class FlaxRobertaModule(nn.Module): + config: RobertaConfig + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + add_pooling_layer: bool = True + gradient_checkpointing: bool = False + + def setup(self): + self.embeddings = FlaxRobertaEmbeddings(self.config, dtype=self.dtype) + self.encoder = FlaxRobertaEncoder( + self.config, + dtype=self.dtype, + gradient_checkpointing=self.gradient_checkpointing, + ) + self.pooler = FlaxRobertaPooler(self.config, dtype=self.dtype) + + def __call__( + self, + input_ids, + attention_mask, + token_type_ids: Optional[jnp.ndarray] = None, + position_ids: Optional[jnp.ndarray] = None, + head_mask: Optional[jnp.ndarray] = None, + encoder_hidden_states: Optional[jnp.ndarray] = None, + encoder_attention_mask: Optional[jnp.ndarray] = None, + init_cache: bool = False, + deterministic: bool = True, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + # make sure `token_type_ids` is correctly initialized when not passed + if token_type_ids is None: + token_type_ids = jnp.zeros_like(input_ids) + + # make sure `position_ids` is correctly initialized when not passed + if position_ids is None: + position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) + + hidden_states = self.embeddings( + input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic + ) + outputs = self.encoder( + hidden_states, + attention_mask, + head_mask=head_mask, + deterministic=deterministic, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + init_cache=init_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = outputs[0] + pooled = self.pooler(hidden_states) if 
self.add_pooling_layer else None + + if not return_dict: + # if pooled is None, don't return it + if pooled is None: + return (hidden_states,) + outputs[1:] + return (hidden_states, pooled) + outputs[1:] + + return FlaxBaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=hidden_states, + pooler_output=pooled, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + +@add_start_docstrings( + "The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.", + ROBERTA_START_DOCSTRING, +) +class FlaxRobertaModel(FlaxRobertaPreTrainedModel): + module_class = FlaxRobertaModule + + +append_call_sample_docstring(FlaxRobertaModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPooling, _CONFIG_FOR_DOC) + + +class FlaxRobertaForMaskedLMModule(nn.Module): + config: RobertaConfig + dtype: jnp.dtype = jnp.float32 + gradient_checkpointing: bool = False + + def setup(self): + self.roberta = FlaxRobertaModule( + config=self.config, + add_pooling_layer=False, + dtype=self.dtype, + gradient_checkpointing=self.gradient_checkpointing, + ) + self.lm_head = FlaxRobertaLMHead(config=self.config, dtype=self.dtype) + + def __call__( + self, + input_ids, + attention_mask, + token_type_ids, + position_ids, + head_mask, + deterministic: bool = True, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + # Model + outputs = self.roberta( + input_ids, + attention_mask, + token_type_ids, + position_ids, + head_mask, + deterministic=deterministic, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + if self.config.tie_word_embeddings: + shared_embedding = self.roberta.variables["params"]["embeddings"]["word_embeddings"]["embedding"] + else: + shared_embedding = None + + # Compute the prediction scores + logits = self.lm_head(hidden_states, shared_embedding=shared_embedding) + + if not return_dict: + return (logits,) + outputs[1:] + + return FlaxMaskedLMOutput( + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings("""RoBERTa Model with a `language modeling` head on top.""", ROBERTA_START_DOCSTRING) +class FlaxRobertaForMaskedLM(FlaxRobertaPreTrainedModel): + module_class = FlaxRobertaForMaskedLMModule + + +append_call_sample_docstring( + FlaxRobertaForMaskedLM, + _CHECKPOINT_FOR_DOC, + FlaxBaseModelOutputWithPooling, + _CONFIG_FOR_DOC, + mask="", +) + + +class FlaxRobertaForSequenceClassificationModule(nn.Module): + config: RobertaConfig + dtype: jnp.dtype = jnp.float32 + gradient_checkpointing: bool = False + + def setup(self): + self.roberta = FlaxRobertaModule( + config=self.config, + dtype=self.dtype, + add_pooling_layer=False, + gradient_checkpointing=self.gradient_checkpointing, + ) + self.classifier = FlaxRobertaClassificationHead(config=self.config, dtype=self.dtype) + + def __call__( + self, + input_ids, + attention_mask, + token_type_ids, + position_ids, + head_mask, + deterministic: bool = True, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + # Model + outputs = self.roberta( + input_ids, + attention_mask, + token_type_ids, + position_ids, + head_mask, + deterministic=deterministic, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + 
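+ # (no extra pooling here: FlaxRobertaClassificationHead itself takes the hidden
+ # state of the first token, <s>, before projecting to num_labels)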
logits = self.classifier(sequence_output, deterministic=deterministic) + + if not return_dict: + return (logits,) + outputs[1:] + + return FlaxSequenceClassifierOutput( + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + Roberta Model transformer with a sequence classification/regression head on top (a linear layer on top of the + pooled output) e.g. for GLUE tasks. + """, + ROBERTA_START_DOCSTRING, +) +class FlaxRobertaForSequenceClassification(FlaxRobertaPreTrainedModel): + module_class = FlaxRobertaForSequenceClassificationModule + + +append_call_sample_docstring( + FlaxRobertaForSequenceClassification, + _CHECKPOINT_FOR_DOC, + FlaxSequenceClassifierOutput, + _CONFIG_FOR_DOC, +) + + +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForMultipleChoiceModule with Bert->Roberta, with self.bert->self.roberta +class FlaxRobertaForMultipleChoiceModule(nn.Module): + config: RobertaConfig + dtype: jnp.dtype = jnp.float32 + gradient_checkpointing: bool = False + + def setup(self): + self.roberta = FlaxRobertaModule( + config=self.config, + dtype=self.dtype, + gradient_checkpointing=self.gradient_checkpointing, + ) + self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) + self.classifier = nn.Dense(1, dtype=self.dtype) + + def __call__( + self, + input_ids, + attention_mask, + token_type_ids, + position_ids, + head_mask, + deterministic: bool = True, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + num_choices = input_ids.shape[1] + input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None + attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None + token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None + position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None + + # Model + outputs = self.roberta( + input_ids, + attention_mask, + token_type_ids, + position_ids, + head_mask, + deterministic=deterministic, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + pooled_output = self.dropout(pooled_output, deterministic=deterministic) + logits = self.classifier(pooled_output) + + reshaped_logits = logits.reshape(-1, num_choices) + + if not return_dict: + return (reshaped_logits,) + outputs[2:] + + return FlaxMultipleChoiceModelOutput( + logits=reshaped_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a + softmax) e.g. for RocStories/SWAG tasks. 
+ """, + ROBERTA_START_DOCSTRING, +) +class FlaxRobertaForMultipleChoice(FlaxRobertaPreTrainedModel): + module_class = FlaxRobertaForMultipleChoiceModule + + +overwrite_call_docstring( + FlaxRobertaForMultipleChoice, ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") +) +append_call_sample_docstring( + FlaxRobertaForMultipleChoice, + _CHECKPOINT_FOR_DOC, + FlaxMultipleChoiceModelOutput, + _CONFIG_FOR_DOC, +) + + +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForTokenClassificationModule with Bert->Roberta, with self.bert->self.roberta +class FlaxRobertaForTokenClassificationModule(nn.Module): + config: RobertaConfig + dtype: jnp.dtype = jnp.float32 + gradient_checkpointing: bool = False + + def setup(self): + self.roberta = FlaxRobertaModule( + config=self.config, + dtype=self.dtype, + add_pooling_layer=False, + gradient_checkpointing=self.gradient_checkpointing, + ) + classifier_dropout = ( + self.config.classifier_dropout + if self.config.classifier_dropout is not None + else self.config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(rate=classifier_dropout) + self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype) + + def __call__( + self, + input_ids, + attention_mask, + token_type_ids, + position_ids, + head_mask, + deterministic: bool = True, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + # Model + outputs = self.roberta( + input_ids, + attention_mask, + token_type_ids, + position_ids, + head_mask, + deterministic=deterministic, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + hidden_states = self.dropout(hidden_states, deterministic=deterministic) + logits = self.classifier(hidden_states) + + if not return_dict: + return (logits,) + outputs[1:] + + return FlaxTokenClassifierOutput( + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for + Named-Entity-Recognition (NER) tasks. 
+ """, + ROBERTA_START_DOCSTRING, +) +class FlaxRobertaForTokenClassification(FlaxRobertaPreTrainedModel): + module_class = FlaxRobertaForTokenClassificationModule + + +append_call_sample_docstring( + FlaxRobertaForTokenClassification, + _CHECKPOINT_FOR_DOC, + FlaxTokenClassifierOutput, + _CONFIG_FOR_DOC, +) + + +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForQuestionAnsweringModule with Bert->Roberta, with self.bert->self.roberta +class FlaxRobertaForQuestionAnsweringModule(nn.Module): + config: RobertaConfig + dtype: jnp.dtype = jnp.float32 + gradient_checkpointing: bool = False + + def setup(self): + self.roberta = FlaxRobertaModule( + config=self.config, + dtype=self.dtype, + add_pooling_layer=False, + gradient_checkpointing=self.gradient_checkpointing, + ) + self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype) + + def __call__( + self, + input_ids, + attention_mask, + token_type_ids, + position_ids, + head_mask, + deterministic: bool = True, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + # Model + outputs = self.roberta( + input_ids, + attention_mask, + token_type_ids, + position_ids, + head_mask, + deterministic=deterministic, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + + logits = self.qa_outputs(hidden_states) + start_logits, end_logits = jnp.split(logits, self.config.num_labels, axis=-1) + start_logits = start_logits.squeeze(-1) + end_logits = end_logits.squeeze(-1) + + if not return_dict: + return (start_logits, end_logits) + outputs[1:] + + return FlaxQuestionAnsweringModelOutput( + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear + layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """, + ROBERTA_START_DOCSTRING, +) +class FlaxRobertaForQuestionAnswering(FlaxRobertaPreTrainedModel): + module_class = FlaxRobertaForQuestionAnsweringModule + + +append_call_sample_docstring( + FlaxRobertaForQuestionAnswering, + _CHECKPOINT_FOR_DOC, + FlaxQuestionAnsweringModelOutput, + _CONFIG_FOR_DOC, +) + + +class FlaxRobertaForCausalLMModule(nn.Module): + config: RobertaConfig + dtype: jnp.dtype = jnp.float32 + gradient_checkpointing: bool = False + + def setup(self): + self.roberta = FlaxRobertaModule( + config=self.config, + add_pooling_layer=False, + dtype=self.dtype, + gradient_checkpointing=self.gradient_checkpointing, + ) + self.lm_head = FlaxRobertaLMHead(config=self.config, dtype=self.dtype) + + def __call__( + self, + input_ids, + attention_mask, + position_ids, + token_type_ids: Optional[jnp.ndarray] = None, + head_mask: Optional[jnp.ndarray] = None, + encoder_hidden_states: Optional[jnp.ndarray] = None, + encoder_attention_mask: Optional[jnp.ndarray] = None, + init_cache: bool = False, + deterministic: bool = True, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + # Model + outputs = self.roberta( + input_ids, + attention_mask, + token_type_ids, + position_ids, + head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + init_cache=init_cache, + deterministic=deterministic, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + if self.config.tie_word_embeddings: + shared_embedding = self.roberta.variables["params"]["embeddings"]["word_embeddings"]["embedding"] + else: + shared_embedding = None + + # Compute the prediction scores + logits = self.lm_head(hidden_states, shared_embedding=shared_embedding) + + if not return_dict: + return (logits,) + outputs[1:] + + return FlaxCausalLMOutputWithCrossAttentions( + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + +@add_start_docstrings( + """ + Roberta Model with a language modeling head on top (a linear layer on top of the hidden-states output) e.g for + autoregressive tasks. + """, + ROBERTA_START_DOCSTRING, +) +class FlaxRobertaForCausalLM(FlaxRobertaPreTrainedModel): + module_class = FlaxRobertaForCausalLMModule + + def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None): + # initializing the cache + batch_size, seq_length = input_ids.shape + + past_key_values = self.init_cache(batch_size, max_length) + # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. + # But since the decoder uses a causal mask, those positions are masked anyway. 
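+ # (The cache created by `init_cache` above has a fixed length of `max_length`, and JIT-compiled JAX code requires static shapes.)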
+ # Thus, we can create a single static attention_mask here, which is more efficient for compilation + extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") + if attention_mask is not None: + position_ids = attention_mask.cumsum(axis=-1) - 1 + extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) + else: + position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) + + return { + "past_key_values": past_key_values, + "attention_mask": extended_attention_mask, + "position_ids": position_ids, + } + + def update_inputs_for_generation(self, model_outputs, model_kwargs): + model_kwargs["past_key_values"] = model_outputs.past_key_values + model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1 + return model_kwargs + + +append_call_sample_docstring( + FlaxRobertaForCausalLM, + _CHECKPOINT_FOR_DOC, + FlaxCausalLMOutputWithCrossAttentions, + _CONFIG_FOR_DOC, +) diff --git a/venv/lib/python3.10/site-packages/transformers/models/roberta/modeling_roberta.py b/venv/lib/python3.10/site-packages/transformers/models/roberta/modeling_roberta.py new file mode 100644 index 0000000000000000000000000000000000000000..e1f15722e43bdfd98735c31c24226d5c652f5c2b --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/roberta/modeling_roberta.py @@ -0,0 +1,1555 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch RoBERTa model.""" + +import math +from typing import List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN, gelu +from ...modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + MaskedLMOutput, + MultipleChoiceModelOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from ...utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_roberta import RobertaConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "FacebookAI/roberta-base" +_CONFIG_FOR_DOC = "RobertaConfig" + + +from ..deprecated._archive_maps import ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +class RobertaEmbeddings(nn.Module): + """ + Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. 
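+ Position ids are built with `create_position_ids_from_input_ids`, so real tokens get indices starting at + `padding_idx + 1` while padded positions keep `padding_idx`.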
+ """ + + # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__ + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + self.register_buffer( + "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False + ) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) + + # End copy + self.padding_idx = config.pad_token_id + self.position_embeddings = nn.Embedding( + config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx + ) + + def forward( + self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 + ): + if position_ids is None: + if input_ids is not None: + # Create the position ids from the input token ids. Any padded tokens remain padded. + position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length) + else: + position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) + + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + # Set token_type_ids to the all-zeros buffer registered in the constructor; this usually happens when + # token_type_ids is auto-generated. The registered buffer helps users trace the model without passing + # token_type_ids, and it solves issue #5664 + if token_type_ids is None: + if hasattr(self, "token_type_ids"): + buffered_token_type_ids = self.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + token_type_embeddings + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + def create_position_ids_from_inputs_embeds(self, inputs_embeds): + """ + We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
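+ The generated ids therefore start at `padding_idx + 1`, matching the offset that + `create_position_ids_from_input_ids` applies to non-padded tokens.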
+ + Args: + inputs_embeds: torch.Tensor + + Returns: torch.Tensor + """ + input_shape = inputs_embeds.size()[:-1] + sequence_length = input_shape[1] + + position_ids = torch.arange( + self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device + ) + return position_ids.unsqueeze(0).expand(input_shape) + + +# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta +class RobertaSelfAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = position_embedding_type or getattr( + config, "position_embedding_type", "absolute" + ) + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + + self.is_decoder = config.is_decoder + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. 
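+ # Four cases follow: cross-attention with a cached key/value (reuse the cache), cross-attention without a + # cache (project `encoder_hidden_states`), self-attention with a cache (project `hidden_states` and concatenate + # with the cache), and plain self-attention (project `hidden_states` only).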
+ is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_layer = past_key_value[0] + value_layer = past_key_value[1] + attention_mask = encoder_attention_mask + elif is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + use_cache = past_key_value is not None + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + query_length, key_length = query_layer.shape[2], key_layer.shape[2] + if use_cache: + position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( + -1, 1 + ) + else: + position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. 
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + if self.is_decoder: + outputs = outputs + (past_key_value,) + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertSelfOutput +class RobertaSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta +class RobertaAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + self.self = RobertaSelfAttention(config, position_embedding_type=position_embedding_type) + self.output = RobertaSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertIntermediate +class RobertaIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, 
config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertOutput +class RobertaOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta +class RobertaLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = RobertaAttention(config) + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + if self.add_cross_attention: + if not self.is_decoder: + raise ValueError(f"{self} should be used as a decoder model if cross attention is added") + self.crossattention = RobertaAttention(config, position_embedding_type="absolute") + self.intermediate = RobertaIntermediate(config) + self.output = RobertaOutput(config) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + # if decoder, the last output is tuple of self-attn cache + if self.is_decoder: + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + else: + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + cross_attn_present_key_value = None + if self.is_decoder and encoder_hidden_states is not None: + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" + " by setting `config.add_cross_attention=True`" + ) + + # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + cross_attn_past_key_value, + output_attentions, + ) + attention_output = 
cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + + # add cross-attn cache to positions 3,4 of present_key_value tuple + cross_attn_present_key_value = cross_attention_outputs[-1] + present_key_value = present_key_value + cross_attn_present_key_value + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + + # if decoder, return the attn key/values as the last output + if self.is_decoder: + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta +class RobertaEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = False, + output_hidden_states: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + next_decoder_cache = () if use_cache else None + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + layer_module.__call__, + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +# Copied from transformers.models.bert.modeling_bert.BertPooler +class RobertaPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class RobertaPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = RobertaConfig + base_model_prefix = "roberta" + supports_gradient_checkpointing = True + _no_split_modules = ["RobertaEmbeddings", "RobertaSelfAttention"] + + # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, nn.Linear): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + +ROBERTA_START_DOCSTRING = r""" + + This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the + library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage + and behavior. + + Parameters: + config ([`RobertaConfig`]): Model configuration class with all the parameters of the + model. Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +ROBERTA_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + This parameter can only be used when the model is initialized with the `type_vocab_size` parameter set to a + value >= 2. All values in this tensor should always be < type_vocab_size. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attention tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+""" + + +@add_start_docstrings( + "The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.", + ROBERTA_START_DOCSTRING, +) +class RobertaModel(RobertaPreTrainedModel): + """ + + The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of + cross-attention is added between the self-attention layers, following the architecture described in *Attention is + all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz + Kaiser and Illia Polosukhin. + + To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set + to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and + `add_cross_attention` set to `True`; `encoder_hidden_states` is then expected as an input to the forward pass. + + .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762 + + """ + + # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta + def __init__(self, config, add_pooling_layer=True): + super().__init__(config) + self.config = config + + self.embeddings = RobertaEmbeddings(config) + self.encoder = RobertaEncoder(config) + + self.pooler = RobertaPooler(config) if add_pooling_layer else None + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPoolingAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + # Copied from transformers.models.bert.modeling_bert.BertModel.forward + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder.
Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if self.config.is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + batch_size, seq_length = input_shape + device = input_ids.device if input_ids is not None else inputs_embeds.device + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) + + if token_type_ids is None: + if hasattr(self.embeddings, "token_type_ids"): + buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
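+ # `get_extended_attention_mask` makes the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] + # and converts it to an additive bias: 0.0 for positions to attend to and a large negative value for masked ones.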
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.config.is_decoder and encoder_hidden_states is not None: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + if encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +@add_start_docstrings( + """RoBERTa Model with a `language modeling` head on top for CLM fine-tuning.""", ROBERTA_START_DOCSTRING +) +class RobertaForCausalLM(RobertaPreTrainedModel): + _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"] + + def __init__(self, config): + super().__init__(config) + + if not config.is_decoder: + logger.warning("If you want to use `RobertaLMHeadModel` as a standalone, add `is_decoder=True.`") + + self.roberta = RobertaModel(config, add_pooling_layer=False) + self.lm_head = RobertaLMHead(config) + + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.lm_head.decoder + + def set_output_embeddings(self, new_embeddings): + self.lm_head.decoder = new_embeddings + + @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: 
Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + past_key_values: Tuple[Tuple[torch.FloatTensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in + `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are + ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). 
+ + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, RobertaForCausalLM, AutoConfig + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base") + >>> config = AutoConfig.from_pretrained("FacebookAI/roberta-base") + >>> config.is_decoder = True + >>> model = RobertaForCausalLM.from_pretrained("FacebookAI/roberta-base", config=config) + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + + >>> prediction_logits = outputs.logits + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if labels is not None: + use_cache = False + + outputs = self.roberta( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + prediction_scores = self.lm_head(sequence_output) + + lm_loss = None + if labels is not None: + # move labels to correct device to enable model parallelism + labels = labels.to(prediction_scores.device) + # we are doing next-token prediction; shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss() + lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((lm_loss,) + output) if lm_loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + # cut decoder_input_ids if past_key_values is used + if past_key_values is not None: + past_length = past_key_values[0][0].shape[2] + + # Some generation methods already pass only the last input ID + if input_ids.shape[1] > past_length: + remove_prefix_length = past_length + else: + # Default to old behavior: keep only final ID + remove_prefix_length = input_ids.shape[1] - 1 + + input_ids = input_ids[:, remove_prefix_length:] + + return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values} + + def _reorder_cache(self, past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past + + +@add_start_docstrings("""RoBERTa Model with a `language modeling` head on top.""", ROBERTA_START_DOCSTRING) +class RobertaForMaskedLM(RobertaPreTrainedModel): + _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"] + + def __init__(self, config): + super().__init__(config) + + if 
config.is_decoder: + logger.warning( + "If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for " + "bi-directional self-attention." + ) + + self.roberta = RobertaModel(config, add_pooling_layer=False) + self.lm_head = RobertaLMHead(config) + + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.lm_head.decoder + + def set_output_embeddings(self, new_embeddings): + self.lm_head.decoder = new_embeddings + + @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MaskedLMOutput, + config_class=_CONFIG_FOR_DOC, + mask="<mask>", + expected_output="' Paris'", + expected_loss=0.1, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the + loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + kwargs (`Dict[str, any]`, optional, defaults to *{}*): + Used to hide legacy arguments that have been deprecated.
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.roberta( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = outputs[0] + prediction_scores = self.lm_head(sequence_output) + + masked_lm_loss = None + if labels is not None: + # move labels to correct device to enable model parallelism + labels = labels.to(prediction_scores.device) + loss_fct = CrossEntropyLoss() + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + + return MaskedLMOutput( + loss=masked_lm_loss, + logits=prediction_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +class RobertaLMHead(nn.Module): + """Roberta Head for masked language modeling.""" + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + self.decoder = nn.Linear(config.hidden_size, config.vocab_size) + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + self.decoder.bias = self.bias + + def forward(self, features, **kwargs): + x = self.dense(features) + x = gelu(x) + x = self.layer_norm(x) + + # project back to size of vocabulary with bias + x = self.decoder(x) + + return x + + def _tie_weights(self): + # To tie those two weights if they get disconnected (on TPU or when the bias is resized) + # For accelerate compatibility and to not break backward compatibility + if self.decoder.bias.device.type == "meta": + self.decoder.bias = self.bias + else: + self.bias = self.decoder.bias + + +@add_start_docstrings( + """ + RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the + pooled output) e.g. for GLUE tasks. 
+ """, + ROBERTA_START_DOCSTRING, +) +class RobertaForSequenceClassification(RobertaPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.config = config + + self.roberta = RobertaModel(config, add_pooling_layer=False) + self.classifier = RobertaClassificationHead(config) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint="cardiffnlp/twitter-roberta-base-emotion", + output_type=SequenceClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output="'optimism'", + expected_loss=0.08, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.roberta( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = outputs[0] + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + # move labels to correct device to enable model parallelism + labels = labels.to(logits.device) + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a + softmax) e.g. for RocStories/SWAG tasks. 
+ """, + ROBERTA_START_DOCSTRING, +) +class RobertaForMultipleChoice(RobertaPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.roberta = RobertaModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, 1) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MultipleChoiceModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., + num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See + `input_ids` above) + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] + + flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None + flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None + flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None + flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None + flat_inputs_embeds = ( + inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) + if inputs_embeds is not None + else None + ) + + outputs = self.roberta( + flat_input_ids, + position_ids=flat_position_ids, + token_type_ids=flat_token_type_ids, + attention_mask=flat_attention_mask, + head_mask=head_mask, + inputs_embeds=flat_inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + reshaped_logits = logits.view(-1, num_choices) + + loss = None + if labels is not None: + # move labels to correct device to enable model parallelism + labels = labels.to(reshaped_logits.device) + loss_fct = CrossEntropyLoss() + loss = loss_fct(reshaped_logits, labels) + + if not return_dict: + output = (reshaped_logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return MultipleChoiceModelOutput( + loss=loss, + logits=reshaped_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for + Named-Entity-Recognition (NER) tasks. 
+ """, + ROBERTA_START_DOCSTRING, +) +class RobertaForTokenClassification(RobertaPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.roberta = RobertaModel(config, add_pooling_layer=False) + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint="Jean-Baptiste/roberta-large-ner-english", + output_type=TokenClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output="['O', 'ORG', 'ORG', 'O', 'O', 'O', 'O', 'O', 'LOC', 'O', 'LOC', 'LOC']", + expected_loss=0.01, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.roberta( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + # move labels to correct device to enable model parallelism + labels = labels.to(logits.device) + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +class RobertaClassificationHead(nn.Module): + """Head for sentence-level classification tasks.""" + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(classifier_dropout) + self.out_proj = nn.Linear(config.hidden_size, config.num_labels) + + def forward(self, features, **kwargs): + x = features[:, 0, :] # take token (equiv. 
to [CLS]) + x = self.dropout(x) + x = self.dense(x) + x = torch.tanh(x) + x = self.dropout(x) + x = self.out_proj(x) + return x + + +@add_start_docstrings( + """ + Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear + layers on top of the hidden-states output to compute `span start logits` and `span end logits`). + """, + ROBERTA_START_DOCSTRING, +) +class RobertaForQuestionAnswering(RobertaPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.roberta = RobertaModel(config, add_pooling_layer=False) + self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint="deepset/roberta-base-squad2", + output_type=QuestionAnsweringModelOutput, + config_class=_CONFIG_FOR_DOC, + expected_output="' puppet'", + expected_loss=0.86, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + start_positions: Optional[torch.LongTensor] = None, + end_positions: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: + r""" + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence + are not taken into account for computing the loss. + end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence + are not taken into account for computing the loss.
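+ + Example (an illustrative sketch; uses the `deepset/roberta-base-squad2` checkpoint named in the decorator above): + + ```python + >>> from transformers import AutoTokenizer, RobertaForQuestionAnswering + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("deepset/roberta-base-squad2") + >>> model = RobertaForQuestionAnswering.from_pretrained("deepset/roberta-base-squad2") + + >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" + >>> inputs = tokenizer(question, text, return_tensors="pt") + >>> with torch.no_grad(): + ...     outputs = model(**inputs) + + >>> # the argmax of the start/end logits bounds the predicted answer span + >>> start = int(outputs.start_logits.argmax()) + >>> end = int(outputs.end_logits.argmax()) + >>> tokenizer.decode(inputs.input_ids[0, start : end + 1]) + ' puppet' + ```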
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.roberta( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1).contiguous() + end_logits = end_logits.squeeze(-1).contiguous() + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions = start_positions.clamp(0, ignored_index) + end_positions = end_positions.clamp(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): + """ + Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols + are ignored. This is modified from fairseq's `utils.make_positions`. + + Args: + x: torch.Tensor x: + + Returns: torch.Tensor + """ + # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. + mask = input_ids.ne(padding_idx).int() + incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask + return incremental_indices.long() + padding_idx diff --git a/venv/lib/python3.10/site-packages/transformers/models/roberta/modeling_tf_roberta.py b/venv/lib/python3.10/site-packages/transformers/models/roberta/modeling_tf_roberta.py new file mode 100644 index 0000000000000000000000000000000000000000..f48bb796c17b4c369995e9ab134cfee2f2113782 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/roberta/modeling_tf_roberta.py @@ -0,0 +1,1774 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" TF 2.0 RoBERTa model.""" + + +from __future__ import annotations + +import math +import warnings +from typing import Optional, Tuple, Union + +import numpy as np +import tensorflow as tf + +from ...activations_tf import get_tf_activation +from ...modeling_tf_outputs import ( + TFBaseModelOutputWithPastAndCrossAttentions, + TFBaseModelOutputWithPoolingAndCrossAttentions, + TFCausalLMOutputWithCrossAttentions, + TFMaskedLMOutput, + TFMultipleChoiceModelOutput, + TFQuestionAnsweringModelOutput, + TFSequenceClassifierOutput, + TFTokenClassifierOutput, +) +from ...modeling_tf_utils import ( + TFCausalLanguageModelingLoss, + TFMaskedLanguageModelingLoss, + TFModelInputType, + TFMultipleChoiceLoss, + TFPreTrainedModel, + TFQuestionAnsweringLoss, + TFSequenceClassificationLoss, + TFTokenClassificationLoss, + get_initializer, + keras, + keras_serializable, + unpack_inputs, +) +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax +from ...utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, +) +from .configuration_roberta import RobertaConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "FacebookAI/roberta-base" +_CONFIG_FOR_DOC = "RobertaConfig" + + +from ..deprecated._archive_maps import TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +class TFRobertaEmbeddings(keras.layers.Layer): + """ + Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. + """ + + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + + self.padding_idx = 1 + self.config = config + self.hidden_size = config.hidden_size + self.max_position_embeddings = config.max_position_embeddings + self.initializer_range = config.initializer_range + self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") + self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) + + def build(self, input_shape=None): + with tf.name_scope("word_embeddings"): + self.weight = self.add_weight( + name="weight", + shape=[self.config.vocab_size, self.hidden_size], + initializer=get_initializer(self.initializer_range), + ) + + with tf.name_scope("token_type_embeddings"): + self.token_type_embeddings = self.add_weight( + name="embeddings", + shape=[self.config.type_vocab_size, self.hidden_size], + initializer=get_initializer(self.initializer_range), + ) + + with tf.name_scope("position_embeddings"): + self.position_embeddings = self.add_weight( + name="embeddings", + shape=[self.max_position_embeddings, self.hidden_size], + initializer=get_initializer(self.initializer_range), + ) + + if self.built: + return + self.built = True + if getattr(self, "LayerNorm", None) is not None: + with tf.name_scope(self.LayerNorm.name): + self.LayerNorm.build([None, None, self.config.hidden_size]) + + def create_position_ids_from_input_ids(self, input_ids, past_key_values_length=0): + """ + Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding + symbols are ignored. This is modified from fairseq's `utils.make_positions`. 
+ + Args: + input_ids: tf.Tensor + Returns: tf.Tensor + """ + mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype) + incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask + + return incremental_indices + self.padding_idx + + def call( + self, + input_ids=None, + position_ids=None, + token_type_ids=None, + inputs_embeds=None, + past_key_values_length=0, + training=False, + ): + """ + Applies embedding based on inputs tensor. + + Returns: + final_embeddings (`tf.Tensor`): output embedding tensor. + """ + assert not (input_ids is None and inputs_embeds is None) + + if input_ids is not None: + check_embeddings_within_bounds(input_ids, self.config.vocab_size) + inputs_embeds = tf.gather(params=self.weight, indices=input_ids) + + input_shape = shape_list(inputs_embeds)[:-1] + + if token_type_ids is None: + token_type_ids = tf.fill(dims=input_shape, value=0) + + if position_ids is None: + if input_ids is not None: + # Create the position ids from the input token ids. Any padded tokens remain padded. + position_ids = self.create_position_ids_from_input_ids( + input_ids=input_ids, past_key_values_length=past_key_values_length + ) + else: + position_ids = tf.expand_dims( + tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1), axis=0 + ) + + position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) + token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) + final_embeddings = inputs_embeds + position_embeds + token_type_embeds + final_embeddings = self.LayerNorm(inputs=final_embeddings) + final_embeddings = self.dropout(inputs=final_embeddings, training=training) + + return final_embeddings + + +# Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Roberta +class TFRobertaPooler(keras.layers.Layer): + def __init__(self, config: RobertaConfig, **kwargs): + super().__init__(**kwargs) + + self.dense = keras.layers.Dense( + units=config.hidden_size, + kernel_initializer=get_initializer(config.initializer_range), + activation="tanh", + name="dense", + ) + self.config = config + + def call(self, hidden_states: tf.Tensor) -> tf.Tensor: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. 
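+ # hidden_states has shape (batch_size, seq_length, hidden_size); slicing index 0 of the + # sequence axis yields a (batch_size, hidden_size) summary of the first (<s>) token, + # RoBERTa's counterpart to BERT's [CLS] pooling.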
+ first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(inputs=first_token_tensor) + + return pooled_output + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "dense", None) is not None: + with tf.name_scope(self.dense.name): + self.dense.build([None, None, self.config.hidden_size]) + + +# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->Roberta +class TFRobertaSelfAttention(keras.layers.Layer): + def __init__(self, config: RobertaConfig, **kwargs): + super().__init__(**kwargs) + + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number " + f"of attention heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + self.sqrt_att_head_size = math.sqrt(self.attention_head_size) + + self.query = keras.layers.Dense( + units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" + ) + self.key = keras.layers.Dense( + units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" + ) + self.value = keras.layers.Dense( + units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" + ) + self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob) + + self.is_decoder = config.is_decoder + self.config = config + + def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: + # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] + tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) + + # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] + return tf.transpose(tensor, perm=[0, 2, 1, 3]) + + def call( + self, + hidden_states: tf.Tensor, + attention_mask: tf.Tensor, + head_mask: tf.Tensor, + encoder_hidden_states: tf.Tensor, + encoder_attention_mask: tf.Tensor, + past_key_value: Tuple[tf.Tensor], + output_attentions: bool, + training: bool = False, + ) -> Tuple[tf.Tensor]: + batch_size = shape_list(hidden_states)[0] + mixed_query_layer = self.query(inputs=hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. 
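+ # past_key_value, when present, is a (key_states, value_states) pair, each of shape + # (batch_size, num_heads, past_seq_length, attention_head_size); the branches below choose + # between reusing that cache, projecting the encoder states (cross-attention), or + # projecting the current hidden states (self-attention).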
is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention and past_key_value is not None: + # reuse k, v, cross_attentions + key_layer = past_key_value[0] + value_layer = past_key_value[1] + attention_mask = encoder_attention_mask + elif is_cross_attention: + key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size) + value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) + value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) + key_layer = tf.concat([past_key_value[0], key_layer], axis=2) + value_layer = tf.concat([past_key_value[1], value_layer], axis=2) + else: + key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) + value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) + + query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) + + if self.is_decoder: + # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + # (batch size, num_heads, seq_len_q, seq_len_k) + attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) + dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) + attention_scores = tf.divide(attention_scores, dk) + + if attention_mask is not None: + # Apply the attention mask (precomputed for all layers in the TFRobertaModel call() function) + attention_scores = tf.add(attention_scores, attention_mask) + + # Normalize the attention scores to probabilities. + attention_probs = stable_softmax(logits=attention_scores, axis=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper.
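+ # Concretely: with attention_probs_dropout_prob=0.1, roughly 10% of the normalized + # (query, key) attention weights are zeroed at training time, so the affected queries + # temporarily stop attending to those key positions.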
+ attention_probs = self.dropout(inputs=attention_probs, training=training) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = tf.multiply(attention_probs, head_mask) + + attention_output = tf.matmul(attention_probs, value_layer) + attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) + + # (batch_size, seq_len_q, all_head_size) + attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size)) + outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) + + if self.is_decoder: + outputs = outputs + (past_key_value,) + return outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "query", None) is not None: + with tf.name_scope(self.query.name): + self.query.build([None, None, self.config.hidden_size]) + if getattr(self, "key", None) is not None: + with tf.name_scope(self.key.name): + self.key.build([None, None, self.config.hidden_size]) + if getattr(self, "value", None) is not None: + with tf.name_scope(self.value.name): + self.value.build([None, None, self.config.hidden_size]) + + +# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->Roberta +class TFRobertaSelfOutput(keras.layers.Layer): + def __init__(self, config: RobertaConfig, **kwargs): + super().__init__(**kwargs) + + self.dense = keras.layers.Dense( + units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + ) + self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") + self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) + self.config = config + + def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.dropout(inputs=hidden_states, training=training) + hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) + + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "dense", None) is not None: + with tf.name_scope(self.dense.name): + self.dense.build([None, None, self.config.hidden_size]) + if getattr(self, "LayerNorm", None) is not None: + with tf.name_scope(self.LayerNorm.name): + self.LayerNorm.build([None, None, self.config.hidden_size]) + + +# Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->Roberta +class TFRobertaAttention(keras.layers.Layer): + def __init__(self, config: RobertaConfig, **kwargs): + super().__init__(**kwargs) + + self.self_attention = TFRobertaSelfAttention(config, name="self") + self.dense_output = TFRobertaSelfOutput(config, name="output") + + def prune_heads(self, heads): + raise NotImplementedError + + def call( + self, + input_tensor: tf.Tensor, + attention_mask: tf.Tensor, + head_mask: tf.Tensor, + encoder_hidden_states: tf.Tensor, + encoder_attention_mask: tf.Tensor, + past_key_value: Tuple[tf.Tensor], + output_attentions: bool, + training: bool = False, + ) -> Tuple[tf.Tensor]: + self_outputs = self.self_attention( + hidden_states=input_tensor, + attention_mask=attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_value=past_key_value, + output_attentions=output_attentions, + training=training, + ) + attention_output = self.dense_output( + hidden_states=self_outputs[0], 
input_tensor=input_tensor, training=training + ) + # add attentions (possibly with past_key_value) if we output them + outputs = (attention_output,) + self_outputs[1:] + + return outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "self_attention", None) is not None: + with tf.name_scope(self.self_attention.name): + self.self_attention.build(None) + if getattr(self, "dense_output", None) is not None: + with tf.name_scope(self.dense_output.name): + self.dense_output.build(None) + + +# Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->Roberta +class TFRobertaIntermediate(keras.layers.Layer): + def __init__(self, config: RobertaConfig, **kwargs): + super().__init__(**kwargs) + + self.dense = keras.layers.Dense( + units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + ) + + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = get_tf_activation(config.hidden_act) + else: + self.intermediate_act_fn = config.hidden_act + self.config = config + + def call(self, hidden_states: tf.Tensor) -> tf.Tensor: + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "dense", None) is not None: + with tf.name_scope(self.dense.name): + self.dense.build([None, None, self.config.hidden_size]) + + +# Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->Roberta +class TFRobertaOutput(keras.layers.Layer): + def __init__(self, config: RobertaConfig, **kwargs): + super().__init__(**kwargs) + + self.dense = keras.layers.Dense( + units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + ) + self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") + self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) + self.config = config + + def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.dropout(inputs=hidden_states, training=training) + hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) + + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "dense", None) is not None: + with tf.name_scope(self.dense.name): + self.dense.build([None, None, self.config.intermediate_size]) + if getattr(self, "LayerNorm", None) is not None: + with tf.name_scope(self.LayerNorm.name): + self.LayerNorm.build([None, None, self.config.hidden_size]) + + +# Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->Roberta +class TFRobertaLayer(keras.layers.Layer): + def __init__(self, config: RobertaConfig, **kwargs): + super().__init__(**kwargs) + + self.attention = TFRobertaAttention(config, name="attention") + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + if self.add_cross_attention: + if not self.is_decoder: + raise ValueError(f"{self} should be used as a decoder model if cross attention is added") + self.crossattention = TFRobertaAttention(config, name="crossattention") + self.intermediate = TFRobertaIntermediate(config, name="intermediate") + self.bert_output = TFRobertaOutput(config, name="output") + + def 
call( + self, + hidden_states: tf.Tensor, + attention_mask: tf.Tensor, + head_mask: tf.Tensor, + encoder_hidden_states: tf.Tensor | None, + encoder_attention_mask: tf.Tensor | None, + past_key_value: Tuple[tf.Tensor] | None, + output_attentions: bool, + training: bool = False, + ) -> Tuple[tf.Tensor]: + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + input_tensor=hidden_states, + attention_mask=attention_mask, + head_mask=head_mask, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=self_attn_past_key_value, + output_attentions=output_attentions, + training=training, + ) + attention_output = self_attention_outputs[0] + + # if decoder, the last output is tuple of self-attn cache + if self.is_decoder: + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + else: + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + cross_attn_present_key_value = None + if self.is_decoder and encoder_hidden_states is not None: + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" + " by setting `config.add_cross_attention=True`" + ) + + # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + cross_attention_outputs = self.crossattention( + input_tensor=attention_output, + attention_mask=attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_value=cross_attn_past_key_value, + output_attentions=output_attentions, + training=training, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + + # add cross-attn cache to positions 3,4 of present_key_value tuple + cross_attn_present_key_value = cross_attention_outputs[-1] + present_key_value = present_key_value + cross_attn_present_key_value + + intermediate_output = self.intermediate(hidden_states=attention_output) + layer_output = self.bert_output( + hidden_states=intermediate_output, input_tensor=attention_output, training=training + ) + outputs = (layer_output,) + outputs # add attentions if we output them + + # if decoder, return the attn key/values as the last output + if self.is_decoder: + outputs = outputs + (present_key_value,) + + return outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "attention", None) is not None: + with tf.name_scope(self.attention.name): + self.attention.build(None) + if getattr(self, "intermediate", None) is not None: + with tf.name_scope(self.intermediate.name): + self.intermediate.build(None) + if getattr(self, "bert_output", None) is not None: + with tf.name_scope(self.bert_output.name): + self.bert_output.build(None) + if getattr(self, "crossattention", None) is not None: + with tf.name_scope(self.crossattention.name): + self.crossattention.build(None) + + +# Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->Roberta +class TFRobertaEncoder(keras.layers.Layer): + def __init__(self, config: RobertaConfig, **kwargs): + super().__init__(**kwargs) + self.config 
= config + self.layer = [TFRobertaLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] + + def call( + self, + hidden_states: tf.Tensor, + attention_mask: tf.Tensor, + head_mask: tf.Tensor, + encoder_hidden_states: tf.Tensor | None, + encoder_attention_mask: tf.Tensor | None, + past_key_values: Tuple[Tuple[tf.Tensor]] | None, + use_cache: Optional[bool], + output_attentions: bool, + output_hidden_states: bool, + return_dict: bool, + training: bool = False, + ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: + all_hidden_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + + next_decoder_cache = () if use_cache else None + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + past_key_value = past_key_values[i] if past_key_values is not None else None + + layer_outputs = layer_module( + hidden_states=hidden_states, + attention_mask=attention_mask, + head_mask=head_mask[i], + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_value=past_key_value, + output_attentions=output_attentions, + training=training, + ) + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + if self.config.add_cross_attention and encoder_hidden_states is not None: + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + # Add last layer + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None + ) + + return TFBaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_attentions, + cross_attentions=all_cross_attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "layer", None) is not None: + for layer in self.layer: + with tf.name_scope(layer.name): + layer.build(None) + + +@keras_serializable +class TFRobertaMainLayer(keras.layers.Layer): + config_class = RobertaConfig + + def __init__(self, config, add_pooling_layer=True, **kwargs): + super().__init__(**kwargs) + + self.config = config + self.is_decoder = config.is_decoder + + self.num_hidden_layers = config.num_hidden_layers + self.initializer_range = config.initializer_range + self.output_attentions = config.output_attentions + self.output_hidden_states = config.output_hidden_states + self.return_dict = config.use_return_dict + self.encoder = TFRobertaEncoder(config, name="encoder") + self.pooler = TFRobertaPooler(config, name="pooler") if add_pooling_layer else None + # The embeddings must be the last declaration in order to follow the weights order + self.embeddings = TFRobertaEmbeddings(config, name="embeddings") + + # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.get_input_embeddings + def get_input_embeddings(self) -> keras.layers.Layer: + return self.embeddings + + # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.set_input_embeddings + def set_input_embeddings(self, value: tf.Variable): + self.embeddings.weight = 
value + self.embeddings.vocab_size = shape_list(value)[0] + + # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer._prune_heads + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base + class PreTrainedModel + """ + raise NotImplementedError + + @unpack_inputs + # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.call + def call( + self, + input_ids: TFModelInputType | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + head_mask: np.ndarray | tf.Tensor | None = None, + inputs_embeds: np.ndarray | tf.Tensor | None = None, + encoder_hidden_states: np.ndarray | tf.Tensor | None = None, + encoder_attention_mask: np.ndarray | tf.Tensor | None = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]: + if not self.config.is_decoder: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = shape_list(input_ids) + elif inputs_embeds is not None: + input_shape = shape_list(inputs_embeds)[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + batch_size, seq_length = input_shape + + if past_key_values is None: + past_key_values_length = 0 + past_key_values = [None] * len(self.encoder.layer) + else: + past_key_values_length = shape_list(past_key_values[0][0])[-2] + + if attention_mask is None: + attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1) + + if token_type_ids is None: + token_type_ids = tf.fill(dims=input_shape, value=0) + + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + training=training, + ) + + # We create a 3D attention mask from a 2D tensor mask. + # Sizes are [batch_size, 1, 1, to_seq_length] + # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] + # this attention mask is simpler than the triangular masking of causal attention + # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
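+ # Worked example (illustrative): a padding mask [[1, 1, 0]] is reshaped below to + # (batch_size, 1, 1, mask_seq_length) and converted via (1 - mask) * -10000.0 into the + # additive mask [[[[0.0, 0.0, -10000.0]]]], which blocks attention to the padded position.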
attention_mask_shape = shape_list(attention_mask) + + mask_seq_length = seq_length + past_key_values_length + # Copied from `modeling_tf_t5.py` + # Provided a padding mask of dimensions [batch_size, mask_seq_length] + # - if the model is a decoder, apply a causal mask in addition to the padding mask + # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length] + if self.is_decoder: + seq_ids = tf.range(mask_seq_length) + causal_mask = tf.less_equal( + tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)), + seq_ids[None, :, None], + ) + causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype) + extended_attention_mask = causal_mask * attention_mask[:, None, :] + attention_mask_shape = shape_list(extended_attention_mask) + extended_attention_mask = tf.reshape( + extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2]) + ) + if past_key_values[0] is not None: + # attention_mask needs to be sliced to the shape `[batch_size, 1, from_seq_length - cached_seq_length, to_seq_length]` + extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :] + else: + extended_attention_mask = tf.reshape( + attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1]) + ) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) + one_cst = tf.constant(1.0, dtype=embedding_output.dtype) + ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) + extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) + + # Copied from `modeling_tf_t5.py` with -1e9 -> -10000 + if self.is_decoder and encoder_attention_mask is not None: + # If a 2D or 3D attention mask is provided for the cross-attention, + # we need to make it broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length] + encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype) + num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask)) + if num_dims_encoder_attention_mask == 3: + encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] + if num_dims_encoder_attention_mask == 2: + encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] + + # T5 has a mask that can compare sequence ids; we can simulate this here with this transposition + # Cf.
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270 + # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask, + # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2))) + + encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicates we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + if head_mask is not None: + raise NotImplementedError + else: + head_mask = [None] * self.config.num_hidden_layers + + encoder_outputs = self.encoder( + hidden_states=embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None + + if not return_dict: + return ( + sequence_output, + pooled_output, + ) + encoder_outputs[1:] + + return TFBaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "encoder", None) is not None: + with tf.name_scope(self.encoder.name): + self.encoder.build(None) + if getattr(self, "pooler", None) is not None: + with tf.name_scope(self.pooler.name): + self.pooler.build(None) + if getattr(self, "embeddings", None) is not None: + with tf.name_scope(self.embeddings.name): + self.embeddings.build(None) + + +class TFRobertaPreTrainedModel(TFPreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = RobertaConfig + base_model_prefix = "roberta" + + +ROBERTA_START_DOCSTRING = r""" + + This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it + as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and + behavior. + + <Tip> + + TensorFlow models and layers in `transformers` accept two formats as input: + + - having all inputs as keyword arguments (like PyTorch models), or + - having all inputs as a list, tuple or dict in the first positional argument. + + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers.
Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: + + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` + - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: + `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` + - a dictionary with one or several input Tensors associated to the input names given in the docstring: + `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + + </Tip> + + Parameters: + config ([`RobertaConfig`]): Model configuration class with all the parameters of the + model. Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +ROBERTA_INPUTS_DOCSTRING = r""" + Args: + input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and + [`PreTrainedTokenizer.encode`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers.
See `attentions` under returned + tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the + config will be used instead. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. This argument can be used only in eager mode, in graph mode the value in the config will be + used instead. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in + eager mode, in graph mode the value will always be set to True. + training (`bool`, *optional*, defaults to `False`): + Whether or not to use the model in training mode (some modules like dropout modules have different + behaviors between training and evaluation). +""" + + +@add_start_docstrings( + "The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.", + ROBERTA_START_DOCSTRING, +) +class TFRobertaModel(TFRobertaPreTrainedModel): + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.roberta = TFRobertaMainLayer(config, name="roberta") + + @unpack_inputs + @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFBaseModelOutputWithPoolingAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: TFModelInputType | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + head_mask: np.ndarray | tf.Tensor | None = None, + inputs_embeds: np.ndarray | tf.Tensor | None = None, + encoder_hidden_states: np.ndarray | tf.Tensor | None = None, + encoder_attention_mask: np.ndarray | tf.Tensor | None = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: Optional[bool] = False, + ) -> Union[Tuple, TFBaseModelOutputWithPoolingAndCrossAttentions]: + r""" + encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) + contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
+ use_cache (`bool`, *optional*, defaults to `True`): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). Set to `False` during training, `True` during generation + """ + outputs = self.roberta( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + return outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "roberta", None) is not None: + with tf.name_scope(self.roberta.name): + self.roberta.build(None) + + +class TFRobertaLMHead(keras.layers.Layer): + """Roberta Head for masked language modeling.""" + + def __init__(self, config, input_embeddings, **kwargs): + super().__init__(**kwargs) + + self.config = config + self.hidden_size = config.hidden_size + self.dense = keras.layers.Dense( + config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + ) + self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") + self.act = get_tf_activation("gelu") + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. + self.decoder = input_embeddings + + def build(self, input_shape=None): + self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") + + if self.built: + return + self.built = True + if getattr(self, "dense", None) is not None: + with tf.name_scope(self.dense.name): + self.dense.build([None, None, self.config.hidden_size]) + if getattr(self, "layer_norm", None) is not None: + with tf.name_scope(self.layer_norm.name): + self.layer_norm.build([None, None, self.config.hidden_size]) + + def get_output_embeddings(self): + return self.decoder + + def set_output_embeddings(self, value): + self.decoder.weight = value + self.decoder.vocab_size = shape_list(value)[0] + + def get_bias(self): + return {"bias": self.bias} + + def set_bias(self, value): + self.bias = value["bias"] + self.config.vocab_size = shape_list(value["bias"])[0] + + def call(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.layer_norm(hidden_states) + + # project back to size of vocabulary with bias + seq_length = shape_list(tensor=hidden_states)[1] + hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size]) + hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True) + hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) + hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) + + return hidden_states + + +@add_start_docstrings("""RoBERTa Model with a `language modeling` head on top.""", ROBERTA_START_DOCSTRING) +class TFRobertaForMaskedLM(TFRobertaPreTrainedModel, TFMaskedLanguageModelingLoss): + # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model + _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head.decoder.weight"] + + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + + self.roberta = TFRobertaMainLayer(config, add_pooling_layer=False, name="roberta") + self.lm_head = TFRobertaLMHead(config, self.roberta.embeddings, name="lm_head") + + def get_lm_head(self): + return self.lm_head + + def get_prefix_bias_name(self): + warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) + return self.name + "/" + self.lm_head.name + + @unpack_inputs + @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFMaskedLMOutput, + config_class=_CONFIG_FOR_DOC, + mask="", + expected_output="' Paris'", + expected_loss=0.1, + ) + def call( + self, + input_ids: TFModelInputType | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + head_mask: np.ndarray | tf.Tensor | None = None, + inputs_embeds: np.ndarray | tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: np.ndarray | tf.Tensor | None = None, + training: Optional[bool] = False, + ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: + r""" + labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the + loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + """ + outputs = self.roberta( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + sequence_output = outputs[0] + prediction_scores = self.lm_head(sequence_output) + + loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TFMaskedLMOutput( + loss=loss, + logits=prediction_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "roberta", None) is not None: + with tf.name_scope(self.roberta.name): + self.roberta.build(None) + if getattr(self, "lm_head", None) is not None: + with tf.name_scope(self.lm_head.name): + self.lm_head.build(None) + + +class TFRobertaForCausalLM(TFRobertaPreTrainedModel, TFCausalLanguageModelingLoss): + # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model + _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head.decoder.weight"] + + def __init__(self, config: RobertaConfig, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + + if not config.is_decoder: + logger.warning("If you want to use `TFRobertaLMHeadModel` as a standalone, add `is_decoder=True.`") + + self.roberta = TFRobertaMainLayer(config, add_pooling_layer=False, name="roberta") + self.lm_head = TFRobertaLMHead(config, input_embeddings=self.roberta.embeddings, name="lm_head") + + def get_lm_head(self): + return self.lm_head + + def get_prefix_bias_name(self): + warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) + return self.name + "/" + self.lm_head.name + + # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMHeadModel.prepare_inputs_for_generation + def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = tf.ones(input_shape) + + # cut decoder_input_ids if past is used + if past_key_values is not None: + input_ids = input_ids[:, -1:] + + return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values} + + @unpack_inputs + @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFCausalLMOutputWithCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: TFModelInputType | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + head_mask: np.ndarray | tf.Tensor | None = None, + inputs_embeds: np.ndarray | tf.Tensor | None = None, + encoder_hidden_states: np.ndarray | tf.Tensor | None = None, + encoder_attention_mask: np.ndarray | tf.Tensor | None = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: np.ndarray | tf.Tensor | None = None, + training: Optional[bool] = False, + ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]: + r""" + encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) + contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*, defaults to `True`): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). Set to `False` during training, `True` during generation + labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the cross entropy classification loss. Indices should be in `[0, ..., + config.vocab_size - 1]`. + """ + outputs = self.roberta( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + sequence_output = outputs[0] + logits = self.lm_head(hidden_states=sequence_output, training=training) + loss = None + + if labels is not None: + # shift labels to the left and cut last logit token + shifted_logits = logits[:, :-1] + labels = labels[:, 1:] + loss = self.hf_compute_loss(labels=labels, logits=shifted_logits) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TFCausalLMOutputWithCrossAttentions( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "roberta", None) is not None: + with tf.name_scope(self.roberta.name): + self.roberta.build(None) + if getattr(self, "lm_head", None) is not None: + with tf.name_scope(self.lm_head.name): + self.lm_head.build(None) + + +class TFRobertaClassificationHead(keras.layers.Layer): + """Head for sentence-level classification tasks.""" + + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + self.dense = keras.layers.Dense( + config.hidden_size, + kernel_initializer=get_initializer(config.initializer_range), + activation="tanh", + name="dense", + ) + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = keras.layers.Dropout(classifier_dropout) + self.out_proj = keras.layers.Dense( + config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj" + ) + self.config = config + + def call(self, features, training=False): + x = features[:, 0, :] # take token (equiv. 
to [CLS]) + x = self.dropout(x, training=training) + x = self.dense(x) + x = self.dropout(x, training=training) + x = self.out_proj(x) + return x + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "dense", None) is not None: + with tf.name_scope(self.dense.name): + self.dense.build([None, None, self.config.hidden_size]) + if getattr(self, "out_proj", None) is not None: + with tf.name_scope(self.out_proj.name): + self.out_proj.build([None, None, self.config.hidden_size]) + + +@add_start_docstrings( + """ + RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the + pooled output) e.g. for GLUE tasks. + """, + ROBERTA_START_DOCSTRING, +) +class TFRobertaForSequenceClassification(TFRobertaPreTrainedModel, TFSequenceClassificationLoss): + # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model + _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"] + + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.num_labels = config.num_labels + + self.roberta = TFRobertaMainLayer(config, add_pooling_layer=False, name="roberta") + self.classifier = TFRobertaClassificationHead(config, name="classifier") + + @unpack_inputs + @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint="cardiffnlp/twitter-roberta-base-emotion", + output_type=TFSequenceClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output="'optimism'", + expected_loss=0.08, + ) + def call( + self, + input_ids: TFModelInputType | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + head_mask: np.ndarray | tf.Tensor | None = None, + inputs_embeds: np.ndarray | tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: np.ndarray | tf.Tensor | None = None, + training: Optional[bool] = False, + ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: + r""" + labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
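+
+        Example (an illustrative sketch added for this doc pass, not part of the upstream file; the prediction
+        depends on the checkpoint, assumed here to be the `cardiffnlp/twitter-roberta-base-emotion` checkpoint
+        referenced in the decorators above):
+
+        ```python
+        >>> import tensorflow as tf
+        >>> from transformers import AutoTokenizer, TFRobertaForSequenceClassification
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-emotion")
+        >>> model = TFRobertaForSequenceClassification.from_pretrained("cardiffnlp/twitter-roberta-base-emotion")
+
+        >>> inputs = tokenizer("What a great day!", return_tensors="tf")
+        >>> logits = model(**inputs).logits  # shape: (batch_size, config.num_labels)
+        >>> predicted_label = model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])]
+        ```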
+ """ + outputs = self.roberta( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + sequence_output = outputs[0] + logits = self.classifier(sequence_output, training=training) + + loss = None if labels is None else self.hf_compute_loss(labels, logits) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TFSequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "roberta", None) is not None: + with tf.name_scope(self.roberta.name): + self.roberta.build(None) + if getattr(self, "classifier", None) is not None: + with tf.name_scope(self.classifier.name): + self.classifier.build(None) + + +@add_start_docstrings( + """ + Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a + softmax) e.g. for RocStories/SWAG tasks. + """, + ROBERTA_START_DOCSTRING, +) +class TFRobertaForMultipleChoice(TFRobertaPreTrainedModel, TFMultipleChoiceLoss): + # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model + _keys_to_ignore_on_load_unexpected = [r"lm_head"] + _keys_to_ignore_on_load_missing = [r"dropout"] + + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + + self.roberta = TFRobertaMainLayer(config, name="roberta") + self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) + self.classifier = keras.layers.Dense( + 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier" + ) + self.config = config + + @unpack_inputs + @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFMultipleChoiceModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: TFModelInputType | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + head_mask: np.ndarray | tf.Tensor | None = None, + inputs_embeds: np.ndarray | tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: np.ndarray | tf.Tensor | None = None, + training: Optional[bool] = False, + ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]: + r""" + labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): + Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` + where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) + """ + + if input_ids is not None: + num_choices = shape_list(input_ids)[1] + seq_length = shape_list(input_ids)[2] + else: + num_choices = shape_list(inputs_embeds)[1] + seq_length = shape_list(inputs_embeds)[2] + + flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None + flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None + flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None + flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None + outputs = self.roberta( + flat_input_ids, + flat_attention_mask, + flat_token_type_ids, + flat_position_ids, + head_mask, + inputs_embeds, + output_attentions, + output_hidden_states, + return_dict=return_dict, + training=training, + ) + pooled_output = outputs[1] + pooled_output = self.dropout(pooled_output, training=training) + logits = self.classifier(pooled_output) + reshaped_logits = tf.reshape(logits, (-1, num_choices)) + + loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits) + + if not return_dict: + output = (reshaped_logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TFMultipleChoiceModelOutput( + loss=loss, + logits=reshaped_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "roberta", None) is not None: + with tf.name_scope(self.roberta.name): + self.roberta.build(None) + if getattr(self, "classifier", None) is not None: + with tf.name_scope(self.classifier.name): + self.classifier.build([None, None, self.config.hidden_size]) + + +@add_start_docstrings( + """ + RoBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for + Named-Entity-Recognition (NER) tasks. + """, + ROBERTA_START_DOCSTRING, +) +class TFRobertaForTokenClassification(TFRobertaPreTrainedModel, TFTokenClassificationLoss): + # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model + _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"] + _keys_to_ignore_on_load_missing = [r"dropout"] + + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.num_labels = config.num_labels + + self.roberta = TFRobertaMainLayer(config, add_pooling_layer=False, name="roberta") + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = keras.layers.Dropout(classifier_dropout) + self.classifier = keras.layers.Dense( + config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" + ) + self.config = config + + @unpack_inputs + @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint="ydshieh/roberta-large-ner-english", + output_type=TFTokenClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output="['O', 'ORG', 'ORG', 'O', 'O', 'O', 'O', 'O', 'LOC', 'O', 'LOC', 'LOC']", + expected_loss=0.01, + ) + def call( + self, + input_ids: TFModelInputType | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + head_mask: np.ndarray | tf.Tensor | None = None, + inputs_embeds: np.ndarray | tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: np.ndarray | tf.Tensor | None = None, + training: Optional[bool] = False, + ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: + r""" + labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. + """ + outputs = self.roberta( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output, training=training) + logits = self.classifier(sequence_output) + + loss = None if labels is None else self.hf_compute_loss(labels, logits) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TFTokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "roberta", None) is not None: + with tf.name_scope(self.roberta.name): + self.roberta.build(None) + if getattr(self, "classifier", None) is not None: + with tf.name_scope(self.classifier.name): + self.classifier.build([None, None, self.config.hidden_size]) + + +@add_start_docstrings( + """ + RoBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear + layers on top of the hidden-states output to compute `span start logits` and `span end logits`). + """, + ROBERTA_START_DOCSTRING, +) +class TFRobertaForQuestionAnswering(TFRobertaPreTrainedModel, TFQuestionAnsweringLoss): + # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model + _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"] + + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.num_labels = config.num_labels + + self.roberta = TFRobertaMainLayer(config, add_pooling_layer=False, name="roberta") + self.qa_outputs = keras.layers.Dense( + config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" + ) + self.config = config + + @unpack_inputs + @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint="ydshieh/roberta-base-squad2", + output_type=TFQuestionAnsweringModelOutput, + config_class=_CONFIG_FOR_DOC, + expected_output="' puppet'", + expected_loss=0.86, + ) + def call( + self, + input_ids: TFModelInputType | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + head_mask: np.ndarray | tf.Tensor | None = None, + inputs_embeds: np.ndarray | tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + start_positions: np.ndarray | tf.Tensor | None = None, + end_positions: np.ndarray | tf.Tensor | None = None, + training: Optional[bool] = False, + ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: + r""" + start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. 
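+
+        Example (an illustrative sketch added for this doc pass, not part of the upstream file; the extracted
+        span depends on the checkpoint, assumed here to be the `ydshieh/roberta-base-squad2` checkpoint
+        referenced in the decorators above):
+
+        ```python
+        >>> import tensorflow as tf
+        >>> from transformers import AutoTokenizer, TFRobertaForQuestionAnswering
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("ydshieh/roberta-base-squad2")
+        >>> model = TFRobertaForQuestionAnswering.from_pretrained("ydshieh/roberta-base-squad2")
+
+        >>> inputs = tokenizer("Who was Jim Henson?", "Jim Henson was a nice puppet.", return_tensors="tf")
+        >>> outputs = model(**inputs)
+        >>> start = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
+        >>> end = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])
+        >>> answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])
+        ```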
+ """ + outputs = self.roberta( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = tf.split(logits, 2, axis=-1) + start_logits = tf.squeeze(start_logits, axis=-1) + end_logits = tf.squeeze(end_logits, axis=-1) + + loss = None + if start_positions is not None and end_positions is not None: + labels = {"start_position": start_positions} + labels["end_position"] = end_positions + loss = self.hf_compute_loss(labels, (start_logits, end_logits)) + + if not return_dict: + output = (start_logits, end_logits) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TFQuestionAnsweringModelOutput( + loss=loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "roberta", None) is not None: + with tf.name_scope(self.roberta.name): + self.roberta.build(None) + if getattr(self, "qa_outputs", None) is not None: + with tf.name_scope(self.qa_outputs.name): + self.qa_outputs.build([None, None, self.config.hidden_size]) diff --git a/venv/lib/python3.10/site-packages/transformers/models/roberta/tokenization_roberta.py b/venv/lib/python3.10/site-packages/transformers/models/roberta/tokenization_roberta.py new file mode 100644 index 0000000000000000000000000000000000000000..072c44ac4dd35900196e6f5d22534e82b54a44ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/roberta/tokenization_roberta.py @@ -0,0 +1,399 @@ +# coding=utf-8 +# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tokenization classes for RoBERTa.""" + +import json +import os +from functools import lru_cache +from typing import List, Optional, Tuple + +import regex as re + +from ...tokenization_utils import AddedToken, PreTrainedTokenizer +from ...utils import logging + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = { + "vocab_file": "vocab.json", + "merges_file": "merges.txt", +} + + +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control + characters the bpe code barfs on. + + The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab + if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for + decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup + tables between utf-8 bytes and unicode strings. 
+ """ + bs = ( + list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) + ) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8 + n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """ + Return set of symbol pairs in a word. + + Word is represented as tuple of symbols (symbols being variable-length strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +class RobertaTokenizer(PreTrainedTokenizer): + """ + Constructs a RoBERTa tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding. + + This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will + be encoded differently whether it is at the beginning of the sentence (without space) or not: + + ```python + >>> from transformers import RobertaTokenizer + + >>> tokenizer = RobertaTokenizer.from_pretrained("FacebookAI/roberta-base") + >>> tokenizer("Hello world")["input_ids"] + [0, 31414, 232, 2] + + >>> tokenizer(" Hello world")["input_ids"] + [0, 20920, 232, 2] + ``` + + You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you + call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. + + + + When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one). + + + + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to + this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + merges_file (`str`): + Path to the merges file. + errors (`str`, *optional*, defaults to `"replace"`): + Paradigm to follow when decoding bytes to UTF-8. See + [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. + bos_token (`str`, *optional*, defaults to `""`): + The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. + + + + When building a sequence using special tokens, this is not the token that is used for the beginning of + sequence. The token used is the `cls_token`. + + + + eos_token (`str`, *optional*, defaults to `""`): + The end of sequence token. + + + + When building a sequence using special tokens, this is not the token that is used for the end of sequence. + The token used is the `sep_token`. + + + + sep_token (`str`, *optional*, defaults to `""`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + cls_token (`str`, *optional*, defaults to `""`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + unk_token (`str`, *optional*, defaults to `""`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. 
+ pad_token (`str`, *optional*, defaults to `""`): + The token used for padding, for example when batching sequences of different lengths. + mask_token (`str`, *optional*, defaults to `""`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + add_prefix_space (`bool`, *optional*, defaults to `False`): + Whether or not to add an initial space to the input. This allows to treat the leading word just as any + other word. (RoBERTa tokenizer detect beginning of words by the preceding space). + """ + + vocab_files_names = VOCAB_FILES_NAMES + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, + vocab_file, + merges_file, + errors="replace", + bos_token="", + eos_token="", + sep_token="", + cls_token="", + unk_token="", + pad_token="", + mask_token="", + add_prefix_space=False, + **kwargs, + ): + bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token + pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token + eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token + unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token + sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token + cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token + + # Mask token behave like a normal word, i.e. include the space before it + mask_token = ( + AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False) + if isinstance(mask_token, str) + else mask_token + ) + + # these special tokens are not part of the vocab.json, let's add them in the correct order + + with open(vocab_file, encoding="utf-8") as vocab_handle: + self.encoder = json.load(vocab_handle) + self.decoder = {v: k for k, v in self.encoder.items()} + self.errors = errors # how to handle errors in decoding + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + with open(merges_file, encoding="utf-8") as merges_handle: + bpe_merges = merges_handle.read().split("\n")[1:-1] + bpe_merges = [tuple(merge.split()) for merge in bpe_merges] + self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) + self.cache = {} + self.add_prefix_space = add_prefix_space + + # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions + self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") + + super().__init__( + errors=errors, + bos_token=bos_token, + eos_token=eos_token, + unk_token=unk_token, + sep_token=sep_token, + cls_token=cls_token, + pad_token=pad_token, + mask_token=mask_token, + add_prefix_space=add_prefix_space, + **kwargs, + ) + + @property + def vocab_size(self): + return len(self.encoder) + + def get_vocab(self): + vocab = dict(self.encoder).copy() + vocab.update(self.added_tokens_encoder) + return vocab + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token) + pairs = get_pairs(word) + + if not pairs: + return token + + while True: + bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < 
len(word): + try: + j = word.index(first, i) + except ValueError: + new_word.extend(word[i:]) + break + else: + new_word.extend(word[i:j]) + i = j + + if word[i] == first and i < len(word) - 1 and word[i + 1] == second: + new_word.append(first + second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = " ".join(word) + self.cache[token] = word + return word + + def _tokenize(self, text): + """Tokenize a string.""" + bpe_tokens = [] + for token in re.findall(self.pat, text): + token = "".join( + self.byte_encoder[b] for b in token.encode("utf-8") + ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) + bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) + return bpe_tokens + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.encoder.get(token, self.encoder.get(self.unk_token)) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.decoder.get(index) + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + text = "".join(tokens) + text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) + return text + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + merge_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] + ) + + with open(vocab_file, "w", encoding="utf-8") as f: + f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") + + index = 0 + with open(merge_file, "w", encoding="utf-8") as writer: + writer.write("#version: 0.2\n") + for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." + " Please check that the tokenizer is not corrupted!" + ) + index = token_index + writer.write(" ".join(bpe_tokens) + "\n") + index += 1 + + return vocab_file, merge_file + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A RoBERTa sequence has the following format: + + - single sequence: ` X ` + - pair of sequences: ` A B ` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
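+
+        Example (an illustrative sketch added for this doc pass; the IDs assume the `FacebookAI/roberta-base`
+        vocabulary used in the class docstring, where `<s>` is 0 and `</s>` is 2):
+
+        ```python
+        >>> tokenizer = RobertaTokenizer.from_pretrained("FacebookAI/roberta-base")
+        >>> tokenizer.build_inputs_with_special_tokens([31414], [232])
+        [0, 31414, 2, 2, 232, 2]
+        ```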
+ """ + if token_ids_1 is None: + return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + cls = [self.cls_token_id] + sep = [self.sep_token_id] + return cls + token_ids_0 + sep + sep + token_ids_1 + sep + + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if token_ids_1 is None: + return [1] + ([0] * len(token_ids_0)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not + make use of token type ids, therefore a list of zeros is returned. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of zeros. + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] + + def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): + add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) + if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): + text = " " + text + return (text, kwargs) diff --git a/venv/lib/python3.10/site-packages/transformers/models/roberta/tokenization_roberta_fast.py b/venv/lib/python3.10/site-packages/transformers/models/roberta/tokenization_roberta_fast.py new file mode 100644 index 0000000000000000000000000000000000000000..702af8a33e1b94106854b7ff7055f5a872ed4e43 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/roberta/tokenization_roberta_fast.py @@ -0,0 +1,268 @@ +# coding=utf-8 +# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Fast Tokenization classes for RoBERTa.""" +import json +from typing import List, Optional, Tuple + +from tokenizers import pre_tokenizers, processors + +from ...tokenization_utils_base import AddedToken, BatchEncoding +from ...tokenization_utils_fast import PreTrainedTokenizerFast +from ...utils import logging +from .tokenization_roberta import RobertaTokenizer + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} + + +class RobertaTokenizerFast(PreTrainedTokenizerFast): + """ + Construct a "fast" RoBERTa tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2 + tokenizer, using byte-level Byte-Pair-Encoding. + + This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will + be encoded differently whether it is at the beginning of the sentence (without space) or not: + + ```python + >>> from transformers import RobertaTokenizerFast + + >>> tokenizer = RobertaTokenizerFast.from_pretrained("FacebookAI/roberta-base") + >>> tokenizer("Hello world")["input_ids"] + [0, 31414, 232, 2] + + >>> tokenizer(" Hello world")["input_ids"] + [0, 20920, 232, 2] + ``` + + You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you + call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. + + + + When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`. + + + + This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should + refer to this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + merges_file (`str`): + Path to the merges file. + errors (`str`, *optional*, defaults to `"replace"`): + Paradigm to follow when decoding bytes to UTF-8. See + [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. + bos_token (`str`, *optional*, defaults to `""`): + The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. + + + + When building a sequence using special tokens, this is not the token that is used for the beginning of + sequence. The token used is the `cls_token`. + + + + eos_token (`str`, *optional*, defaults to `""`): + The end of sequence token. + + + + When building a sequence using special tokens, this is not the token that is used for the end of sequence. + The token used is the `sep_token`. + + + + sep_token (`str`, *optional*, defaults to `""`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + cls_token (`str`, *optional*, defaults to `""`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + unk_token (`str`, *optional*, defaults to `""`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. 
+ pad_token (`str`, *optional*, defaults to `""`): + The token used for padding, for example when batching sequences of different lengths. + mask_token (`str`, *optional*, defaults to `""`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + add_prefix_space (`bool`, *optional*, defaults to `False`): + Whether or not to add an initial space to the input. This allows to treat the leading word just as any + other word. (RoBERTa tokenizer detect beginning of words by the preceding space). + trim_offsets (`bool`, *optional*, defaults to `True`): + Whether the post processing step should trim offsets to avoid including whitespaces. + """ + + vocab_files_names = VOCAB_FILES_NAMES + model_input_names = ["input_ids", "attention_mask"] + slow_tokenizer_class = RobertaTokenizer + + def __init__( + self, + vocab_file=None, + merges_file=None, + tokenizer_file=None, + errors="replace", + bos_token="", + eos_token="", + sep_token="", + cls_token="", + unk_token="", + pad_token="", + mask_token="", + add_prefix_space=False, + trim_offsets=True, + **kwargs, + ): + mask_token = ( + AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False) + if isinstance(mask_token, str) + else mask_token + ) + super().__init__( + vocab_file, + merges_file, + tokenizer_file=tokenizer_file, + errors=errors, + bos_token=bos_token, + eos_token=eos_token, + sep_token=sep_token, + cls_token=cls_token, + unk_token=unk_token, + pad_token=pad_token, + mask_token=mask_token, + add_prefix_space=add_prefix_space, + trim_offsets=trim_offsets, + **kwargs, + ) + + pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) + if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space: + pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type")) + pre_tok_state["add_prefix_space"] = add_prefix_space + self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state) + + self.add_prefix_space = add_prefix_space + + tokenizer_component = "post_processor" + tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None) + if tokenizer_component_instance: + state = json.loads(tokenizer_component_instance.__getstate__()) + + # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` + if "sep" in state: + state["sep"] = tuple(state["sep"]) + if "cls" in state: + state["cls"] = tuple(state["cls"]) + + changes_to_apply = False + + if state.get("add_prefix_space", add_prefix_space) != add_prefix_space: + state["add_prefix_space"] = add_prefix_space + changes_to_apply = True + + if state.get("trim_offsets", trim_offsets) != trim_offsets: + state["trim_offsets"] = trim_offsets + changes_to_apply = True + + if changes_to_apply: + component_class = getattr(processors, state.pop("type")) + new_value = component_class(**state) + setattr(self.backend_tokenizer, tokenizer_component, new_value) + + @property + def mask_token(self) -> str: + """ + `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not + having been set. + + Roberta tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily + comprise the space before the **. 
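+
+        For example (an illustrative note added for this doc pass, assuming the `FacebookAI/roberta-base`
+        vocabulary): `"Paris is the <mask> of France."` and `"Paris is the<mask> of France."` encode to the
+        same input IDs, because `lstrip=True` lets the mask token absorb the space in front of it.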
+ """ + if self._mask_token is None: + if self.verbose: + logger.error("Using mask_token, but it is not set yet.") + return None + return str(self._mask_token) + + @mask_token.setter + def mask_token(self, value): + """ + Overriding the default behavior of the mask token to have it eat the space before it. + + This is needed to preserve backward compatibility with all the previously used models based on Roberta. + """ + # Mask token behave like a normal word, i.e. include the space before it + # So we set lstrip to True + value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value + self._mask_token = value + + def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: + is_split_into_words = kwargs.get("is_split_into_words", False) + assert self.add_prefix_space or not is_split_into_words, ( + f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " + "to use it with pretokenized inputs." + ) + + return super()._batch_encode_plus(*args, **kwargs) + + def _encode_plus(self, *args, **kwargs) -> BatchEncoding: + is_split_into_words = kwargs.get("is_split_into_words", False) + + assert self.add_prefix_space or not is_split_into_words, ( + f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " + "to use it with pretokenized inputs." + ) + + return super()._encode_plus(*args, **kwargs) + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + files = self._tokenizer.model.save(save_directory, name=filename_prefix) + return tuple(files) + + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id] + if token_ids_1 is None: + return output + + return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id] + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not + make use of token type ids, therefore a list of zeros is returned. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of zeros. + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] diff --git a/venv/lib/python3.10/site-packages/transformers/models/sam/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/sam/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e8006e89e0f11d0c737697649adf654314612ec5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/sam/__init__.py @@ -0,0 +1,105 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_tf_available, + is_torch_available, + is_vision_available, +) + + +_import_structure = { + "configuration_sam": [ + "SAM_PRETRAINED_CONFIG_ARCHIVE_MAP", + "SamConfig", + "SamMaskDecoderConfig", + "SamPromptEncoderConfig", + "SamVisionConfig", + ], + "processing_sam": ["SamProcessor"], +} + + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_sam"] = [ + "SAM_PRETRAINED_MODEL_ARCHIVE_LIST", + "SamModel", + "SamPreTrainedModel", + ] +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tf_sam"] = [ + "TF_SAM_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFSamModel", + "TFSamPreTrainedModel", + ] +try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["image_processing_sam"] = ["SamImageProcessor"] + + +if TYPE_CHECKING: + from .configuration_sam import ( + SAM_PRETRAINED_CONFIG_ARCHIVE_MAP, + SamConfig, + SamMaskDecoderConfig, + SamPromptEncoderConfig, + SamVisionConfig, + ) + from .processing_sam import SamProcessor + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_sam import SAM_PRETRAINED_MODEL_ARCHIVE_LIST, SamModel, SamPreTrainedModel + + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tf_sam import TF_SAM_PRETRAINED_MODEL_ARCHIVE_LIST, TFSamModel, TFSamPreTrainedModel + + try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .image_processing_sam import SamImageProcessor + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/sam/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/sam/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d08ff0e01c704b7127874429a1b975ba2469e557 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/sam/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/sam/__pycache__/configuration_sam.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/sam/__pycache__/configuration_sam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e25f79a8fb8a1fa93dd6d41ef999035b8571d559 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/sam/__pycache__/configuration_sam.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/sam/__pycache__/convert_sam_to_hf.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/sam/__pycache__/convert_sam_to_hf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5ec97c18ca84dde93a9e589b598c6b2a2be44c2 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/transformers/models/sam/__pycache__/convert_sam_to_hf.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/sam/__pycache__/image_processing_sam.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/sam/__pycache__/image_processing_sam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92eb347073cde36205f164ac153667360f9834b3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/sam/__pycache__/image_processing_sam.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/sam/__pycache__/modeling_sam.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/sam/__pycache__/modeling_sam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62a38f9a2de5470943971d7f5316a180963f3e52 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/sam/__pycache__/modeling_sam.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/sam/__pycache__/modeling_tf_sam.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/sam/__pycache__/modeling_tf_sam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6f30dd5aa03aa19d10fc9a8ea1fba026182aec2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/sam/__pycache__/modeling_tf_sam.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/sam/__pycache__/processing_sam.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/sam/__pycache__/processing_sam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9d7714daef79f12cd30a4c5ef1bef7b564156da Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/sam/__pycache__/processing_sam.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/sam/configuration_sam.py b/venv/lib/python3.10/site-packages/transformers/models/sam/configuration_sam.py new file mode 100644 index 0000000000000000000000000000000000000000..5afe75eb8eae4352dd0bd0100d04eee7b4dd994f --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/sam/configuration_sam.py @@ -0,0 +1,309 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" SAM model configuration""" + + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import SAM_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class SamPromptEncoderConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`SamPromptEncoder`]. The [`SamPromptEncoder`] + module is used to encode the input 2D points and bounding boxes. 
Instantiating a configuration with the defaults will yield + a similar configuration to that of the SAM-vit-h + [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + hidden_size (`int`, *optional*, defaults to 256): + Dimensionality of the hidden states. + image_size (`int`, *optional*, defaults to 1024): + The expected output resolution of the image. + patch_size (`int`, *optional*, defaults to 16): + The size (resolution) of each patch. + mask_input_channels (`int`, *optional*, defaults to 16): + The number of channels to be fed to the `MaskDecoder` module. + num_point_embeddings (`int`, *optional*, defaults to 4): + The number of point embeddings to be used. + hidden_act (`str`, *optional*, defaults to `"gelu"`): + The non-linear activation function in the encoder and pooler. + layer_norm_eps (`float`, *optional*, defaults to 1e-06): + The epsilon used by the layer normalization layers. + """ + + def __init__( + self, + hidden_size=256, + image_size=1024, + patch_size=16, + mask_input_channels=16, + num_point_embeddings=4, + hidden_act="gelu", + layer_norm_eps=1e-6, + **kwargs, + ): + super().__init__(**kwargs) + self.hidden_size = hidden_size + self.image_size = image_size + self.patch_size = patch_size + self.image_embedding_size = image_size // patch_size + self.mask_input_channels = mask_input_channels + self.num_point_embeddings = num_point_embeddings + self.hidden_act = hidden_act + self.layer_norm_eps = layer_norm_eps + + +class SamMaskDecoderConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`SamMaskDecoder`]. It is used to instantiate a SAM + mask decoder according to the specified arguments, defining the model architecture. Instantiating a configuration + with the defaults will yield a similar configuration to that of the SAM-vit-h + [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + hidden_size (`int`, *optional*, defaults to 256): + Dimensionality of the hidden states. + hidden_act (`str`, *optional*, defaults to `"relu"`): + The non-linear activation function used inside the `SamMaskDecoder` module. + mlp_dim (`int`, *optional*, defaults to 2048): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + num_hidden_layers (`int`, *optional*, defaults to 2): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 8): + Number of attention heads for each attention layer in the Transformer encoder. + attention_downsample_rate (`int`, *optional*, defaults to 2): + The downsampling rate of the attention layer. + num_multimask_outputs (`int`, *optional*, defaults to 3): + The number of outputs from the `SamMaskDecoder` module. In the Segment Anything paper, this is set to 3. + iou_head_depth (`int`, *optional*, defaults to 3): + The number of layers in the IoU head module. + iou_head_hidden_dim (`int`, *optional*, defaults to 256): + The dimensionality of the hidden states in the IoU head module. + layer_norm_eps (`float`, *optional*, defaults to 1e-06): + The epsilon used by the layer normalization layers.
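Example (a minimal sketch that only exercises the defaults documented above):

```python
>>> from transformers import SamMaskDecoderConfig

>>> # A mask decoder config built from the documented defaults
>>> config = SamMaskDecoderConfig()
>>> (config.num_multimask_outputs, config.iou_head_depth, config.mlp_dim)
(3, 3, 2048)
```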
+ + """ + + def __init__( + self, + hidden_size=256, + hidden_act="relu", + mlp_dim=2048, + num_hidden_layers=2, + num_attention_heads=8, + attention_downsample_rate=2, + num_multimask_outputs=3, + iou_head_depth=3, + iou_head_hidden_dim=256, + layer_norm_eps=1e-6, + **kwargs, + ): + super().__init__(**kwargs) + self.hidden_size = hidden_size + self.hidden_act = hidden_act + self.mlp_dim = mlp_dim + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.attention_downsample_rate = attention_downsample_rate + self.num_multimask_outputs = num_multimask_outputs + self.iou_head_depth = iou_head_depth + self.iou_head_hidden_dim = iou_head_hidden_dim + self.layer_norm_eps = layer_norm_eps + + +class SamVisionConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`SamVisionModel`]. It is used to instantiate a SAM + vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration + defaults will yield a similar configuration to that of the SAM ViT-h + [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + output_channels (`int`, *optional*, defaults to 256): + Dimensionality of the output channels in the Patch Encoder. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + num_channels (`int`, *optional*, defaults to 3): + Number of channels in the input image. + image_size (`int`, *optional*, defaults to 1024): + Expected resolution. Target size of the resized input image. + patch_size (`int`, *optional*, defaults to 16): + Size of the patches to be extracted from the input image. + hidden_act (`str`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) + layer_norm_eps (`float`, *optional*, defaults to 1e-06): + The epsilon used by the layer normalization layers. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + initializer_range (`float`, *optional*, defaults to 1e-10): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + qkv_bias (`bool`, *optional*, defaults to `True`): + Whether to add a bias to query, key, value projections. + mlp_ratio (`float`, *optional*, defaults to 4.0): + Ratio of mlp hidden dim to embedding dim. + use_abs_pos (`bool`, *optional*, defaults to `True`): + Whether to use absolute position embedding. + use_rel_pos (`bool`, *optional*, defaults to `True`): + Whether to use relative position embedding. + window_size (`int`, *optional*, defaults to 14): + Window size for relative position. + global_attn_indexes (`List[int]`, *optional*, defaults to `[2, 5, 8, 11]`): + The indexes of the global attention layers. + num_pos_feats (`int`, *optional*, defaults to 128): + The dimensionality of the position embedding. + mlp_dim (`int`, *optional*): + The dimensionality of the MLP layer in the Transformer encoder. If `None`, defaults to `mlp_ratio * + hidden_size`. 
+ """ + + def __init__( + self, + hidden_size=768, + output_channels=256, + num_hidden_layers=12, + num_attention_heads=12, + num_channels=3, + image_size=1024, + patch_size=16, + hidden_act="gelu", + layer_norm_eps=1e-06, + attention_dropout=0.0, + initializer_range=1e-10, + qkv_bias=True, + mlp_ratio=4.0, + use_abs_pos=True, + use_rel_pos=True, + window_size=14, + global_attn_indexes=[2, 5, 8, 11], + num_pos_feats=128, + mlp_dim=None, + **kwargs, + ): + super().__init__(**kwargs) + + self.hidden_size = hidden_size + self.output_channels = output_channels + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.num_channels = num_channels + self.image_size = image_size + self.patch_size = patch_size + self.hidden_act = hidden_act + self.layer_norm_eps = layer_norm_eps + self.attention_dropout = attention_dropout + self.initializer_range = initializer_range + self.qkv_bias = qkv_bias + self.mlp_ratio = mlp_ratio + self.use_abs_pos = use_abs_pos + self.use_rel_pos = use_rel_pos + self.window_size = window_size + self.global_attn_indexes = global_attn_indexes + self.num_pos_feats = num_pos_feats + self.mlp_dim = int(hidden_size * mlp_ratio) if mlp_dim is None else mlp_dim + + +class SamConfig(PretrainedConfig): + r""" + [`SamConfig`] is the configuration class to store the configuration of a [`SamModel`]. It is used to instantiate a + SAM model according to the specified arguments, defining the vision model, prompt-encoder model and mask decoder + configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the + SAM-ViT-H [facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + vision_config (Union[`dict`, `SamVisionConfig`], *optional*): + Dictionary of configuration options used to initialize [`SamVisionConfig`]. + prompt_encoder_config (Union[`dict`, `SamPromptEncoderConfig`], *optional*): + Dictionary of configuration options used to initialize [`SamPromptEncoderConfig`]. + mask_decoder_config (Union[`dict`, `SamMaskDecoderConfig`], *optional*): + Dictionary of configuration options used to initialize [`SamMaskDecoderConfig`]. + + kwargs (*optional*): + Dictionary of keyword arguments. + + Example: + + ```python + >>> from transformers import ( + ... SamVisionConfig, + ... SamPromptEncoderConfig, + ... SamMaskDecoderConfig, + ... SamModel, + ... 
) + + >>> # Initializing a SamConfig with `"facebook/sam-vit-huge"` style configuration + >>> configuration = SamConfig() + + >>> # Initializing a SamModel (with random weights) from the `"facebook/sam-vit-huge"` style configuration + >>> model = SamModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + + >>> # We can also initialize a SamConfig from a SamVisionConfig, SamPromptEncoderConfig, and SamMaskDecoderConfig + + >>> # Initializing SAM vision, prompt encoder and mask decoder configurations + >>> vision_config = SamVisionConfig() + >>> prompt_encoder_config = SamPromptEncoderConfig() + >>> mask_decoder_config = SamMaskDecoderConfig() + + >>> config = SamConfig(vision_config, prompt_encoder_config, mask_decoder_config) + ```""" + + model_type = "sam" + + def __init__( + self, + vision_config=None, + prompt_encoder_config=None, + mask_decoder_config=None, + initializer_range=0.02, + **kwargs, + ): + super().__init__(**kwargs) + vision_config = vision_config if vision_config is not None else {} + prompt_encoder_config = prompt_encoder_config if prompt_encoder_config is not None else {} + mask_decoder_config = mask_decoder_config if mask_decoder_config is not None else {} + + if isinstance(vision_config, SamVisionConfig): + vision_config = vision_config.to_dict() + if isinstance(prompt_encoder_config, SamPromptEncoderConfig): + prompt_encoder_config = prompt_encoder_config.to_dict() + if isinstance(mask_decoder_config, SamMaskDecoderConfig): + mask_decoder_config = mask_decoder_config.to_dict() + + self.vision_config = SamVisionConfig(**vision_config) + self.prompt_encoder_config = SamPromptEncoderConfig(**prompt_encoder_config) + self.mask_decoder_config = SamMaskDecoderConfig(**mask_decoder_config) + self.initializer_range = initializer_range diff --git a/venv/lib/python3.10/site-packages/transformers/models/sam/convert_sam_to_hf.py b/venv/lib/python3.10/site-packages/transformers/models/sam/convert_sam_to_hf.py new file mode 100644 index 0000000000000000000000000000000000000000..be375494f059d0a2c5065ab59375997b8a8c4798 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/sam/convert_sam_to_hf.py @@ -0,0 +1,250 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Convert SAM checkpoints from the original repository. + +URL: https://github.com/facebookresearch/segment-anything. + +Also supports converting the SlimSAM checkpoints from https://github.com/czg1225/SlimSAM/tree/master.
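Example usage (illustrative; the dump folder path below is hypothetical):

    python convert_sam_to_hf.py --model_name sam_vit_b_01ec64 --pytorch_dump_folder_path ./sam-vit-base

For SlimSAM checkpoints a local --checkpoint_path must be provided; for the original SAM checkpoints the weights are downloaded from the ybelkada/segment-anything repository on the Hub.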
+""" +import argparse +import re + +import numpy as np +import requests +import torch +from huggingface_hub import hf_hub_download +from PIL import Image + +from transformers import ( + SamConfig, + SamImageProcessor, + SamModel, + SamProcessor, + SamVisionConfig, +) + + +def get_config(model_name): + if "slimsam-50" in model_name: + vision_config = SamVisionConfig( + hidden_size=384, + mlp_dim=1536, + num_hidden_layers=12, + num_attention_heads=12, + global_attn_indexes=[2, 5, 8, 11], + ) + elif "slimsam-77" in model_name: + vision_config = SamVisionConfig( + hidden_size=168, + mlp_dim=696, + num_hidden_layers=12, + num_attention_heads=12, + global_attn_indexes=[2, 5, 8, 11], + ) + elif "sam_vit_b" in model_name: + vision_config = SamVisionConfig() + elif "sam_vit_l" in model_name: + vision_config = SamVisionConfig( + hidden_size=1024, + num_hidden_layers=24, + num_attention_heads=16, + global_attn_indexes=[5, 11, 17, 23], + ) + elif "sam_vit_h" in model_name: + vision_config = SamVisionConfig( + hidden_size=1280, + num_hidden_layers=32, + num_attention_heads=16, + global_attn_indexes=[7, 15, 23, 31], + ) + + config = SamConfig( + vision_config=vision_config, + ) + + return config + + +KEYS_TO_MODIFY_MAPPING = { + "iou_prediction_head.layers.0": "iou_prediction_head.proj_in", + "iou_prediction_head.layers.1": "iou_prediction_head.layers.0", + "iou_prediction_head.layers.2": "iou_prediction_head.proj_out", + "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1", + "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm", + "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2", + "mask_downscaling.0": "mask_embed.conv1", + "mask_downscaling.1": "mask_embed.layer_norm1", + "mask_downscaling.3": "mask_embed.conv2", + "mask_downscaling.4": "mask_embed.layer_norm2", + "mask_downscaling.6": "mask_embed.conv3", + "point_embeddings": "point_embed", + "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding", + "image_encoder": "vision_encoder", + "neck.0": "neck.conv1", + "neck.1": "neck.layer_norm1", + "neck.2": "neck.conv2", + "neck.3": "neck.layer_norm2", + "patch_embed.proj": "patch_embed.projection", + ".norm": ".layer_norm", + "blocks": "layers", +} + + +def replace_keys(state_dict): + model_state_dict = {} + state_dict.pop("pixel_mean", None) + state_dict.pop("pixel_std", None) + + output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*" + + for key, value in state_dict.items(): + for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): + if key_to_modify in key: + key = key.replace(key_to_modify, new_key) + + if re.match(output_hypernetworks_mlps_pattern, key): + layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2)) + if layer_nb == 0: + key = key.replace("layers.0", "proj_in") + elif layer_nb == 1: + key = key.replace("layers.1", "layers.0") + elif layer_nb == 2: + key = key.replace("layers.2", "proj_out") + + model_state_dict[key] = value + + model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[ + "prompt_encoder.shared_embedding.positional_embedding" + ] + + return model_state_dict + + +def convert_sam_checkpoint(model_name, checkpoint_path, pytorch_dump_folder, push_to_hub): + config = get_config(model_name) + + state_dict = torch.load(checkpoint_path, map_location="cpu") + state_dict = replace_keys(state_dict) + + image_processor = SamImageProcessor() + processor = SamProcessor(image_processor=image_processor) + hf_model = 
SamModel(config) + hf_model.eval() + + device = "cuda" if torch.cuda.is_available() else "cpu" + + hf_model.load_state_dict(state_dict) + hf_model = hf_model.to(device) + + img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" + raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") + + input_points = [[[500, 375]]] + input_labels = [[1]] + + inputs = processor(images=np.array(raw_image), return_tensors="pt").to(device) + + with torch.no_grad(): + output = hf_model(**inputs) + scores = output.iou_scores.squeeze() + + if model_name == "sam_vit_b_01ec64": + inputs = processor( + images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt" + ).to(device) + + with torch.no_grad(): + output = hf_model(**inputs) + scores = output.iou_scores.squeeze() + + elif model_name == "sam_vit_h_4b8939": + inputs = processor( + images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt" + ).to(device) + + with torch.no_grad(): + output = hf_model(**inputs) + scores = output.iou_scores.squeeze() + + assert scores[-1].item() == 0.9712603092193604 + + input_boxes = ((75, 275, 1725, 850),) + + inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to(device) + + with torch.no_grad(): + output = hf_model(**inputs) + scores = output.iou_scores.squeeze() + + assert scores[-1].item() == 0.8686015605926514 + + # Test with 2 points and 1 image. + input_points = [[[400, 650], [800, 650]]] + input_labels = [[1, 1]] + + inputs = processor( + images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt" + ).to(device) + + with torch.no_grad(): + output = hf_model(**inputs) + scores = output.iou_scores.squeeze() + + assert scores[-1].item() == 0.9936047792434692 + + if pytorch_dump_folder is not None: + processor.save_pretrained(pytorch_dump_folder) + hf_model.save_pretrained(pytorch_dump_folder) + + if push_to_hub: + repo_id = f"nielsr/{model_name}" if "slimsam" in model_name else f"meta/{model_name}" + processor.push_to_hub(repo_id) + hf_model.push_to_hub(repo_id) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195", "slimsam-50-uniform", "slimsam-77-uniform"] + parser.add_argument( + "--model_name", + default="sam_vit_h_4b8939", + choices=choices, + type=str, + help="Name of the original model to convert", + ) + parser.add_argument( + "--checkpoint_path", + type=str, + required=False, + help="Path to the original checkpoint", + ) + parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") + parser.add_argument( + "--push_to_hub", + action="store_true", + help="Whether to push the model and processor to the hub after converting", + ) + + args = parser.parse_args() + + if "slimsam" in args.model_name: + checkpoint_path = args.checkpoint_path + if checkpoint_path is None: + raise ValueError("You need to provide a checkpoint path for SlimSAM models.") + else: + checkpoint_path = hf_hub_download("ybelkada/segment-anything", f"checkpoints/{args.model_name}.pth") + + convert_sam_checkpoint(args.model_name, checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) diff --git a/venv/lib/python3.10/site-packages/transformers/models/sam/image_processing_sam.py b/venv/lib/python3.10/site-packages/transformers/models/sam/image_processing_sam.py new file mode 100644 
index 0000000000000000000000000000000000000000..ccdc72fc7baadb24f420e1d556eb96159c5c4b72 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/sam/image_processing_sam.py @@ -0,0 +1,1496 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Image processor class for SAM.""" +import math +from copy import deepcopy +from itertools import product +from typing import Any, Dict, List, Optional, Tuple, Union + +import numpy as np + +from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict +from ...image_transforms import convert_to_rgb, pad, resize, to_channel_dimension_format +from ...image_utils import ( + IMAGENET_DEFAULT_MEAN, + IMAGENET_DEFAULT_STD, + ChannelDimension, + ImageInput, + PILImageResampling, + get_image_size, + infer_channel_dimension_format, + is_scaled_image, + make_list_of_images, + to_numpy_array, + valid_images, + validate_kwargs, + validate_preprocess_arguments, +) +from ...utils import ( + TensorType, + is_tf_available, + is_torch_available, + is_torchvision_available, + logging, + requires_backends, +) + + +if is_torch_available(): + import torch + import torch.nn.functional as F + +if is_torchvision_available(): + from torchvision.ops.boxes import batched_nms + +if is_tf_available(): + import tensorflow as tf + from tensorflow.experimental import numpy as tnp + + from ...tf_utils import flatten, shape_list + +logger = logging.get_logger(__name__) + + +class SamImageProcessor(BaseImageProcessor): + r""" + Constructs a SAM image processor. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the + `do_resize` parameter in the `preprocess` method. + size (`dict`, *optional*, defaults to `{"longest_edge": 1024}`): + Size of the output image after resizing. Resizes the longest edge of the image to match + `size["longest_edge"]` while maintaining the aspect ratio. Can be overridden by the `size` parameter in the + `preprocess` method. + mask_size (`dict`, *optional*, defaults to `{"longest_edge": 256}`): + Size of the output segmentation map after resizing. Resizes the longest edge of the segmentation map to match + `mask_size["longest_edge"]` while maintaining the aspect ratio. Can be overridden by the `mask_size` parameter + in the `preprocess` method. + resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): + Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the + `preprocess` method. + do_rescale (`bool`, *optional*, defaults to `True`): + Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the + `do_rescale` parameter in the `preprocess` method. + rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): + Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`.
Can be + overridden by the `rescale_factor` parameter in the `preprocess` method. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` + method. + image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): + Mean to use if normalizing the image. This is a float or list of floats the length of the number of + channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. + image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): + Standard deviation to use if normalizing the image. This is a float or list of floats the length of the + number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. + do_pad (`bool`, *optional*, defaults to `True`): + Whether to pad the image to the specified `pad_size`. Can be overridden by the `do_pad` parameter in the + `preprocess` method. + pad_size (`dict`, *optional*, defaults to `{"height": 1024, "width": 1024}`): + Size of the output image after padding. Can be overridden by the `pad_size` parameter in the `preprocess` + method. + mask_pad_size (`dict`, *optional*, defaults to `{"height": 256, "width": 256}`): + Size of the output segmentation map after padding. Can be overridden by the `mask_pad_size` parameter in + the `preprocess` method. + do_convert_rgb (`bool`, *optional*, defaults to `True`): + Whether to convert the image to RGB. + """ + + model_input_names = ["pixel_values"] + + def __init__( + self, + do_resize: bool = True, + size: Dict[str, int] = None, + mask_size: Dict[str, int] = None, + resample: PILImageResampling = PILImageResampling.BILINEAR, + do_rescale: bool = True, + rescale_factor: Union[int, float] = 1 / 255, + do_normalize: bool = True, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + do_pad: bool = True, + pad_size: Dict[str, int] = None, + mask_pad_size: Dict[str, int] = None, + do_convert_rgb: bool = True, + **kwargs, + ) -> None: + super().__init__(**kwargs) + size = size if size is not None else {"longest_edge": 1024} + size = get_size_dict(max_size=size, default_to_square=False) if not isinstance(size, dict) else size + + pad_size = pad_size if pad_size is not None else {"height": 1024, "width": 1024} + pad_size = get_size_dict(pad_size, default_to_square=True) + + mask_size = mask_size if mask_size is not None else {"longest_edge": 256} + mask_size = ( + get_size_dict(max_size=mask_size, default_to_square=False) + if not isinstance(mask_size, dict) + else mask_size + ) + + mask_pad_size = mask_pad_size if mask_pad_size is not None else {"height": 256, "width": 256} + mask_pad_size = get_size_dict(mask_pad_size, default_to_square=True) + + self.do_resize = do_resize + self.size = size + self.mask_size = mask_size + self.resample = resample + self.do_rescale = do_rescale + self.rescale_factor = rescale_factor + self.do_normalize = do_normalize + self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN + self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD + self.do_pad = do_pad + self.pad_size = pad_size + self.mask_pad_size = mask_pad_size +
self.do_convert_rgb = do_convert_rgb + self._valid_processor_keys = [ + "images", + "segmentation_maps", + "do_resize", + "size", + "mask_size", + "resample", + "do_rescale", + "rescale_factor", + "do_normalize", + "image_mean", + "image_std", + "do_pad", + "pad_size", + "mask_pad_size", + "do_convert_rgb", + "return_tensors", + "data_format", + "input_data_format", + ] + + def pad_image( + self, + image: np.ndarray, + pad_size: Dict[str, int], + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Pad an image to `(pad_size["height"], pad_size["width"])` with zeros to the right and bottom. + + Args: + image (`np.ndarray`): + Image to pad. + pad_size (`Dict[str, int]`): + Size of the output image after padding. + data_format (`str` or `ChannelDimension`, *optional*): + The data format of the image. Can be either "channels_first" or "channels_last". If `None`, the + `data_format` of the `image` will be used. + input_data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred. + """ + output_height, output_width = pad_size["height"], pad_size["width"] + input_height, input_width = get_image_size(image, channel_dim=input_data_format) + + pad_width = output_width - input_width + pad_height = output_height - input_height + + padded_image = pad( + image, + ((0, pad_height), (0, pad_width)), + data_format=data_format, + input_data_format=input_data_format, + **kwargs, + ) + return padded_image + + def _get_preprocess_shape(self, old_shape: Tuple[int, int], longest_edge: int): + """ + Compute the output size given input size and target long side length. + """ + oldh, oldw = old_shape + scale = longest_edge * 1.0 / max(oldh, oldw) + newh, neww = oldh * scale, oldw * scale + newh = int(newh + 0.5) + neww = int(neww + 0.5) + return (newh, neww) + + def resize( + self, + image: np.ndarray, + size: Dict[str, int], + resample: PILImageResampling = PILImageResampling.BICUBIC, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Resize an image so that its longest edge matches `size["longest_edge"]`, keeping the aspect ratio. + + Args: + image (`np.ndarray`): + Image to resize. + size (`Dict[str, int]`): + Dictionary in the format `{"longest_edge": int}` specifying the size of the output image. The longest + edge of the image will be resized to the specified size, while the other edge will be resized to + maintain the aspect ratio. + resample: + `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. + data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the output image. If unset, the channel dimension format of the input + image is used. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ + Returns: + `np.ndarray`: The resized image. + """ + size = get_size_dict(size) + if "longest_edge" not in size: + raise ValueError(f"The `size` dictionary must contain the key `longest_edge`. Got {size.keys()}") + input_size = get_image_size(image, channel_dim=input_data_format) + output_height, output_width = self._get_preprocess_shape(input_size, size["longest_edge"]) + return resize( + image, + size=(output_height, output_width), + resample=resample, + data_format=data_format, + input_data_format=input_data_format, + **kwargs, + ) + + def _preprocess( + self, + image: ImageInput, + do_resize: bool, + do_rescale: bool, + do_normalize: bool, + size: Optional[Dict[str, int]] = None, + resample: PILImageResampling = None, + rescale_factor: Optional[float] = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + do_pad: Optional[bool] = None, + pad_size: Optional[Dict[str, int]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ): + if do_resize: + image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) + reshaped_input_size = get_image_size(image, channel_dim=input_data_format) + + if do_rescale: + image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) + + if do_normalize: + image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) + + if do_pad: + image = self.pad_image(image=image, pad_size=pad_size, input_data_format=input_data_format) + + return image, reshaped_input_size + + def _preprocess_image( + self, + image: ImageInput, + do_resize: Optional[bool] = None, + size: Dict[str, int] = None, + resample: PILImageResampling = None, + do_rescale: bool = None, + rescale_factor: Optional[float] = None, + do_normalize: Optional[bool] = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + do_pad: Optional[bool] = None, + pad_size: Optional[Dict[str, int]] = None, + do_convert_rgb: Optional[bool] = None, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]]: + image = to_numpy_array(image) + + # PIL RGBA images are converted to RGB + if do_convert_rgb: + image = convert_to_rgb(image) + + # All transformations expect numpy arrays. + image = to_numpy_array(image) + + if is_scaled_image(image) and do_rescale: + logger.warning_once( + "It looks like you are trying to rescale already rescaled images. If the input" + " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." 
+ ) + + if input_data_format is None: + input_data_format = infer_channel_dimension_format(image) + + original_size = get_image_size(image, channel_dim=input_data_format) + + image, reshaped_input_size = self._preprocess( + image=image, + do_resize=do_resize, + size=size, + resample=resample, + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + do_pad=do_pad, + pad_size=pad_size, + input_data_format=input_data_format, + ) + + if data_format is not None: + image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) + + return image, original_size, reshaped_input_size + + def _preprocess_mask( + self, + segmentation_map: ImageInput, + do_resize: Optional[bool] = None, + mask_size: Dict[str, int] = None, + do_pad: Optional[bool] = None, + mask_pad_size: Optional[Dict[str, int]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> np.ndarray: + segmentation_map = to_numpy_array(segmentation_map) + + # Add channel dimension if missing - needed for certain transformations + if segmentation_map.ndim == 2: + added_channel_dim = True + segmentation_map = segmentation_map[None, ...] + input_data_format = ChannelDimension.FIRST + else: + added_channel_dim = False + if input_data_format is None: + input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1) + + original_size = get_image_size(segmentation_map, channel_dim=input_data_format) + + segmentation_map, _ = self._preprocess( + image=segmentation_map, + do_resize=do_resize, + size=mask_size, + resample=PILImageResampling.NEAREST, + do_rescale=False, + do_normalize=False, + do_pad=do_pad, + pad_size=mask_pad_size, + input_data_format=input_data_format, + ) + + # Remove extra channel dimension if added for processing + if added_channel_dim: + segmentation_map = segmentation_map.squeeze(0) + segmentation_map = segmentation_map.astype(np.int64) + + return segmentation_map, original_size + + def preprocess( + self, + images: ImageInput, + segmentation_maps: Optional[ImageInput] = None, + do_resize: Optional[bool] = None, + size: Optional[Dict[str, int]] = None, + mask_size: Optional[Dict[str, int]] = None, + resample: Optional["PILImageResampling"] = None, + do_rescale: Optional[bool] = None, + rescale_factor: Optional[Union[int, float]] = None, + do_normalize: Optional[bool] = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + do_pad: Optional[bool] = None, + pad_size: Optional[Dict[str, int]] = None, + mask_pad_size: Optional[Dict[str, int]] = None, + do_convert_rgb: Optional[bool] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + data_format: ChannelDimension = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ): + """ + Preprocess an image or batch of images. + + Args: + images (`ImageInput`): + Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If + passing in images with pixel values between 0 and 1, set `do_rescale=False`. + segmentation_maps (`ImageInput`, *optional*): + Segmentation map to preprocess. + do_resize (`bool`, *optional*, defaults to `self.do_resize`): + Whether to resize the image. + size (`Dict[str, int]`, *optional*, defaults to `self.size`): + Controls the size of the image after `resize`. 
The longest edge of the image is resized to + `size["longest_edge"]` whilst preserving the aspect ratio. + mask_size (`Dict[str, int]`, *optional*, defaults to `self.mask_size`): + Controls the size of the segmentation map after `resize`. The longest edge of the image is resized to + `size["longest_edge"]` whilst preserving the aspect ratio. + resample (`PILImageResampling`, *optional*, defaults to `self.resample`): + `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`. + do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): + Whether to rescale the image pixel values by rescaling factor. + rescale_factor (`int` or `float`, *optional*, defaults to `self.rescale_factor`): + Rescale factor to apply to the image pixel values. + do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): + Whether to normalize the image. + image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): + Image mean to normalize the image by if `do_normalize` is set to `True`. + image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): + Image standard deviation to normalize the image by if `do_normalize` is set to `True`. + do_pad (`bool`, *optional*, defaults to `self.do_pad`): + Whether to pad the image. + pad_size (`Dict[str, int]`, *optional*, defaults to `self.pad_size`): + Controls the size of the padding applied to the image. The image is padded to `pad_size["height"]` and + `pad_size["width"]` if `do_pad` is set to `True`. + mask_pad_size (`Dict[str, int]`, *optional*, defaults to `self.mask_pad_size`): + Controls the size of the padding applied to the segmentation map. The image is padded to + `mask_pad_size["height"]` and `mask_pad_size["width"]` if `do_pad` is set to `True`. + do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): + Whether to convert the image to RGB. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. + data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: Use the channel dimension format of the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
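Example (a minimal sketch; the zero-filled input and the resulting shape follow from the defaults documented above):

```python
>>> import numpy as np
>>> from transformers import SamImageProcessor

>>> processor = SamImageProcessor()
>>> image = np.zeros((480, 640, 3), dtype=np.uint8)
>>> encoded = processor.preprocess(image, return_tensors="np")
>>> encoded["pixel_values"].shape  # resized so the longest edge is 1024, then padded to 1024x1024
(1, 3, 1024, 1024)
```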
+ """ + do_resize = do_resize if do_resize is not None else self.do_resize + size = size if size is not None else self.size + size = get_size_dict(max_size=size, default_to_square=False) if not isinstance(size, dict) else size + mask_size = mask_size if mask_size is not None else self.mask_size + mask_size = ( + get_size_dict(max_size=mask_size, default_to_square=False) + if not isinstance(mask_size, dict) + else mask_size + ) + resample = resample if resample is not None else self.resample + do_rescale = do_rescale if do_rescale is not None else self.do_rescale + rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor + do_normalize = do_normalize if do_normalize is not None else self.do_normalize + image_mean = image_mean if image_mean is not None else self.image_mean + image_std = image_std if image_std is not None else self.image_std + do_pad = do_pad if do_pad is not None else self.do_pad + pad_size = pad_size if pad_size is not None else self.pad_size + pad_size = get_size_dict(pad_size, default_to_square=True) + mask_pad_size = mask_pad_size if mask_pad_size is not None else self.mask_pad_size + mask_pad_size = get_size_dict(mask_pad_size, default_to_square=True) + do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb + + images = make_list_of_images(images) + + validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) + + if not valid_images(images): + raise ValueError( + "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " + "torch.Tensor, tf.Tensor or jax.ndarray." + ) + + if segmentation_maps is not None: + segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2) + + if not valid_images(segmentation_maps): + raise ValueError( + "Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, " + "torch.Tensor, tf.Tensor or jax.ndarray." + ) + validate_preprocess_arguments( + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + do_pad=do_pad, + size_divisibility=pad_size, # Here _preprocess needs do_pad and pad_size. + do_resize=do_resize, + size=size, + resample=resample, + ) + + images, original_sizes, reshaped_input_sizes = zip( + *( + self._preprocess_image( + image=img, + do_resize=do_resize, + size=size, + resample=resample, + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + do_pad=do_pad, + pad_size=pad_size, + do_convert_rgb=do_convert_rgb, + data_format=data_format, + input_data_format=input_data_format, + ) + for img in images + ) + ) + + data = { + "pixel_values": images, + "original_sizes": original_sizes, + "reshaped_input_sizes": reshaped_input_sizes, + } + + if segmentation_maps is not None: + segmentation_maps, original_mask_sizes = zip( + *( + self._preprocess_mask( + segmentation_map=mask, + do_resize=do_resize, + mask_size=mask_size, + do_pad=do_pad, + mask_pad_size=mask_pad_size, + input_data_format=input_data_format, + ) + for mask in segmentation_maps + ) + ) + + # masks should start out the same size as input images + assert all( + original_im_size == original_mask_size + for original_im_size, original_mask_size in zip(original_sizes, original_mask_sizes) + ), "Segmentation maps should be the same size as input images." 
+ + data["labels"] = segmentation_maps + + return BatchFeature(data=data, tensor_type=return_tensors) + + def post_process_masks( + self, + masks, + original_sizes, + reshaped_input_sizes, + mask_threshold=0.0, + binarize=True, + pad_size=None, + return_tensors="pt", + ): + """ + Remove padding and upscale masks to the original image size. + + Args: + masks (`Union[List[torch.Tensor], List[np.ndarray], List[tf.Tensor]]`): + Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format. + original_sizes (`Union[torch.Tensor, tf.Tensor, List[Tuple[int,int]]]`): + The original sizes of each image before it was resized to the model's expected input shape, in (height, + width) format. + reshaped_input_sizes (`Union[torch.Tensor, tf.Tensor, List[Tuple[int,int]]]`): + The size of each image as it is fed to the model, in (height, width) format. Used to remove padding. + mask_threshold (`float`, *optional*, defaults to 0.0): + The threshold to use for binarizing the masks. + binarize (`bool`, *optional*, defaults to `True`): + Whether to binarize the masks. + pad_size (`int`, *optional*, defaults to `self.pad_size`): + The target size the images were padded to before being passed to the model. If None, the target size is + assumed to be the processor's `pad_size`. + return_tensors (`str`, *optional*, defaults to `"pt"`): + If `"pt"`, return PyTorch tensors. If `"tf"`, return TensorFlow tensors. + Returns: + (`Union[torch.Tensor, tf.Tensor]`): Batched masks in batch_size, num_channels, height, width) format, where + (height, width) is given by original_size. + """ + if return_tensors == "pt": + return self._post_process_masks_pt( + masks=masks, + original_sizes=original_sizes, + reshaped_input_sizes=reshaped_input_sizes, + mask_threshold=mask_threshold, + binarize=binarize, + pad_size=pad_size, + ) + elif return_tensors == "tf": + return self._post_process_masks_tf( + masks=masks, + original_sizes=original_sizes, + reshaped_input_sizes=reshaped_input_sizes, + mask_threshold=mask_threshold, + binarize=binarize, + pad_size=pad_size, + ) + else: + raise ValueError("return_tensors must be either 'pt' or 'tf'") + + def _post_process_masks_pt( + self, masks, original_sizes, reshaped_input_sizes, mask_threshold=0.0, binarize=True, pad_size=None + ): + """ + Remove padding and upscale masks to the original image size. + + Args: + masks (`Union[List[torch.Tensor], List[np.ndarray]]`): + Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format. + original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`): + The original sizes of each image before it was resized to the model's expected input shape, in (height, + width) format. + reshaped_input_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`): + The size of each image as it is fed to the model, in (height, width) format. Used to remove padding. + mask_threshold (`float`, *optional*, defaults to 0.0): + The threshold to use for binarizing the masks. + binarize (`bool`, *optional*, defaults to `True`): + Whether to binarize the masks. + pad_size (`int`, *optional*, defaults to `self.pad_size`): + The target size the images were padded to before being passed to the model. If None, the target size is + assumed to be the processor's `pad_size`. + Returns: + (`torch.Tensor`): Batched masks in batch_size, num_channels, height, width) format, where (height, width) + is given by original_size. 
+ """ + requires_backends(self, ["torch"]) + pad_size = self.pad_size if pad_size is None else pad_size + target_image_size = (pad_size["height"], pad_size["width"]) + if isinstance(original_sizes, (torch.Tensor, np.ndarray)): + original_sizes = original_sizes.tolist() + if isinstance(reshaped_input_sizes, (torch.Tensor, np.ndarray)): + reshaped_input_sizes = reshaped_input_sizes.tolist() + output_masks = [] + for i, original_size in enumerate(original_sizes): + if isinstance(masks[i], np.ndarray): + masks[i] = torch.from_numpy(masks[i]) + elif not isinstance(masks[i], torch.Tensor): + raise ValueError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`") + interpolated_mask = F.interpolate(masks[i], target_image_size, mode="bilinear", align_corners=False) + interpolated_mask = interpolated_mask[..., : reshaped_input_sizes[i][0], : reshaped_input_sizes[i][1]] + interpolated_mask = F.interpolate(interpolated_mask, original_size, mode="bilinear", align_corners=False) + if binarize: + interpolated_mask = interpolated_mask > mask_threshold + output_masks.append(interpolated_mask) + + return output_masks + + def _post_process_masks_tf( + self, masks, original_sizes, reshaped_input_sizes, mask_threshold=0.0, binarize=True, pad_size=None + ): + """ + Remove padding and upscale masks to the original image size. + + Args: + masks (`tf.Tensor`): + Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format. + original_sizes (`tf.Tensor`): + The original size of the images before resizing for input to the model, in (height, width) format. + reshaped_input_sizes (`tf.Tensor`): + The size of the image input to the model, in (height, width) format. Used to remove padding. + mask_threshold (`float`, *optional*, defaults to 0.0): + The threshold to use for binarizing the masks. + binarize (`bool`, *optional*, defaults to `True`): + Whether to binarize the masks. + pad_size (`int`, *optional*, defaults to `self.pad_size`): + The target size the images were padded to before being passed to the model. If None, the target size is + assumed to be the processor's `pad_size`. + Returns: + (`tf.Tensor`): Batched masks in batch_size, num_channels, height, width) format, where (height, width) is + given by original_size. + """ + requires_backends(self, ["tf"]) + pad_size = self.pad_size if pad_size is None else pad_size + target_image_size = (pad_size["height"], pad_size["width"]) + + output_masks = [] + for i, original_size in enumerate(original_sizes): + # tf.image expects NHWC, we transpose the NCHW inputs for it + mask = tf.transpose(masks[i], perm=[0, 2, 3, 1]) + interpolated_mask = tf.image.resize(mask, target_image_size, method="bilinear") + interpolated_mask = interpolated_mask[:, : reshaped_input_sizes[i][0], : reshaped_input_sizes[i][1], :] + interpolated_mask = tf.image.resize(interpolated_mask, original_size, method="bilinear") + if binarize: + interpolated_mask = interpolated_mask > mask_threshold + # And then we transpose them back at the end + output_masks.append(tf.transpose(interpolated_mask, perm=[0, 3, 1, 2])) + + return output_masks + + def post_process_for_mask_generation( + self, all_masks, all_scores, all_boxes, crops_nms_thresh, return_tensors="pt" + ): + """ + Post processes mask that are generated by calling the Non Maximum Suppression algorithm on the predicted masks. 
+ + Args: + all_masks (`Union[List[torch.Tensor], List[tf.Tensor]]`): + List of all predicted segmentation masks + all_scores (`Union[List[torch.Tensor], List[tf.Tensor]]`): + List of all predicted iou scores + all_boxes (`Union[List[torch.Tensor], List[tf.Tensor]]`): + List of all bounding boxes of the predicted masks + crops_nms_thresh (`float`): + Threshold for NMS (Non Maximum Suppression) algorithm. + return_tensors (`str`, *optional*, defaults to `pt`): + If `pt`, returns `torch.Tensor`. If `tf`, returns `tf.Tensor`. + """ + if return_tensors == "pt": + return _postprocess_for_mg(all_masks, all_scores, all_boxes, crops_nms_thresh) + elif return_tensors == "tf": + return _postprocess_for_mg_tf(all_masks, all_scores, all_boxes, crops_nms_thresh) + + def generate_crop_boxes( + self, + image, + target_size, + crop_n_layers: int = 0, + overlap_ratio: float = 512 / 1500, + points_per_crop: Optional[int] = 32, + crop_n_points_downscale_factor: Optional[List[int]] = 1, + device: Optional["torch.device"] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + return_tensors: str = "pt", + ): + """ + Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer. + + Args: + image (`np.array`): + Input original image + target_size (`int`): + Target size of the resized image + crop_n_layers (`int`, *optional*, defaults to 0): + If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where + each layer has 2**i_layer number of image crops. + overlap_ratio (`float`, *optional*, defaults to 512/1500): + Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of + the image length. Later layers with more crops scale down this overlap. + points_per_crop (`int`, *optional*, defaults to 32): + Number of points to sample from each crop. + crop_n_points_downscale_factor (`List[int]`, *optional*, defaults to 1): + The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. + device (`torch.device`, *optional*, defaults to None): + Device to use for the computation. If None, cpu will be used. + input_data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred. + return_tensors (`str`, *optional*, defaults to `pt`): + If `pt`, returns `torch.Tensor`. If `tf`, returns `tf.Tensor`. 
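As a quick check of the formula above, the total number of crop boxes for `crop_n_layers=2` is the full-image box plus the two crop grids (a plain-Python sketch):

```python
>>> sum((2**i) ** 2 for i in range(0, 3))  # layers 0, 1, 2 -> 1 + 4 + 16
21
```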
+ """ + crop_boxes, points_per_crop, cropped_images, input_labels = _generate_crop_boxes( + image, + target_size, + crop_n_layers, + overlap_ratio, + points_per_crop, + crop_n_points_downscale_factor, + input_data_format, + ) + if return_tensors == "pt": + if device is None: + device = torch.device("cpu") + crop_boxes = torch.tensor(crop_boxes, device=device) + points_per_crop = torch.tensor(points_per_crop, device=device) + # cropped_images stays as np + input_labels = torch.tensor(input_labels, device=device) + + elif return_tensors == "tf": + if device is not None: + raise ValueError("device is not a supported argument when return_tensors is tf!") + crop_boxes = tf.convert_to_tensor(crop_boxes) + points_per_crop = tf.convert_to_tensor(points_per_crop) + # cropped_images stays as np + input_labels = tf.convert_to_tensor(input_labels) + else: + raise ValueError("return_tensors must be either 'pt' or 'tf'.") + return crop_boxes, points_per_crop, cropped_images, input_labels + + def filter_masks( + self, + masks, + iou_scores, + original_size, + cropped_box_image, + pred_iou_thresh=0.88, + stability_score_thresh=0.95, + mask_threshold=0, + stability_score_offset=1, + return_tensors="pt", + ): + """ + Filters the predicted masks by selecting only the ones that meets several criteria. The first criterion being + that the iou scores needs to be greater than `pred_iou_thresh`. The second criterion is that the stability + score needs to be greater than `stability_score_thresh`. The method also converts the predicted masks to + bounding boxes and pad the predicted masks if necessary. + + Args: + masks (`Union[torch.Tensor, tf.Tensor]`): + Input masks. + iou_scores (`Union[torch.Tensor, tf.Tensor]`): + List of IoU scores. + original_size (`Tuple[int,int]`): + Size of the orginal image. + cropped_box_image (`np.array`): + The cropped image. + pred_iou_thresh (`float`, *optional*, defaults to 0.88): + The threshold for the iou scores. + stability_score_thresh (`float`, *optional*, defaults to 0.95): + The threshold for the stability score. + mask_threshold (`float`, *optional*, defaults to 0): + The threshold for the predicted masks. + stability_score_offset (`float`, *optional*, defaults to 1): + The offset for the stability score used in the `_compute_stability_score` method. + return_tensors (`str`, *optional*, defaults to `pt`): + If `pt`, returns `torch.Tensor`. If `tf`, returns `tf.Tensor`. + """ + if return_tensors == "pt": + return self._filter_masks_pt( + masks=masks, + iou_scores=iou_scores, + original_size=original_size, + cropped_box_image=cropped_box_image, + pred_iou_thresh=pred_iou_thresh, + stability_score_thresh=stability_score_thresh, + mask_threshold=mask_threshold, + stability_score_offset=stability_score_offset, + ) + elif return_tensors == "tf": + return self._filter_masks_tf( + masks=masks, + iou_scores=iou_scores, + original_size=original_size, + cropped_box_image=cropped_box_image, + pred_iou_thresh=pred_iou_thresh, + stability_score_thresh=stability_score_thresh, + mask_threshold=mask_threshold, + stability_score_offset=stability_score_offset, + ) + + def _filter_masks_pt( + self, + masks, + iou_scores, + original_size, + cropped_box_image, + pred_iou_thresh=0.88, + stability_score_thresh=0.95, + mask_threshold=0, + stability_score_offset=1, + ): + """ + Filters the predicted masks by selecting only the ones that meets several criteria. The first criterion being + that the iou scores needs to be greater than `pred_iou_thresh`. 
The second criterion is that the stability score
+        needs to be greater than `stability_score_thresh`. The method also converts the predicted masks to bounding
+        boxes and pads the predicted masks if necessary.
+
+        Args:
+            masks (`torch.Tensor`):
+                Input masks.
+            iou_scores (`torch.Tensor`):
+                List of IoU scores.
+            original_size (`Tuple[int,int]`):
+                Size of the original image.
+            cropped_box_image (`np.array`):
+                The cropped image.
+            pred_iou_thresh (`float`, *optional*, defaults to 0.88):
+                The threshold for the iou scores.
+            stability_score_thresh (`float`, *optional*, defaults to 0.95):
+                The threshold for the stability score.
+            mask_threshold (`float`, *optional*, defaults to 0):
+                The threshold for the predicted masks.
+            stability_score_offset (`float`, *optional*, defaults to 1):
+                The offset for the stability score used in the `_compute_stability_score` method.
+
+        """
+        requires_backends(self, ["torch"])
+        original_height, original_width = original_size
+        iou_scores = iou_scores.flatten(0, 1)
+        masks = masks.flatten(0, 1)
+
+        if masks.shape[0] != iou_scores.shape[0]:
+            raise ValueError("masks and iou_scores must have the same batch size.")
+
+        if masks.device != iou_scores.device:
+            iou_scores = iou_scores.to(masks.device)
+
+        batch_size = masks.shape[0]
+
+        keep_mask = torch.ones(batch_size, dtype=torch.bool, device=masks.device)
+
+        if pred_iou_thresh > 0.0:
+            keep_mask = keep_mask & (iou_scores > pred_iou_thresh)
+
+        # compute stability score
+        if stability_score_thresh > 0.0:
+            stability_scores = _compute_stability_score_pt(masks, mask_threshold, stability_score_offset)
+            keep_mask = keep_mask & (stability_scores > stability_score_thresh)
+
+        scores = iou_scores[keep_mask]
+        masks = masks[keep_mask]
+
+        # binarize masks
+        masks = masks > mask_threshold
+        converted_boxes = _batched_mask_to_box(masks)
+
+        keep_mask = ~_is_box_near_crop_edge(
+            converted_boxes, cropped_box_image, [0, 0, original_width, original_height]
+        )
+
+        scores = scores[keep_mask]
+        masks = masks[keep_mask]
+        converted_boxes = converted_boxes[keep_mask]
+
+        masks = _pad_masks(masks, cropped_box_image, original_height, original_width)
+        # conversion to rle is necessary to run non-maximum suppression
+        masks = _mask_to_rle_pytorch(masks)
+
+        return masks, scores, converted_boxes
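The two score gates above compose by conjunction. A minimal standalone sketch (dummy scores, default thresholds assumed; not part of the library) shows which masks survive:

```python
import torch

# Dummy per-mask scores; thresholds are the defaults used above.
iou_scores = torch.tensor([0.95, 0.80, 0.91])
stability_scores = torch.tensor([0.99, 0.97, 0.90])

keep_mask = torch.ones(3, dtype=torch.bool)
keep_mask &= iou_scores > 0.88        # pred_iou_thresh gate
keep_mask &= stability_scores > 0.95  # stability_score_thresh gate
print(keep_mask)  # tensor([ True, False, False]) -> only the first mask is kept
```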
+ + """ + requires_backends(self, ["tf"]) + original_height, original_width = original_size + iou_scores = tf.reshape(iou_scores, [iou_scores.shape[0] * iou_scores.shape[1], iou_scores.shape[2:]]) + masks = tf.reshape(masks, [masks.shape[0] * masks.shape[1], masks.shape[2:]]) + + if masks.shape[0] != iou_scores.shape[0]: + raise ValueError("masks and iou_scores must have the same batch size.") + + batch_size = masks.shape[0] + + keep_mask = tf.ones(batch_size, dtype=tf.bool) + + if pred_iou_thresh > 0.0: + keep_mask = keep_mask & (iou_scores > pred_iou_thresh) + + # compute stability score + if stability_score_thresh > 0.0: + stability_scores = _compute_stability_score_tf(masks, mask_threshold, stability_score_offset) + keep_mask = keep_mask & (stability_scores > stability_score_thresh) + + scores = iou_scores[keep_mask] + masks = masks[keep_mask] + + # binarize masks + masks = masks > mask_threshold + converted_boxes = _batched_mask_to_box_tf(masks) + + keep_mask = ~_is_box_near_crop_edge_tf( + converted_boxes, cropped_box_image, [0, 0, original_width, original_height] + ) + + scores = scores[keep_mask] + masks = masks[keep_mask] + converted_boxes = converted_boxes[keep_mask] + + masks = _pad_masks_tf(masks, cropped_box_image, original_height, original_width) + # conversion to rle is necessary to run non-maximum suppresion + masks = _mask_to_rle_tf(masks) + + return masks, scores, converted_boxes + + +def _compute_stability_score_pt(masks: "torch.Tensor", mask_threshold: float, stability_score_offset: int): + # One mask is always contained inside the other. + # Save memory by preventing unnecesary cast to torch.int64 + intersections = ( + (masks > (mask_threshold + stability_score_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32) + ) + unions = (masks > (mask_threshold - stability_score_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32) + stability_scores = intersections / unions + return stability_scores + + +def _compute_stability_score_tf(masks: "tf.Tensor", mask_threshold: float, stability_score_offset: int): + # Torch does Py3-style division but TF does floor division with ints. We cast to float32 in TF to make sure + # we get the right division results. + intersections = tf.count_nonzero( + masks > (mask_threshold + stability_score_offset), axis=[-1, -2], dtype=tf.float32 + ) + unions = tf.count_nonzero(masks > (mask_threshold - stability_score_offset), axis=[-1, -2], dtype=tf.float32) + stability_scores = intersections / unions + return stability_scores + + +def _build_point_grid(n_per_side: int) -> np.ndarray: + """Generates a 2D grid of points evenly spaced in [0,1]x[0,1].""" + offset = 1 / (2 * n_per_side) + points_one_side = np.linspace(offset, 1 - offset, n_per_side) + points_x = np.tile(points_one_side[None, :], (n_per_side, 1)) + points_y = np.tile(points_one_side[:, None], (1, n_per_side)) + points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2) + return points + + +def _normalize_coordinates( + target_size: int, coords: np.ndarray, original_size: Tuple[int, int], is_bounding_box=False +) -> np.ndarray: + """ + Expects a numpy array of length 2 in the final dimension. Requires the original image size in (height, width) + format. 
+ """ + old_height, old_width = original_size + + scale = target_size * 1.0 / max(old_height, old_width) + new_height, new_width = old_height * scale, old_width * scale + new_width = int(new_width + 0.5) + new_height = int(new_height + 0.5) + + coords = deepcopy(coords).astype(float) + + if is_bounding_box: + coords = coords.reshape(-1, 2, 2) + + coords[..., 0] = coords[..., 0] * (new_width / old_width) + coords[..., 1] = coords[..., 1] * (new_height / old_height) + + if is_bounding_box: + coords = coords.reshape(-1, 4) + + return coords + + +def _generate_crop_boxes( + image, + target_size: int, # Is it tuple here? + crop_n_layers: int = 0, + overlap_ratio: float = 512 / 1500, + points_per_crop: Optional[int] = 32, + crop_n_points_downscale_factor: Optional[List[int]] = 1, + input_data_format: Optional[Union[str, ChannelDimension]] = None, +) -> Tuple[List[List[int]], List[int]]: + """ + Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer. + + Args: + image (Union[`numpy.ndarray`, `PIL.Image`, `torch.Tensor`]): + Image to generate crops for. + target_size (`int`): + Size of the smallest crop. + crop_n_layers (`int`, *optional*): + If `crops_n_layers>0`, mask prediction will be run again on crops of the image. Sets the number of layers + to run, where each layer has 2**i_layer number of image crops. + overlap_ratio (`int`, *optional*): + Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the + image length. Later layers with more crops scale down this overlap. + points_per_crop (`int`, *optional*): + Number of points to sample per crop. + crop_n_points_downscale_factor (`int`, *optional*): + The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. + input_data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred. + """ + + if isinstance(image, list): + raise ValueError("Only one image is allowed for crop generation.") + image = to_numpy_array(image) + original_size = get_image_size(image, input_data_format) + + points_grid = [] + for i in range(crop_n_layers + 1): + n_points = int(points_per_crop / (crop_n_points_downscale_factor**i)) + points_grid.append(_build_point_grid(n_points)) + + crop_boxes, layer_idxs = _generate_per_layer_crops(crop_n_layers, overlap_ratio, original_size) + + cropped_images, point_grid_per_crop = _generate_crop_images( + crop_boxes, image, points_grid, layer_idxs, target_size, original_size, input_data_format + ) + crop_boxes = np.array(crop_boxes) + crop_boxes = crop_boxes.astype(np.float32) + points_per_crop = np.array([point_grid_per_crop]) + points_per_crop = np.transpose(points_per_crop, axes=(0, 2, 1, 3)) + + input_labels = np.ones_like(points_per_crop[:, :, :, 0], dtype=np.int64) + + return crop_boxes, points_per_crop, cropped_images, input_labels + + +def _generate_per_layer_crops(crop_n_layers, overlap_ratio, original_size): + """ + Generates 2 ** (layers idx + 1) crops for each crop_n_layers. 
+
+
+def _generate_per_layer_crops(crop_n_layers, overlap_ratio, original_size):
+    """
+    Generates 2**(i_layer+1) crops per side for each layer, i.e. (2**i)**2 boxes for layer i plus the full image as
+    layer 0. Crops are built in the XYXY format, which consists of the following required indices:
+        - X0: X coordinate of the top left of the bounding box
+        - Y0: Y coordinate of the top left of the bounding box
+        - X1: X coordinate of the bottom right of the bounding box
+        - Y1: Y coordinate of the bottom right of the bounding box
+    """
+    crop_boxes, layer_idxs = [], []
+    im_height, im_width = original_size
+    short_side = min(im_height, im_width)
+
+    # Original image
+    crop_boxes.append([0, 0, im_width, im_height])
+    layer_idxs.append(0)
+    for i_layer in range(crop_n_layers):
+        n_crops_per_side = 2 ** (i_layer + 1)
+        overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))
+
+        crop_width = int(math.ceil((overlap * (n_crops_per_side - 1) + im_width) / n_crops_per_side))
+        crop_height = int(math.ceil((overlap * (n_crops_per_side - 1) + im_height) / n_crops_per_side))
+
+        crop_box_x0 = [int((crop_width - overlap) * i) for i in range(n_crops_per_side)]
+        crop_box_y0 = [int((crop_height - overlap) * i) for i in range(n_crops_per_side)]
+
+        for left, top in product(crop_box_x0, crop_box_y0):
+            box = [left, top, min(left + crop_width, im_width), min(top + crop_height, im_height)]
+            crop_boxes.append(box)
+            layer_idxs.append(i_layer + 1)
+
+    return crop_boxes, layer_idxs
+
+
+def _generate_crop_images(
+    crop_boxes, image, points_grid, layer_idxs, target_size, original_size, input_data_format=None
+):
+    """
+    Takes as input bounding boxes that are used to crop the image. Based on the crops, the corresponding point grids
+    are scaled and normalized as well.
+    """
+    cropped_images = []
+    total_points_per_crop = []
+    for i, crop_box in enumerate(crop_boxes):
+        left, top, right, bottom = crop_box
+
+        channel_dim = infer_channel_dimension_format(image, input_data_format)
+        if channel_dim == ChannelDimension.LAST:
+            cropped_im = image[top:bottom, left:right, :]
+        else:
+            cropped_im = image[:, top:bottom, left:right]
+
+        cropped_images.append(cropped_im)
+
+        cropped_im_size = get_image_size(cropped_im, channel_dim)
+        points_scale = np.array(cropped_im_size)[None, ::-1]
+
+        points = points_grid[layer_idxs[i]] * points_scale
+        normalized_points = _normalize_coordinates(target_size, points, original_size)
+        total_points_per_crop.append(normalized_points)
+
+    return cropped_images, total_points_per_crop
+
+
+def _pad_masks(masks, crop_box: List[int], orig_height: int, orig_width: int):
+    left, top, right, bottom = crop_box
+    if left == 0 and top == 0 and right == orig_width and bottom == orig_height:
+        return masks
+    # Coordinate transform masks
+    pad_x, pad_y = orig_width - (right - left), orig_height - (bottom - top)
+    pad = (left, pad_x - left, top, pad_y - top)
+    return torch.nn.functional.pad(masks, pad, value=0)
+
+
+def _pad_masks_tf(masks, crop_box: List[int], orig_height: int, orig_width: int):
+    left, top, right, bottom = crop_box
+    if left == 0 and top == 0 and right == orig_width and bottom == orig_height:
+        return masks
+    # Coordinate transform masks. Unlike torch's flat pad tuple, tf.pad expects one
+    # (before, after) pair per dimension, so pad only the trailing height/width dims.
+    pad_x, pad_y = orig_width - (right - left), orig_height - (bottom - top)
+    paddings = [[0, 0]] * (len(masks.shape) - 2) + [[top, pad_y - top], [left, pad_x - left]]
+    return tf.pad(masks, paddings, constant_values=0)
+
+
+def _is_box_near_crop_edge(boxes, crop_box, orig_box, atol=20.0):
+    """Filter masks at the edge of a crop, but not at the edge of the original image."""
+    crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
+    orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
+
+    left, top, _, _ = crop_box
+    offset = torch.tensor([[left, top, left, top]], device=boxes.device)
+    # Check if boxes has a channel dimension
+    if len(boxes.shape) == 3:
+        offset = offset.unsqueeze(1)
+    boxes = (boxes + offset).float()
+
+    near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
+    near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
+    near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
+    return torch.any(near_crop_edge, dim=1)
+
+
+def _is_box_near_crop_edge_tf(boxes, crop_box, orig_box, atol=20.0):
+    """Filter masks at the edge of a crop, but not at the edge of the original image."""
+    crop_box_tf = tf.convert_to_tensor(crop_box, dtype=tf.float32)
+    orig_box_tf = tf.convert_to_tensor(orig_box, dtype=tf.float32)
+
+    left, top, _, _ = crop_box
+    offset = tf.convert_to_tensor([[left, top, left, top]])
+    # Check if boxes has a channel dimension
+    if len(boxes.shape) == 3:
+        offset = tf.expand_dims(offset, 1)
+    boxes = tf.cast(boxes + offset, tf.float32)
+
+    near_crop_edge = tnp.isclose(boxes, crop_box_tf[None, :], atol=atol, rtol=0)
+    near_image_edge = tnp.isclose(boxes, orig_box_tf[None, :], atol=atol, rtol=0)
+    near_crop_edge = tf.math.logical_and(near_crop_edge, ~near_image_edge)
+    return tf.reduce_any(near_crop_edge, axis=1)
+
+
+def _batched_mask_to_box(masks: "torch.Tensor"):
+    """
+    Computes the bounding boxes around the given input masks. The bounding boxes are in the XYXY format which
+    corresponds to the following required indices:
+        - LEFT: left hand side of the bounding box
+        - TOP: top of the bounding box
+        - RIGHT: right of the bounding box
+        - BOTTOM: bottom of the bounding box
+
+    Return [0,0,0,0] for an empty mask. For input shape channel_1 x channel_2 x ... x height x width, the output shape
+    is channel_1 x channel_2 x ... x 4.
+
+    Args:
+        - masks (`torch.Tensor` of shape `(batch, nb_mask, height, width)`)
+    """
+    # torch.max below raises an error on empty inputs, just skip in this case
+
+    if torch.numel(masks) == 0:
+        return torch.zeros(*masks.shape[:-2], 4, device=masks.device)
+
+    # Normalize shape to Cxheightxwidth
+    shape = masks.shape
+    height, width = shape[-2:]
+
+    # Get top and bottom edges
+    in_height, _ = torch.max(masks, dim=-1)
+    in_height_coords = in_height * torch.arange(height, device=in_height.device)[None, :]
+    bottom_edges, _ = torch.max(in_height_coords, dim=-1)
+    in_height_coords = in_height_coords + height * (~in_height)
+    top_edges, _ = torch.min(in_height_coords, dim=-1)
+
+    # Get left and right edges
+    in_width, _ = torch.max(masks, dim=-2)
+    in_width_coords = in_width * torch.arange(width, device=in_width.device)[None, :]
+    right_edges, _ = torch.max(in_width_coords, dim=-1)
+    in_width_coords = in_width_coords + width * (~in_width)
+    left_edges, _ = torch.min(in_width_coords, dim=-1)
+
+    # If the mask is empty the right edge will be to the left of the left edge.
+    # Replace these boxes with [0, 0, 0, 0]
+    empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
+    out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
+    out = out * (~empty_filter).unsqueeze(-1)
+
+    # Return to original shape
+    out = out.reshape(*shape[:-2], 4)
+    return out
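For intuition, a toy call to `_batched_mask_to_box` (the import of this private helper is an implementation detail of the module and may change; right/bottom are the last occupied indices):

```python
import torch
from transformers.models.sam.image_processing_sam import _batched_mask_to_box

mask = torch.zeros(1, 1, 4, 4, dtype=torch.bool)
mask[0, 0, 1:3, 1:4] = True  # occupied rows 1-2, columns 1-3
print(_batched_mask_to_box(mask))  # tensor([[[1, 1, 3, 2]]]) in XYXY order
```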
+
+
+def _batched_mask_to_box_tf(masks: "tf.Tensor"):
+    """
+    Computes the bounding boxes around the given input masks. The bounding boxes are in the XYXY format which
+    corresponds to the following required indices:
+        - LEFT: left hand side of the bounding box
+        - TOP: top of the bounding box
+        - RIGHT: right of the bounding box
+        - BOTTOM: bottom of the bounding box
+
+    Return [0,0,0,0] for an empty mask. For input shape channel_1 x channel_2 x ... x height x width, the output shape
+    is channel_1 x channel_2 x ... x 4.
+
+    Args:
+        - masks (`tf.Tensor` of shape `(batch, nb_mask, height, width)`)
+    """
+
+    if tf.size(masks) == 0:
+        return tf.zeros([*masks.shape[:-2], 4])
+
+    # Normalize shape to Cxheightxwidth
+    shape = shape_list(masks)
+    height, width = shape[-2:]
+
+    # Get top and bottom edges. The masks are boolean, so the reductions use
+    # reduce_any and the index arithmetic goes through explicit integer casts
+    # (TF does not promote bool the way torch does).
+    in_height = tf.reduce_any(masks, axis=-1)
+    in_height_coords = tf.cast(in_height, tf.int64) * tf.range(height, dtype=tf.int64)[None, :]
+    bottom_edges = tf.reduce_max(in_height_coords, axis=-1)
+    in_height_coords = in_height_coords + height * tf.cast(~in_height, tf.int64)
+    top_edges = tf.reduce_min(in_height_coords, axis=-1)
+
+    # Get left and right edges
+    in_width = tf.reduce_any(masks, axis=-2)
+    in_width_coords = tf.cast(in_width, tf.int64) * tf.range(width, dtype=tf.int64)[None, :]
+    right_edges = tf.reduce_max(in_width_coords, axis=-1)
+    in_width_coords = in_width_coords + width * tf.cast(~in_width, tf.int64)
+    left_edges = tf.reduce_min(in_width_coords, axis=-1)
+
+    # If the mask is empty the right edge will be to the left of the left edge.
+    # Replace these boxes with [0, 0, 0, 0]
+    empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
+    out = tf.stack([left_edges, top_edges, right_edges, bottom_edges], axis=-1)
+    out = out * tf.cast(tf.expand_dims(~empty_filter, -1), out.dtype)
+
+    # Return to original shape
+    out = tf.reshape(out, [*shape[:-2], 4])
+    return out
+
+
+def _mask_to_rle_pytorch(input_mask: "torch.Tensor"):
+    """
+    Encodes masks to the run-length encoding (RLE) format expected by pycoco tools.
+    """
+    # Put in fortran order and flatten height and width
+    batch_size, height, width = input_mask.shape
+    input_mask = input_mask.permute(0, 2, 1).flatten(1)
+
+    # Compute change indices
+    diff = input_mask[:, 1:] ^ input_mask[:, :-1]
+    change_indices = diff.nonzero()
+
+    # Encode run length
+    out = []
+    for i in range(batch_size):
+        cur_idxs = change_indices[change_indices[:, 0] == i, 1] + 1
+        if len(cur_idxs) == 0:
+            # No transitions: the mask is uniformly 0 or uniformly 1
+            counts = [height * width] if input_mask[i, 0] == 0 else [0, height * width]
+            out.append({"size": [height, width], "counts": counts})
+            continue
+        btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
+        counts = [] if input_mask[i, 0] == 0 else [0]
+        counts += [cur_idxs[0].item()] + btw_idxs.tolist() + [height * width - cur_idxs[-1].item()]
+        out.append({"size": [height, width], "counts": counts})
+    return out
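A round-trip through this uncompressed RLE format (column-major order, with counts alternating background/foreground runs); `_rle_to_mask` is defined just below, and importing these private helpers is again an implementation detail:

```python
import torch
from transformers.models.sam.image_processing_sam import _mask_to_rle_pytorch, _rle_to_mask

mask = torch.tensor([[0, 1, 1], [0, 1, 0]], dtype=torch.bool).unsqueeze(0)  # (1, 2, 3)
rle = _mask_to_rle_pytorch(mask)[0]
print(rle)  # {'size': [2, 3], 'counts': [2, 3, 1]}: 2 off, 3 on, 1 off, column-major
restored = torch.from_numpy(_rle_to_mask(rle))
assert bool((restored == mask[0]).all())
```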
+ """ + # Put in fortran order and flatten height and width + batch_size, height, width = input_mask.shape + input_mask = flatten(tf.transpose(input_mask, perm=(0, 2, 1)), 1) + + # Compute change indices + diff = input_mask[:, 1:] ^ input_mask[:, :-1] + change_indices = tf.where(diff) + + # Encode run length + out = [] + for i in range(batch_size): + cur_idxs = change_indices[change_indices[:, 0] == i, 1] + 1 + btw_idxs = cur_idxs[1:] - cur_idxs[:-1] + counts = [] if input_mask[i, 0] == 0 else [0] + counts += [cur_idxs[0].item()] + btw_idxs.tolist() + [height * width - cur_idxs[-1]] + out.append({"size": [height, width], "counts": counts}) + return out + + +def _rle_to_mask(rle: Dict[str, Any]) -> np.ndarray: + """Compute a binary mask from an uncompressed RLE.""" + height, width = rle["size"] + mask = np.empty(height * width, dtype=bool) + idx = 0 + parity = False + for count in rle["counts"]: + mask[idx : idx + count] = parity + idx += count + parity = not parity + mask = mask.reshape(width, height) + return mask.transpose() # Reshape to original shape + + +def _postprocess_for_mg(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7): + """ + Perform NMS (Non Maximum Suppression) on the outputs. + + Args: + rle_masks (`torch.Tensor`): + binary masks in the RLE format + iou_scores (`torch.Tensor` of shape (nb_masks, 1)): + iou_scores predicted by the model + mask_boxes (`torch.Tensor`): + The bounding boxes corresponding to segmentation masks + amg_crops_nms_thresh (`float`, *optional*, defaults to 0.7): + NMS threshold. + """ + keep_by_nms = batched_nms( + boxes=mask_boxes.float(), + scores=iou_scores, + idxs=torch.zeros(mask_boxes.shape[0]), + iou_threshold=amg_crops_nms_thresh, + ) + + iou_scores = iou_scores[keep_by_nms] + rle_masks = [rle_masks[i] for i in keep_by_nms] + mask_boxes = mask_boxes[keep_by_nms] + masks = [_rle_to_mask(rle) for rle in rle_masks] + + return masks, iou_scores, rle_masks, mask_boxes + + +def _postprocess_for_mg_tf(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7): + """ + Perform NMS (Non Maximum Suppression) on the outputs. + + Args: + rle_masks (`tf.Tensor`): + binary masks in the RLE format + iou_scores (`tf.Tensor` of shape (nb_masks, 1)): + iou_scores predicted by the model + mask_boxes (`tf.Tensor`): + The bounding boxes corresponding to segmentation masks + amg_crops_nms_thresh (`float`, *optional*, defaults to 0.7): + NMS threshold. + """ + keep_by_nms = tf.image.combined_non_max_suppression( + boxes=mask_boxes.float(), + scores=iou_scores, + idxs=torch.zeros(mask_boxes.shape[0]), + iou_threshold=amg_crops_nms_thresh, + ) + + iou_scores = iou_scores[keep_by_nms] + rle_masks = [rle_masks[i] for i in keep_by_nms] + mask_boxes = mask_boxes[keep_by_nms] + masks = [_rle_to_mask(rle) for rle in rle_masks] + + return masks, iou_scores, rle_masks, mask_boxes diff --git a/venv/lib/python3.10/site-packages/transformers/models/sam/modeling_sam.py b/venv/lib/python3.10/site-packages/transformers/models/sam/modeling_sam.py new file mode 100644 index 0000000000000000000000000000000000000000..385fb9c00aea4f4df7c6ea062159ab8de08df7d9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/sam/modeling_sam.py @@ -0,0 +1,1415 @@ +# coding=utf-8 +# Copyright 2023 The Meta AI Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch SAM model.""" + +import collections +import math +from dataclasses import dataclass +from typing import Dict, List, Optional, Tuple, Union + +import numpy as np +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import Tensor, nn + +from ...activations import ACT2FN +from ...modeling_outputs import BaseModelOutput +from ...modeling_utils import PreTrainedModel +from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging +from .configuration_sam import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "SamConfig" +_CHECKPOINT_FOR_DOC = "facebook/sam-vit-huge" + + +from ..deprecated._archive_maps import SAM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +@dataclass +class SamVisionEncoderOutput(ModelOutput): + """ + Base class for sam vision model's outputs that also contains image embeddings obtained by applying the projection + layer to the pooler_output. + + Args: + image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): + The image embeddings obtained by applying the projection layer to the pooler_output. + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + image_embeds: Optional[torch.FloatTensor] = None + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class SamImageSegmentationOutput(ModelOutput): + """ + Base class for Segment-Anything model's output + + Args: + iou_scores (`torch.FloatTensor` of shape `(batch_size, num_masks)`): + The iou scores of the predicted masks. + pred_masks (`torch.FloatTensor` of shape `(batch_size, num_masks, height, width)`): + The predicted low resolutions masks. 
Needs to be post-processed by the processor + vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the vision model at the output of each layer plus the optional initial embedding outputs. + vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + mask_decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + iou_scores: torch.FloatTensor = None + pred_masks: torch.FloatTensor = None + vision_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + vision_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + mask_decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + + +class SamPatchEmbeddings(nn.Module): + """ + This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial + `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a + Transformer. + """ + + def __init__(self, config): + super().__init__() + image_size, patch_size = config.image_size, config.patch_size + num_channels, hidden_size = config.num_channels, config.hidden_size + image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) + patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) + num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.num_patches = num_patches + + self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) + + def forward(self, pixel_values): + batch_size, num_channels, height, width = pixel_values.shape + if num_channels != self.num_channels: + raise ValueError( + "Make sure that the channel dimension of the pixel values match with the one set in the configuration." + ) + if height != self.image_size[0] or width != self.image_size[1]: + raise ValueError( + f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})." 
+ ) + embeddings = self.projection(pixel_values).permute(0, 2, 3, 1) + return embeddings + + +class SamMLPBlock(nn.Module): + def __init__(self, config): + super().__init__() + self.lin1 = nn.Linear(config.hidden_size, config.mlp_dim) + self.lin2 = nn.Linear(config.mlp_dim, config.hidden_size) + self.act = ACT2FN[config.hidden_act] + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.lin1(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.lin2(hidden_states) + return hidden_states + + +# Copied from transformers.models.convnext.modeling_convnext.ConvNextLayerNorm with ConvNext->Sam +class SamLayerNorm(nn.Module): + r"""LayerNorm that supports two data formats: channels_last (default) or channels_first. + The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, + width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). + """ + + def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"): + super().__init__() + self.weight = nn.Parameter(torch.ones(normalized_shape)) + self.bias = nn.Parameter(torch.zeros(normalized_shape)) + self.eps = eps + self.data_format = data_format + if self.data_format not in ["channels_last", "channels_first"]: + raise NotImplementedError(f"Unsupported data format: {self.data_format}") + self.normalized_shape = (normalized_shape,) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if self.data_format == "channels_last": + x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) + elif self.data_format == "channels_first": + input_dtype = x.dtype + x = x.float() + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = x.to(dtype=input_dtype) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x + + +class SamAttention(nn.Module): + """ + SAM's attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and + values. 
+ """ + + def __init__(self, config, downsample_rate=None): + super().__init__() + self.hidden_size = config.hidden_size + + downsample_rate = config.attention_downsample_rate if downsample_rate is None else downsample_rate + + self.internal_dim = config.hidden_size // downsample_rate + self.num_attention_heads = config.num_attention_heads + if self.internal_dim % config.num_attention_heads != 0: + raise ValueError("num_attention_heads must divide hidden_size.") + + self.q_proj = nn.Linear(self.hidden_size, self.internal_dim) + self.k_proj = nn.Linear(self.hidden_size, self.internal_dim) + self.v_proj = nn.Linear(self.hidden_size, self.internal_dim) + self.out_proj = nn.Linear(self.internal_dim, self.hidden_size) + + def _separate_heads(self, hidden_states: Tensor, num_attention_heads: int) -> Tensor: + batch, point_batch_size, n_tokens, channel = hidden_states.shape + c_per_head = channel // num_attention_heads + hidden_states = hidden_states.reshape(batch * point_batch_size, n_tokens, num_attention_heads, c_per_head) + return hidden_states.transpose(1, 2) + + def _recombine_heads(self, hidden_states: Tensor, point_batch_size: int) -> Tensor: + batch, n_heads, n_tokens, c_per_head = hidden_states.shape + hidden_states = hidden_states.transpose(1, 2) + return hidden_states.reshape(batch // point_batch_size, point_batch_size, n_tokens, n_heads * c_per_head) + + def forward(self, query: Tensor, key: Tensor, value: Tensor, attention_similarity: Tensor = None) -> Tensor: + # Input projections + query = self.q_proj(query) + key = self.k_proj(key) + value = self.v_proj(value) + + point_batch_size = query.shape[1] + # Separate into heads + query = self._separate_heads(query, self.num_attention_heads) + key = self._separate_heads(key, self.num_attention_heads) + value = self._separate_heads(value, self.num_attention_heads) + + # SamAttention + _, _, _, c_per_head = query.shape + attn = query @ key.permute(0, 1, 3, 2) # batch_size * point_batch_size x N_heads x N_tokens x N_tokens + attn = attn / math.sqrt(c_per_head) + attn = torch.softmax(attn, dim=-1) + + if attention_similarity is not None: + attn = attn + attention_similarity + attn = torch.softmax(attn, dim=-1) + + # Get output + out = attn @ value + out = self._recombine_heads(out, point_batch_size) + out = self.out_proj(out) + + return out + + +class SamTwoWayAttentionBlock(nn.Module): + def __init__(self, config, attention_downsample_rate: int = 2, skip_first_layer_pe: bool = False): + """ + A transformer block with four layers: + (1) self-attention of sparse inputs (2) cross attention of sparse inputs -> dense inputs (3) mlp block on + sparse inputs (4) cross attention of dense inputs -> sparse inputs + + Arguments: + config (`SamMaskDecoderConfig`): + The configuration file used to instantiate the block + attention_downsample_rate (*optionalk*, int, defaults to 2): + The downsample ratio of the block used to reduce the inner dim of the attention. + skip_first_layer_pe (*optional*, bool, defaults to `False`): + Whether or not to skip the addition of the query_point_embedding on the first layer. 
+ """ + super().__init__() + + self.hidden_size = config.hidden_size + self.layer_norm_eps = config.layer_norm_eps + + self.self_attn = SamAttention(config, downsample_rate=1) + self.layer_norm1 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps) + + self.cross_attn_token_to_image = SamAttention(config, downsample_rate=attention_downsample_rate) + self.layer_norm2 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps) + + self.mlp = SamMLPBlock(config) + self.layer_norm3 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps) + + self.layer_norm4 = nn.LayerNorm(self.hidden_size, eps=self.layer_norm_eps) + self.cross_attn_image_to_token = SamAttention(config, downsample_rate=attention_downsample_rate) + + self.skip_first_layer_pe = skip_first_layer_pe + + def forward( + self, + queries: Tensor, + keys: Tensor, + query_point_embedding: Tensor, + key_point_embedding: Tensor, + attention_similarity: Tensor, + output_attentions: bool = False, + ): + # Self attention block + if self.skip_first_layer_pe: + queries = self.self_attn(query=queries, key=queries, value=queries) + else: + query = queries + query_point_embedding + attn_out = self.self_attn(query=query, key=query, value=queries) + queries = queries + attn_out + queries = self.layer_norm1(queries) + + # Cross attention block, tokens attending to image embedding + query = queries + query_point_embedding + key = keys + key_point_embedding + + attn_out = self.cross_attn_token_to_image( + query=query, key=key, value=keys, attention_similarity=attention_similarity + ) + queries = queries + attn_out + + queries = self.layer_norm2(queries) + + # MLP block + mlp_out = self.mlp(queries) + queries = queries + mlp_out + queries = self.layer_norm3(queries) + + # Cross attention block, image embedding attending to tokens + query = queries + query_point_embedding + key = keys + key_point_embedding + + attn_out = self.cross_attn_image_to_token(query=key, key=query, value=queries) + keys = keys + attn_out + + keys = self.layer_norm4(keys) + + outputs = (queries, keys) + + if output_attentions: + outputs = outputs + (attn_out,) + else: + outputs = outputs + (None,) + + return outputs + + +class SamTwoWayTransformer(nn.Module): + def __init__(self, config: SamMaskDecoderConfig): + super().__init__() + self.config = config + + self.num_hidden_layers = config.num_hidden_layers + self.layers = nn.ModuleList() + + for i in range(self.num_hidden_layers): + self.layers.append(SamTwoWayAttentionBlock(config, skip_first_layer_pe=(i == 0))) + + self.final_attn_token_to_image = SamAttention(config) + self.layer_norm_final_attn = nn.LayerNorm(config.hidden_size) + + def forward( + self, + point_embeddings: Tensor, + image_embeddings: Tensor, + image_positional_embeddings: Tensor, + attention_similarity: Tensor, + target_embedding=None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutput]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + all_attentions = () + + if image_embeddings is None: + raise ValueError("You have to specify an image_embedding") + + image_embeddings = image_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1) + image_positional_embeddings = 
image_positional_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1) + + # Prepare queries + queries = point_embeddings + keys = image_embeddings + + # Apply transformer blocks and final layernorm + for layer in self.layers: + if target_embedding is not None: + queries += target_embedding + + queries, keys, attention_outputs = layer( + queries=queries, + keys=keys, + query_point_embedding=point_embeddings, + key_point_embedding=image_positional_embeddings, + attention_similarity=attention_similarity, + output_attentions=output_attentions, + ) + + if output_attentions: + all_attentions = all_attentions + (attention_outputs,) + + # Apply the final attenion layer from the points to the image + query = queries + point_embeddings + key = keys + image_positional_embeddings + + attn_out = self.final_attn_token_to_image(query=query, key=key, value=keys) + + queries = queries + attn_out + queries = self.layer_norm_final_attn(queries) + return queries, keys, all_attentions + + +class SamFeedForward(nn.Module): + def __init__( + self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int, sigmoid_output: bool = False + ): + super().__init__() + self.num_layers = num_layers + self.activation = nn.ReLU() + self.proj_in = nn.Linear(input_dim, hidden_dim) + self.proj_out = nn.Linear(hidden_dim, output_dim) + self.layers = nn.ModuleList([nn.Linear(hidden_dim, hidden_dim) for _ in range(num_layers - 2)]) + self.sigmoid_output = sigmoid_output + + def forward(self, hidden_states): + hidden_states = self.proj_in(hidden_states) + hidden_states = self.activation(hidden_states) + for layer in self.layers: + hidden_states = self.activation(layer(hidden_states)) + + hidden_states = self.proj_out(hidden_states) + if self.sigmoid_output: + hidden_states = F.sigmoid(hidden_states) + return hidden_states + + +class SamMaskDecoder(nn.Module): + def __init__(self, config: SamMaskDecoderConfig): + super().__init__() + + self.hidden_size = config.hidden_size + + self.num_multimask_outputs = config.num_multimask_outputs + self.num_mask_tokens = config.num_multimask_outputs + 1 + + self.iou_token = nn.Embedding(1, self.hidden_size) + self.mask_tokens = nn.Embedding(self.num_mask_tokens, self.hidden_size) + + self.transformer = SamTwoWayTransformer(config) + + # should we create a new class for this? + self.upscale_conv1 = nn.ConvTranspose2d(self.hidden_size, self.hidden_size // 4, kernel_size=2, stride=2) + self.upscale_conv2 = nn.ConvTranspose2d(self.hidden_size // 4, self.hidden_size // 8, kernel_size=2, stride=2) + self.upscale_layer_norm = SamLayerNorm(self.hidden_size // 4, data_format="channels_first") + self.activation = nn.GELU() + + mlps_list = [] + for _ in range(self.num_mask_tokens): + mlps_list += [SamFeedForward(self.hidden_size, self.hidden_size, self.hidden_size // 8, 3)] + self.output_hypernetworks_mlps = nn.ModuleList(mlps_list) + + self.iou_prediction_head = SamFeedForward( + self.hidden_size, config.iou_head_hidden_dim, self.num_mask_tokens, config.iou_head_depth + ) + + def forward( + self, + image_embeddings: torch.Tensor, + image_positional_embeddings: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + multimask_output: bool, + output_attentions: Optional[bool] = None, + attention_similarity: torch.Tensor = None, + target_embedding: torch.Tensor = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Predict masks given image and prompt embeddings. 
+ + Args: + image_embeddings (`torch.Tensor`): + the embeddings from the image encoder + image_positional_embedding (`torch.Tensor`): + positional encoding with the shape of image_embeddings + sparse_prompt_embeddings (`torch.Tensor`): + The embeddings of the points and boxes + dense_prompt_embeddings (`torch.Tensor`): + the embeddings of the mask inputs + multimask_output (bool): + Whether to return multiple masks or a single mask. + output_attentions (bool, *optional*): + Whether or not to return the attentions tensors of all attention layers. + """ + batch_size, num_channels, height, width = image_embeddings.shape + point_batch_size = sparse_prompt_embeddings.shape[1] + # Concatenate output tokens + output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0) + output_tokens = output_tokens.repeat(batch_size, point_batch_size, 1, 1) + + if sparse_prompt_embeddings.sum().item() != 0: + tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=2) + else: + tokens = output_tokens + point_embeddings = tokens.to(self.iou_token.weight.dtype) + + # Expand per-image data in batch direction to be per-point + image_embeddings = image_embeddings + dense_prompt_embeddings + image_embeddings = image_embeddings.repeat_interleave(point_batch_size, 0) + image_positional_embeddings = image_positional_embeddings.repeat_interleave(point_batch_size, 0) + + # Run the transformer, image_positional_embedding are consumed + point_embedding, image_embeddings, attentions = self.transformer( + point_embeddings=point_embeddings, + image_embeddings=image_embeddings, + image_positional_embeddings=image_positional_embeddings, + attention_similarity=attention_similarity, + target_embedding=target_embedding, + output_attentions=output_attentions, + ) + iou_token_out = point_embedding[:, :, 0, :] + mask_tokens_out = point_embedding[:, :, 1 : (1 + self.num_mask_tokens), :] + + # Upscale mask embeddings and predict masks using the mask tokens + image_embeddings = image_embeddings.transpose(2, 3).reshape( + batch_size * point_batch_size, num_channels, height, width + ) + + upscaled_embedding = self.upscale_conv1(image_embeddings) + upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding)) + upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding)) + + hyper_in_list = [] + for i in range(self.num_mask_tokens): + current_mlp = self.output_hypernetworks_mlps[i] + hyper_in_list += [current_mlp(mask_tokens_out[:, :, i, :])] + hyper_in = torch.stack(hyper_in_list, dim=2) + + _, num_channels, height, width = upscaled_embedding.shape + upscaled_embedding = upscaled_embedding.reshape(batch_size, point_batch_size, num_channels, height * width) + masks = (hyper_in @ upscaled_embedding).reshape(batch_size, point_batch_size, -1, height, width) + + # Generate mask quality predictions + iou_pred = self.iou_prediction_head(iou_token_out) + + # Select the correct mask or masks for output + if multimask_output: + mask_slice = slice(1, None) + else: + mask_slice = slice(0, 1) + masks = masks[:, :, mask_slice, :, :] + iou_pred = iou_pred[:, :, mask_slice] + + outputs = (masks, iou_pred) + + if output_attentions: + outputs = outputs + (attentions,) + else: + outputs = outputs + (None,) + + return outputs + + +class SamPositionalEmbedding(nn.Module): + def __init__(self, config): + super().__init__() + self.scale = config.hidden_size // 2 + self.register_buffer("positional_embedding", self.scale * torch.randn((2, config.num_pos_feats))) + + def forward(self, input_coords, 
input_shape=None): + """Positionally encode points that are normalized to [0,1].""" + coordinates = input_coords.clone() + + if input_shape is not None: + coordinates[:, :, :, 0] = coordinates[:, :, :, 0] / input_shape[1] + coordinates[:, :, :, 1] = coordinates[:, :, :, 1] / input_shape[0] + + # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape + coordinates = 2 * coordinates - 1 + coordinates = coordinates.to(self.positional_embedding.dtype) + coordinates = coordinates @ self.positional_embedding + coordinates = 2 * np.pi * coordinates + # outputs d_1 x ... x d_n x channel shape + return torch.cat([torch.sin(coordinates), torch.cos(coordinates)], dim=-1) + + +class SamMaskEmbedding(nn.Module): + def __init__(self, config: SamPromptEncoderConfig): + super().__init__() + self.mask_input_channels = config.mask_input_channels // 4 + self.activation = ACT2FN[config.hidden_act] + self.conv1 = nn.Conv2d(1, self.mask_input_channels, kernel_size=2, stride=2) + self.conv2 = nn.Conv2d(self.mask_input_channels, config.mask_input_channels, kernel_size=2, stride=2) + self.conv3 = nn.Conv2d(config.mask_input_channels, config.hidden_size, kernel_size=1) + self.layer_norm1 = SamLayerNorm( + self.mask_input_channels, eps=config.layer_norm_eps, data_format="channels_first" + ) + self.layer_norm2 = SamLayerNorm( + self.mask_input_channels * 4, eps=config.layer_norm_eps, data_format="channels_first" + ) + + def forward(self, masks): + hidden_states = self.conv1(masks) + hidden_states = self.layer_norm1(hidden_states) + hidden_states = self.activation(hidden_states) + + hidden_states = self.conv2(hidden_states) + hidden_states = self.layer_norm2(hidden_states) + hidden_states = self.activation(hidden_states) + dense_embeddings = self.conv3(hidden_states) + return dense_embeddings + + +class SamPromptEncoder(nn.Module): + def __init__(self, config: SamPromptEncoderConfig, shared_patch_embedding): + super().__init__() + self.shared_embedding = shared_patch_embedding + self.mask_embed = SamMaskEmbedding(config) + self.no_mask_embed = nn.Embedding(1, config.hidden_size) + + self.image_embedding_size = (config.image_embedding_size, config.image_embedding_size) + self.input_image_size = config.image_size + + self.point_embed = nn.ModuleList( + [nn.Embedding(1, config.hidden_size) for i in range(config.num_point_embeddings)] + ) + self.hidden_size = config.hidden_size + self.not_a_point_embed = nn.Embedding(1, config.hidden_size) + + def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor: + """Embeds point prompts.""" + points = points + 0.5 # Shift to center of pixel + if pad: + target_point_shape = (points.shape[0], points.shape[1], 1, points.shape[-1]) + target_labels_shape = (points.shape[0], points.shape[1], 1) + padding_point = torch.zeros(target_point_shape, device=points.device) + padding_label = -torch.ones(target_labels_shape, device=labels.device) + points = torch.cat([points, padding_point], dim=2) + labels = torch.cat([labels, padding_label], dim=2) + input_shape = (self.input_image_size, self.input_image_size) + point_embedding = self.shared_embedding(points, input_shape) + + # torch.where and expanding the labels tensor is required by the ONNX export + point_embedding = torch.where(labels[..., None] == -1, self.not_a_point_embed.weight, point_embedding) + + # This is required for the ONNX export. 
The dtype, device need to be explicitely + # specificed as otherwise torch.onnx.export interprets as double + point_embedding = torch.where( + labels[..., None] != -10, + point_embedding, + torch.tensor(0.0, dtype=point_embedding.dtype, device=point_embedding.device), + ) + + point_embedding = torch.where( + (labels == 0)[:, :, :, None], + point_embedding + self.point_embed[0].weight[None, None, :, :], + point_embedding, + ) + + point_embedding = torch.where( + (labels == 1)[:, :, :, None], + point_embedding + self.point_embed[1].weight[None, None, :, :], + point_embedding, + ) + + return point_embedding + + def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor: + """Embeds box prompts.""" + boxes = boxes + 0.5 # Shift to center of pixel + batch_size, nb_boxes = boxes.shape[:2] + coords = boxes.reshape(batch_size, nb_boxes, 2, 2) + input_shape = (self.input_image_size, self.input_image_size) + corner_embedding = self.shared_embedding(coords, input_shape) + corner_embedding[:, :, 0, :] += self.point_embed[2].weight + corner_embedding[:, :, 1, :] += self.point_embed[3].weight + return corner_embedding + + def forward( + self, + input_points: Optional[Tuple[torch.Tensor, torch.Tensor]], + input_labels: Optional[torch.Tensor], + input_boxes: Optional[torch.Tensor], + input_masks: Optional[torch.Tensor], + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Embeds different types of prompts, returning both sparse and dense embeddings. + + Args: + points (`torch.Tensor`, *optional*): + point coordinates and labels to embed. + boxes (`torch.Tensor`, *optional*): + boxes to embed + masks (`torch.Tensor`, *optional*): + masks to embed + """ + sparse_embeddings = None + batch_size = 1 + target_device = self.shared_embedding.positional_embedding.device + if input_points is not None: + batch_size, point_batch_size = input_points.shape[:2] + if input_labels is None: + raise ValueError("If points are provided, labels must also be provided.") + point_embeddings = self._embed_points(input_points, input_labels, pad=(input_boxes is None)) + sparse_embeddings = point_embeddings + if input_boxes is not None: + batch_size = input_boxes.shape[0] + box_embeddings = self._embed_boxes(input_boxes) + if sparse_embeddings is None: + sparse_embeddings = box_embeddings + else: + sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=2) + if input_masks is not None: + dense_embeddings = self.mask_embed(input_masks) + else: + dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand( + batch_size, -1, self.image_embedding_size[0], self.image_embedding_size[1] + ) + + if sparse_embeddings is None: + sparse_embeddings = torch.zeros((batch_size, 1, 1, self.hidden_size), device=target_device) + + return sparse_embeddings, dense_embeddings + + +class SamVisionAttention(nn.Module): + """Multi-head Attention block with relative position embeddings.""" + + def __init__(self, config, window_size): + super().__init__() + input_size = ( + (config.image_size // config.patch_size, config.image_size // config.patch_size) + if window_size == 0 + else (window_size, window_size) + ) + + self.num_attention_heads = config.num_attention_heads + head_dim = config.hidden_size // config.num_attention_heads + self.scale = head_dim**-0.5 + self.dropout = config.attention_dropout + + self.qkv = nn.Linear(config.hidden_size, config.hidden_size * 3, bias=config.qkv_bias) + self.proj = nn.Linear(config.hidden_size, config.hidden_size) + + self.use_rel_pos = config.use_rel_pos + if self.use_rel_pos: + if input_size 
is None: + raise ValueError("Input size must be provided if using relative positional encoding.") + + # initialize relative positional embeddings + self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim)) + self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim)) + + def get_rel_pos(self, q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor: + """ + Get relative positional embeddings according to the relative positions of + query and key sizes. + + Args: + q_size (int): + size of the query. + k_size (int): + size of key k. + rel_pos (`torch.Tensor`): + relative position embeddings (L, channel). + + Returns: + Extracted positional embeddings according to relative positions. + """ + max_rel_dist = int(2 * max(q_size, k_size) - 1) + # Interpolate rel pos. + rel_pos_resized = F.interpolate( + rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), + size=max_rel_dist, + mode="linear", + ) + rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) + + # Scale the coords with short length if shapes for q and k are different. + q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) + k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) + relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) + + return rel_pos_resized[relative_coords.long()] + + def add_decomposed_rel_pos( + self, + attn: torch.Tensor, + query: torch.Tensor, + rel_pos_h: torch.Tensor, + rel_pos_w: torch.Tensor, + q_size: Tuple[int, int], + k_size: Tuple[int, int], + ) -> torch.Tensor: + """ + Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. + https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py + + Args: + attn (`torch.Tensor`): + attention map. + query (`torch.Tensor`): + query q in the attention layer with shape (batch_size, query_height * query_width, channel). + rel_pos_h (`torch.Tensor`): + relative position embeddings (Lh, channel) for height axis. + rel_pos_w (`torch.Tensor`): + relative position embeddings (Lw, channel) for width axis. + q_size (tuple): + spatial sequence size of query q with (query_height, query_width). + k_size (tuple): + spatial sequence size of key k with (key_height, key_width). + + Returns: + attn (`torch.Tensor`): + attention map with added relative positional embeddings. 
+ """ + query_height, query_width = q_size + key_height, key_width = k_size + relative_position_height = self.get_rel_pos(query_height, key_height, rel_pos_h) + relative_position_width = self.get_rel_pos(query_width, key_width, rel_pos_w) + + batch_size, _, dim = query.shape + reshaped_query = query.reshape(batch_size, query_height, query_width, dim) + rel_h = torch.einsum("bhwc,hkc->bhwk", reshaped_query, relative_position_height) + rel_w = torch.einsum("bhwc,wkc->bhwk", reshaped_query, relative_position_width) + attn = attn.reshape(batch_size, query_height, query_width, key_height, key_width) + attn = attn + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :] + attn = attn.reshape(batch_size, query_height * query_width, key_height * key_width) + return attn + + def forward(self, hidden_states: torch.Tensor, output_attentions=False) -> torch.Tensor: + batch_size, height, width, _ = hidden_states.shape + # qkv with shape (3, batch_size, nHead, height * width, channel) + qkv = ( + self.qkv(hidden_states) + .reshape(batch_size, height * width, 3, self.num_attention_heads, -1) + .permute(2, 0, 3, 1, 4) + ) + # q, k, v with shape (batch_size * nHead, height * width, channel) + query, key, value = qkv.reshape(3, batch_size * self.num_attention_heads, height * width, -1).unbind(0) + + attn_weights = (query * self.scale) @ key.transpose(-2, -1) + + if self.use_rel_pos: + attn_weights = self.add_decomposed_rel_pos( + attn_weights, query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width) + ) + + attn_weights = torch.nn.functional.softmax(attn_weights, dtype=torch.float32, dim=-1).to(query.dtype) + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = (attn_probs @ value).reshape(batch_size, self.num_attention_heads, height, width, -1) + attn_output = attn_output.permute(0, 2, 3, 1, 4).reshape(batch_size, height, width, -1) + + attn_output = self.proj(attn_output) + + if output_attentions: + outputs = (attn_output, attn_weights) + else: + outputs = (attn_output, None) + + return outputs + + +class SamVisionLayer(nn.Module): + def __init__(self, config, window_size): + super().__init__() + self.layer_norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.attn = SamVisionAttention(config, window_size) + self.layer_norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.mlp = SamMLPBlock(config) + self.window_size = window_size + + def window_partition(self, hidden_states: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]: + """ + Args: + Partition into non-overlapping windows with padding if needed. + hidden_states (tensor): input tokens with [batch_size, height, width, channel]. window_size (int): window + size. + + Returns: + windows: windows after partition with [batch_size * num_windows, window_size, window_size, channel]. 
+ (pad_height, pad_width): padded height and width before partition + """ + batch_size, height, width, channel = hidden_states.shape + + pad_h = (window_size - height % window_size) % window_size + pad_w = (window_size - width % window_size) % window_size + hidden_states = F.pad(hidden_states, (0, 0, 0, pad_w, 0, pad_h)) + pad_height, pad_width = height + pad_h, width + pad_w + + hidden_states = hidden_states.reshape( + batch_size, pad_height // window_size, window_size, pad_width // window_size, window_size, channel + ) + windows = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().reshape(-1, window_size, window_size, channel) + return windows, (pad_height, pad_width) + + def window_unpartition( + self, windows: torch.Tensor, window_size: int, padding_shape: Tuple[int, int], original_shape: Tuple[int, int] + ) -> torch.Tensor: + """ + Args: + Window unpartition into original sequences and removing padding. + hidden_states (tensor): + input tokens with [batch_size * num_windows, window_size, window_size, channel]. + window_size (int): + window size. + padding_shape (Tuple): + padded height and width (pad_height, pad_width). + original_shape (Tuple): original height and width (height, width) before padding. + + Returns: + hidden_states: unpartitioned sequences with [batch_size, height, width, channel]. + """ + pad_height, pad_width = padding_shape + height, width = original_shape + batch_size = windows.shape[0] // (pad_height * pad_width // window_size // window_size) + hidden_states = windows.reshape( + batch_size, pad_height // window_size, pad_width // window_size, window_size, window_size, -1 + ) + hidden_states = ( + hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().reshape(batch_size, pad_height, pad_width, -1) + ) + + hidden_states = hidden_states[:, :height, :width, :].contiguous() + return hidden_states + + def forward( + self, + hidden_states: torch.Tensor, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor]: + residual = hidden_states + + hidden_states = self.layer_norm1(hidden_states) + # Window partition + if self.window_size > 0: + height, width = hidden_states.shape[1], hidden_states.shape[2] + hidden_states, padding_shape = self.window_partition(hidden_states, self.window_size) + + hidden_states, attn_weights = self.attn( + hidden_states=hidden_states, + output_attentions=output_attentions, + ) + # Reverse window partition + if self.window_size > 0: + hidden_states = self.window_unpartition(hidden_states, self.window_size, padding_shape, (height, width)) + + hidden_states = residual + hidden_states + layernorm_output = self.layer_norm2(hidden_states) + hidden_states = hidden_states + self.mlp(layernorm_output) + + outputs = (hidden_states,) + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +class SamVisionNeck(nn.Module): + def __init__(self, config: SamVisionConfig): + super().__init__() + self.config = config + + self.conv1 = nn.Conv2d(config.hidden_size, config.output_channels, kernel_size=1, bias=False) + self.layer_norm1 = SamLayerNorm(config.output_channels, data_format="channels_first") + self.conv2 = nn.Conv2d(config.output_channels, config.output_channels, kernel_size=3, padding=1, bias=False) + self.layer_norm2 = SamLayerNorm(config.output_channels, data_format="channels_first") + + def forward(self, hidden_states): + hidden_states = hidden_states.permute(0, 3, 1, 2) + hidden_states = self.conv1(hidden_states) + hidden_states = self.layer_norm1(hidden_states) + + hidden_states = self.conv2(hidden_states) + 
hidden_states = self.layer_norm2(hidden_states) + return hidden_states + + +class SamVisionEncoder(nn.Module): + def __init__(self, config: SamVisionConfig): + super().__init__() + self.config = config + self.image_size = config.image_size + + self.patch_embed = SamPatchEmbeddings(config) + + self.pos_embed = None + if config.use_abs_pos: + # Initialize absolute positional embedding with pretrain image size. + self.pos_embed = nn.Parameter( + torch.zeros( + 1, + config.image_size // config.patch_size, + config.image_size // config.patch_size, + config.hidden_size, + ) + ) + + self.layers = nn.ModuleList() + for i in range(config.num_hidden_layers): + layer = SamVisionLayer( + config, + window_size=config.window_size if i not in config.global_attn_indexes else 0, + ) + self.layers.append(layer) + + self.neck = SamVisionNeck(config) + + self.gradient_checkpointing = False + + def get_input_embeddings(self): + return self.patch_embed + + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SamVisionEncoderOutput]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + hidden_states = self.patch_embed(pixel_values) + if self.pos_embed is not None: + hidden_states = hidden_states + self.pos_embed + + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + + for i, layer_module in enumerate(self.layers): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + layer_module.__call__, + hidden_states, + ) + else: + layer_outputs = layer_module(hidden_states, output_attentions=output_attentions) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + hidden_states = self.neck(hidden_states) + + if not return_dict: + outputs = (hidden_states,) + if output_hidden_states: + outputs = outputs + (all_hidden_states,) + if output_attentions: + outputs = outputs + (all_self_attentions,) + return outputs + + return SamVisionEncoderOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) + + +class SamPreTrainedModel(PreTrainedModel): + config_class = SamConfig + base_model_prefix = "sam" + main_input_name = "pixel_values" + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + +SAM_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`SamConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +SAM_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`SamProcessor`]. See [`SamProcessor.__call__`] for + details. + input_points (`torch.FloatTensor` of shape `(batch_size, num_points, 2)`): + Input 2D spatial points, this is used by the prompt encoder to encode the prompt. Generally yields to much + better results. The points can be obtained by passing a list of list of list to the processor that will + create corresponding `torch` tensors of dimension 4. The first dimension is the image batch size, the + second dimension is the point batch size (i.e. how many segmentation masks do we want the model to predict + per input point), the third dimension is the number of points per segmentation mask (it is possible to pass + multiple points for a single mask), and the last dimension is the x (vertical) and y (horizontal) + coordinates of the point. If a different number of points is passed either for each image, or for each + mask, the processor will create "PAD" points that will correspond to the (0, 0) coordinate, and the + computation of the embedding will be skipped for these points using the labels. + input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points)`): + Input labels for the points, this is used by the prompt encoder to encode the prompt. According to the + official implementation, there are 3 types of labels + + - `1`: the point is a point that contains the object of interest + - `0`: the point is a point that does not contain the object of interest + - `-1`: the point corresponds to the background + + We added the label: + + - `-10`: the point is a padding point, thus should be ignored by the prompt encoder + + The padding labels should be automatically done by the processor. + input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`): + Input boxes for the points, this is used by the prompt encoder to encode the prompt. Generally yields to + much better generated masks. The boxes can be obtained by passing a list of list of list to the processor, + that will generate a `torch` tensor, with each dimension corresponding respectively to the image batch + size, the number of boxes per image and the coordinates of the top left and botton right point of the box. 
+            In the order (`x1`, `y1`, `x2`, `y2`):
+
+            - `x1`: the x coordinate of the top left point of the input box
+            - `y1`: the y coordinate of the top left point of the input box
+            - `x2`: the x coordinate of the bottom right point of the input box
+            - `y2`: the y coordinate of the bottom right point of the input box
+
+        input_masks (`torch.FloatTensor` of shape `(batch_size, image_size, image_size)`):
+            The SAM model also accepts segmentation masks as input. The mask will be embedded by the prompt encoder
+            to generate a corresponding embedding that will later be fed to the mask decoder. These masks need to be
+            manually fed by the user, and they need to be of shape (`batch_size`, `image_size`, `image_size`).
+
+        image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_channels, window_size, window_size)`):
+            Image embeddings, used by the mask decoder to generate masks and iou scores. For more memory
+            efficient computation, users can first retrieve the image embeddings using the `get_image_embeddings`
+            method, and then feed them to the `forward` method instead of feeding the `pixel_values`.
+        multimask_output (`bool`, *optional*):
+            In the original implementation and paper, the model always outputs 3 masks per image (or per point / per
+            bounding box if relevant). However, it is possible to output a single mask, corresponding to the
+            "best" mask, by specifying `multimask_output=False`.
+        attention_similarity (`torch.FloatTensor`, *optional*):
+            Attention similarity tensor, to be provided to the mask decoder for target-guided attention in case the
+            model is used for personalization as introduced in [PerSAM](https://arxiv.org/abs/2305.03048).
+        target_embedding (`torch.FloatTensor`, *optional*):
+            Embedding of the target concept, to be provided to the mask decoder for target-semantic prompting in case
+            the model is used for personalization as introduced in [PerSAM](https://arxiv.org/abs/2305.03048).
+        output_attentions (`bool`, *optional*):
+            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+            tensors for more detail.
+        output_hidden_states (`bool`, *optional*):
+            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+            more detail.
+        return_dict (`bool`, *optional*):
+            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+""" + + +@add_start_docstrings( + "Segment Anything Model (SAM) for generating segmentation masks, given an input image and ", + " optional 2D location and bounding boxes.", + SAM_START_DOCSTRING, +) +class SamModel(SamPreTrainedModel): + _tied_weights_keys = ["prompt_encoder.shared_embedding.positional_embedding"] + + def __init__(self, config): + super().__init__(config) + self.shared_image_embedding = SamPositionalEmbedding(config.vision_config) + + self.vision_encoder = SamVisionEncoder(config.vision_config) + self.prompt_encoder = SamPromptEncoder(config.prompt_encoder_config, self.shared_image_embedding) + self.mask_decoder = SamMaskDecoder(config.mask_decoder_config) + + self.post_init() + + def get_input_embeddings(self): + return self.vision_encoder.get_input_embeddings() + + def get_image_wide_positional_embeddings(self): + size = self.config.prompt_encoder_config.image_embedding_size + target_device = self.shared_image_embedding.positional_embedding.device + target_dtype = self.shared_image_embedding.positional_embedding.dtype + grid = torch.ones((size, size), device=target_device, dtype=target_dtype) + y_embed = grid.cumsum(dim=0) - 0.5 + x_embed = grid.cumsum(dim=1) - 0.5 + y_embed = y_embed / size + x_embed = x_embed / size + + positional_embedding = self.shared_image_embedding(torch.stack([x_embed, y_embed], dim=-1)) + return positional_embedding.permute(2, 0, 1).unsqueeze(0) # channel x height x width + + @torch.no_grad() + def get_image_embeddings( + self, + pixel_values, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + r""" + Returns the image embeddings by passing the pixel values through the vision encoder. + + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Input pixel values + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + + """ + vision_output = self.vision_encoder( + pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + image_embeddings = vision_output[0] + return image_embeddings + + @torch.no_grad() + def get_prompt_embeddings( + self, + input_points: Optional[torch.FloatTensor] = None, + input_labels: Optional[torch.LongTensor] = None, + input_boxes: Optional[torch.FloatTensor] = None, + input_masks: Optional[torch.LongTensor] = None, + ): + r""" + Returns the prompt embeddings by passing the input points, labels, boxes and masks through the prompt encoder. + + Args: + input_points (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_points_per_image, 2)`): + Optional input points for the prompt encoder. The padding of the point is automatically done by the + processor. `point_batch_size` refers to the number of masks that we want the model to predict per + point. The model will output `point_batch_size` times 3 masks in total. + input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points_per_image)`): + Optional input labels for the prompt encoder. The padding of the labels is automatically done by the + processor, or can be fed by the user. 
+ input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes_per_image, 4)`): + Optional input boxes for the prompt encoder. The padding of the boxes is automatically done by the + processor. users can also pass manually the input boxes. + input_masks (`torch.LongTensor` of shape `(batch_size, image_size, image_size)`): + Optional input masks for the prompt encoder. + """ + prompt_output = self.prompt_encoder( + input_points=input_points, + input_labels=input_labels, + input_boxes=input_boxes, + input_masks=input_masks, + ) + return prompt_output + + @add_start_docstrings_to_model_forward(SAM_INPUTS_DOCSTRING) + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + input_points: Optional[torch.FloatTensor] = None, + input_labels: Optional[torch.LongTensor] = None, + input_boxes: Optional[torch.FloatTensor] = None, + input_masks: Optional[torch.LongTensor] = None, + image_embeddings: Optional[torch.FloatTensor] = None, + multimask_output: bool = True, + attention_similarity: Optional[torch.FloatTensor] = None, + target_embedding: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **kwargs, + ) -> List[Dict[str, torch.Tensor]]: + r""" + Example: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoModel, AutoProcessor + + >>> model = AutoModel.from_pretrained("facebook/sam-vit-base") + >>> processor = AutoProcessor.from_pretrained("facebook/sam-vit-base") + + >>> img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/sam-car.png" + >>> raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") + >>> input_points = [[[400, 650]]] # 2D location of a window on the car + >>> inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt") + + >>> # Get segmentation mask + >>> outputs = model(**inputs) + + >>> # Postprocess masks + >>> masks = processor.post_process_masks( + ... outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"] + ... ) + ``` + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None and image_embeddings is None: + raise ValueError("Either pixel_values or image_embeddings must be provided.") + + if pixel_values is not None and image_embeddings is not None: + raise ValueError("Only one of pixel_values and image_embeddings can be provided.") + + if input_points is not None and len(input_points.shape) != 4: + raise ValueError( + "The input_points must be a 4D tensor. Of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.", + " got {}.".format(input_points.shape), + ) + if input_boxes is not None and len(input_boxes.shape) != 3: + raise ValueError( + "The input_points must be a 3D tensor. Of shape `batch_size`, `nb_boxes`, `4`.", + " got {}.".format(input_boxes.shape), + ) + if input_points is not None and input_boxes is not None: + point_batch_size = input_points.shape[1] + box_batch_size = input_boxes.shape[1] + if point_batch_size != box_batch_size: + raise ValueError( + "You should provide as many bounding boxes as input points per box. 
Got {} and {}.".format( + point_batch_size, box_batch_size + ) + ) + + image_positional_embeddings = self.get_image_wide_positional_embeddings() + # repeat with batch size + batch_size = pixel_values.shape[0] if pixel_values is not None else image_embeddings.shape[0] + image_positional_embeddings = image_positional_embeddings.repeat(batch_size, 1, 1, 1) + + vision_attentions = None + vision_hidden_states = None + + if pixel_values is not None: + vision_outputs = self.vision_encoder( + pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + image_embeddings = vision_outputs[0] + + if output_hidden_states: + vision_hidden_states = vision_outputs[1] + if output_attentions: + vision_attentions = vision_outputs[-1] + + if input_points is not None and input_labels is None: + input_labels = torch.ones_like(input_points[:, :, :, 0], dtype=torch.int, device=input_points.device) + + if input_points is not None and image_embeddings.shape[0] != input_points.shape[0]: + raise ValueError( + "The batch size of the image embeddings and the input points must be the same. ", + "Got {} and {} respectively.".format(image_embeddings.shape[0], input_points.shape[0]), + " if you want to pass multiple points for the same image, make sure that you passed ", + " input_points of shape (batch_size, point_batch_size, num_points_per_image, 3) and ", + " input_labels of shape (batch_size, point_batch_size, num_points_per_image)", + ) + + sparse_embeddings, dense_embeddings = self.prompt_encoder( + input_points=input_points, + input_labels=input_labels, + input_boxes=input_boxes, + input_masks=input_masks, + ) + + low_res_masks, iou_predictions, mask_decoder_attentions = self.mask_decoder( + image_embeddings=image_embeddings, + image_positional_embeddings=image_positional_embeddings, + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + attention_similarity=attention_similarity, + target_embedding=target_embedding, + output_attentions=output_attentions, + ) + + if not return_dict: + output = (iou_predictions, low_res_masks) + if output_hidden_states: + output = output + (vision_hidden_states,) + + if output_attentions: + output = output + (vision_attentions, mask_decoder_attentions) + return output + + return SamImageSegmentationOutput( + iou_scores=iou_predictions, + pred_masks=low_res_masks, + vision_hidden_states=vision_hidden_states, + vision_attentions=vision_attentions, + mask_decoder_attentions=mask_decoder_attentions, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/sam/modeling_tf_sam.py b/venv/lib/python3.10/site-packages/transformers/models/sam/modeling_tf_sam.py new file mode 100644 index 0000000000000000000000000000000000000000..f527337cd6cdaa8c653272795ae98fe4deadaa0f --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/sam/modeling_tf_sam.py @@ -0,0 +1,1656 @@ +# coding=utf-8 +# Copyright 2023 The Meta AI Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+TensorFlow SAM model. This file was mostly generated by auto-translation from the PyTorch original. In the event of a
+discrepancy, the original file should be regarded as the 'reference' version.
+"""
+
+
+from __future__ import annotations
+
+import collections
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import ACT2FN
+from ...modeling_tf_outputs import TFBaseModelOutput
+from ...modeling_tf_utils import TFModelInputType, TFPreTrainedModel, keras, shape_list, unpack_inputs
+from ...tf_utils import flatten, functional_layernorm
+from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_sam import SamConfig, SamMaskDecoderConfig, SamPromptEncoderConfig, SamVisionConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "SamConfig"
+_CHECKPOINT_FOR_DOC = "facebook/sam-vit-huge"
+
+
+from ..deprecated._archive_maps import TF_SAM_PRETRAINED_MODEL_ARCHIVE_LIST  # noqa: F401, E402
+
+
+@dataclass
+class TFSamVisionEncoderOutput(ModelOutput):
+    """
+    Base class for the SAM vision model's outputs that also contains image embeddings obtained by applying the
+    projection layer to the pooler_output.
+
+    Args:
+        image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`, *optional*, returned when the model is initialized with `with_projection=True`):
+            The image embeddings obtained by applying the projection layer to the pooler_output.
+        last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+            Sequence of hidden-states at the output of the last layer of the model.
+        hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
+            the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+        attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`.
+
+            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+            heads.
+    """
+
+    image_embeds: tf.Tensor | None = None
+    last_hidden_state: tf.Tensor = None
+    hidden_states: Tuple[tf.Tensor, ...] | None = None
+    attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+@dataclass
+class TFSamImageSegmentationOutput(ModelOutput):
+    """
+    Base class for the Segment-Anything model's output.
+
+    Args:
+        iou_scores (`tf.Tensor` of shape `(batch_size, num_masks)`):
+            The iou scores of the predicted masks.
+        pred_masks (`tf.Tensor` of shape `(batch_size, num_masks, height, width)`):
+            The predicted low-resolution masks. They need to be post-processed by the processor.
+        vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for
+            the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+            Hidden-states of the vision model at the output of each layer plus the optional initial embedding outputs.
+        vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`.
+
+            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+            heads.
+        mask_decoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+            sequence_length)`.
+
+            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+            heads.
+    """
+
+    iou_scores: tf.Tensor = None
+    pred_masks: tf.Tensor = None
+    vision_hidden_states: Tuple[tf.Tensor, ...] | None = None
+    vision_attentions: Tuple[tf.Tensor, ...] | None = None
+    mask_decoder_attentions: Tuple[tf.Tensor, ...] | None = None
+
+
+class TFSamPatchEmbeddings(keras.layers.Layer):
+    """
+    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
+    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
+    Transformer.
+    """
+
+    def __init__(self, config, **kwargs):
+        super().__init__(**kwargs)
+        image_size, patch_size = config.image_size, config.patch_size
+        num_channels, hidden_size = config.num_channels, config.hidden_size
+        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
+        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
+        self.image_size = image_size
+        self.patch_size = patch_size
+        self.num_channels = num_channels
+        self.num_patches = num_patches
+
+        self.projection = keras.layers.Conv2D(
+            hidden_size, kernel_size=patch_size, strides=patch_size, name="projection"
+        )
+
+    def call(self, pixel_values):
+        batch_size, num_channels, height, width = shape_list(pixel_values)
+        if num_channels != self.num_channels:
+            raise ValueError(
+                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+            )
+        if height != self.image_size[0] or width != self.image_size[1]:
+            raise ValueError(
+                f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
+ ) + embeddings = self.projection(tf.transpose(pixel_values, perm=[0, 2, 3, 1])) + return embeddings + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "projection", None) is not None: + with tf.name_scope(self.projection.name): + self.projection.build([None, None, None, self.num_channels]) + + +class TFSamMLPBlock(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + self.lin1 = keras.layers.Dense(config.mlp_dim, name="lin1") + self.lin2 = keras.layers.Dense(config.hidden_size, name="lin2") + self.act = ACT2FN[config.hidden_act] + self.config = config + + def call(self, hidden_states: tf.Tensor) -> tf.Tensor: + hidden_states = self.lin1(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.lin2(hidden_states) + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "lin1", None) is not None: + with tf.name_scope(self.lin1.name): + self.lin1.build([None, None, self.config.hidden_size]) + if getattr(self, "lin2", None) is not None: + with tf.name_scope(self.lin2.name): + self.lin2.build([None, None, self.config.mlp_dim]) + + +class TFSamLayerNorm(keras.layers.Layer): + r"""LayerNorm that supports two data formats: channels_last (default) or channels_first. + The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, + width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). + """ + + def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last", **kwargs): + super().__init__(**kwargs) + self.eps = eps + self.data_format = data_format + self.normalized_shape = normalized_shape + if self.data_format not in ["channels_last", "channels_first"]: + raise NotImplementedError(f"Unsupported data format: {self.data_format}") + + def build(self, input_shape): + self.weight = self.add_weight(shape=self.normalized_shape, initializer="ones", name="weight") + self.bias = self.add_weight(shape=self.normalized_shape, initializer="zeros", name="bias") + super().build(input_shape) + + def call(self, x: tf.Tensor) -> tf.Tensor: + if self.data_format == "channels_last": + x = functional_layernorm(x, weight=self.weight, bias=self.bias, epsilon=self.eps, axis=-1) + elif self.data_format == "channels_first": + x = functional_layernorm(x, weight=self.weight, bias=self.bias, epsilon=self.eps, axis=1) + return x + + +class TFSamAttention(keras.layers.Layer): + """ + SAM's attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and + values. 
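+
+    As a rough sketch (toy shapes, not taken from this file), the head split performed by `_separate_heads`
+    first folds the point batch into the batch axis, then splits the channels across heads:
+
+    ```python
+    >>> import tensorflow as tf
+
+    >>> x = tf.random.normal((1, 2, 5, 8))  # (batch, point_batch, tokens, channels)
+    >>> heads = tf.transpose(tf.reshape(x, (1 * 2, 5, 2, 4)), perm=[0, 2, 1, 3])  # 2 heads of 4 channels
+    >>> heads.shape
+    TensorShape([2, 2, 5, 4])
+    ```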
+ """ + + def __init__(self, config, downsample_rate=None, **kwargs): + super().__init__(**kwargs) + self.hidden_size = config.hidden_size + + downsample_rate = config.attention_downsample_rate if downsample_rate is None else downsample_rate + + self.internal_dim = config.hidden_size // downsample_rate + self.num_attention_heads = config.num_attention_heads + if self.internal_dim % config.num_attention_heads != 0: + raise ValueError("num_attention_heads must divide hidden_size.") + + self.q_proj = keras.layers.Dense(self.internal_dim, name="q_proj") + self.k_proj = keras.layers.Dense(self.internal_dim, name="k_proj") + self.v_proj = keras.layers.Dense(self.internal_dim, name="v_proj") + self.out_proj = keras.layers.Dense(self.hidden_size, name="out_proj") + + def _separate_heads(self, hidden_states: tf.Tensor, num_attention_heads: int) -> tf.Tensor: + batch, point_batch_size, n_tokens, channel = shape_list(hidden_states) + c_per_head = channel // num_attention_heads + hidden_states = tf.reshape( + hidden_states, (batch * point_batch_size, n_tokens, num_attention_heads, c_per_head) + ) + return tf.transpose(hidden_states, perm=[0, 2, 1, 3]) + + def _recombine_heads(self, hidden_states: tf.Tensor, point_batch_size: int) -> tf.Tensor: + batch, n_heads, n_tokens, c_per_head = shape_list(hidden_states) + hidden_states = tf.transpose(hidden_states, perm=[0, 2, 1, 3]) + return tf.reshape( + hidden_states, + (batch // tf.reduce_max([1, point_batch_size]), point_batch_size, n_tokens, n_heads * c_per_head), + ) + + def call(self, query: tf.Tensor, key: tf.Tensor, value: tf.Tensor) -> tf.Tensor: + # Input projections + query = self.q_proj(query) + key = self.k_proj(key) + value = self.v_proj(value) + + point_batch_size = shape_list(query)[1] + # Separate into heads + query = self._separate_heads(query, self.num_attention_heads) + key = self._separate_heads(key, self.num_attention_heads) + value = self._separate_heads(value, self.num_attention_heads) + + # SamAttention + _, _, _, c_per_head = shape_list(query) + attn = tf.matmul( + query, tf.transpose(key, perm=[0, 1, 3, 2]) + ) # batch_size * point_batch_size x N_heads x N_tokens x N_tokens + attn = attn / tf.math.sqrt(float(c_per_head)) + attn = tf.nn.softmax(attn, axis=-1) + + # Get output + out = tf.matmul(attn, value) + out = self._recombine_heads(out, point_batch_size) + out = self.out_proj(out) + + return out + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "q_proj", None) is not None: + with tf.name_scope(self.q_proj.name): + self.q_proj.build([None, None, self.hidden_size]) + if getattr(self, "k_proj", None) is not None: + with tf.name_scope(self.k_proj.name): + self.k_proj.build([None, None, self.hidden_size]) + if getattr(self, "v_proj", None) is not None: + with tf.name_scope(self.v_proj.name): + self.v_proj.build([None, None, self.hidden_size]) + if getattr(self, "out_proj", None) is not None: + with tf.name_scope(self.out_proj.name): + self.out_proj.build([None, None, self.internal_dim]) + + +class TFSamTwoWayAttentionBlock(keras.layers.Layer): + def __init__(self, config, attention_downsample_rate: int = 2, skip_first_layer_pe: bool = False, **kwargs): + """ + A transformer block with four layers: + (1) self-attention of sparse inputs (2) cross attention of sparse inputs -> dense inputs (3) mlp block on + sparse inputs (4) cross attention of dense inputs -> sparse inputs + + Arguments: + config (`SamMaskDecoderConfig`): + The configuration file used to instantiate the block + 
attention_downsample_rate (`int`, *optional*, defaults to 2):
+                The downsample ratio of the block used to reduce the inner dim of the attention.
+            skip_first_layer_pe (`bool`, *optional*, defaults to `False`):
+                Whether or not to skip the addition of the query_point_embedding on the first layer.
+        """
+        super().__init__(**kwargs)
+
+        self.hidden_size = config.hidden_size
+        self.layer_norm_eps = config.layer_norm_eps
+
+        self.self_attn = TFSamAttention(config, downsample_rate=1, name="self_attn")
+        self.layer_norm1 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm1")
+
+        self.cross_attn_token_to_image = TFSamAttention(
+            config, downsample_rate=attention_downsample_rate, name="cross_attn_token_to_image"
+        )
+        self.layer_norm2 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm2")
+
+        self.mlp = TFSamMLPBlock(config, name="mlp")
+        self.layer_norm3 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm3")
+
+        self.layer_norm4 = keras.layers.LayerNormalization(epsilon=self.layer_norm_eps, name="layer_norm4")
+        self.cross_attn_image_to_token = TFSamAttention(
+            config, downsample_rate=attention_downsample_rate, name="cross_attn_image_to_token"
+        )
+
+        self.skip_first_layer_pe = skip_first_layer_pe
+
+    def call(
+        self,
+        queries: tf.Tensor,
+        keys: tf.Tensor,
+        query_point_embedding: tf.Tensor,
+        key_point_embedding: tf.Tensor,
+        output_attentions: bool = False,
+    ):
+        # Self attention block
+        if self.skip_first_layer_pe:
+            queries = self.self_attn(query=queries, key=queries, value=queries)
+        else:
+            query = queries + query_point_embedding
+            attn_out = self.self_attn(query=query, key=query, value=queries)
+            queries = queries + attn_out
+        queries = self.layer_norm1(queries)
+
+        # Cross attention block, tokens attending to image embedding
+        query = queries + query_point_embedding
+        key = keys + key_point_embedding
+
+        attn_out = self.cross_attn_token_to_image(query=query, key=key, value=keys)
+        queries = queries + attn_out
+
+        queries = self.layer_norm2(queries)
+
+        # MLP block
+        mlp_out = self.mlp(queries)
+        queries = queries + mlp_out
+        queries = self.layer_norm3(queries)
+
+        # Cross attention block, image embedding attending to tokens
+        query = queries + query_point_embedding
+        key = keys + key_point_embedding
+
+        attn_out = self.cross_attn_image_to_token(query=key, key=query, value=queries)
+        keys = keys + attn_out
+
+        keys = self.layer_norm4(keys)
+
+        outputs = (queries, keys)
+
+        if output_attentions:
+            outputs = outputs + (attn_out,)
+        else:
+            outputs = outputs + (None,)
+
+        return outputs
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "self_attn", None) is not None:
+            with tf.name_scope(self.self_attn.name):
+                self.self_attn.build(None)
+        if getattr(self, "layer_norm1", None) is not None:
+            with tf.name_scope(self.layer_norm1.name):
+                self.layer_norm1.build([None, None, None, self.hidden_size])
+        if getattr(self, "cross_attn_token_to_image", None) is not None:
+            with tf.name_scope(self.cross_attn_token_to_image.name):
+                self.cross_attn_token_to_image.build(None)
+        if getattr(self, "layer_norm2", None) is not None:
+            with tf.name_scope(self.layer_norm2.name):
+                self.layer_norm2.build([None, None, None, self.hidden_size])
+        if getattr(self, "mlp", None) is not None:
+            with tf.name_scope(self.mlp.name):
+                self.mlp.build(None)
+        if getattr(self, "layer_norm3", None) is not None:
+            with tf.name_scope(self.layer_norm3.name):
+                self.layer_norm3.build([None, None, None, self.hidden_size])
+        if getattr(self, "layer_norm4", None) is not None:
+            with tf.name_scope(self.layer_norm4.name):
+                self.layer_norm4.build([None, None, None, self.hidden_size])
+        if getattr(self, "cross_attn_image_to_token", None) is not None:
+            with tf.name_scope(self.cross_attn_image_to_token.name):
+                self.cross_attn_image_to_token.build(None)
+
+
+class TFSamTwoWayTransformer(keras.layers.Layer):
+    def __init__(self, config: SamMaskDecoderConfig, **kwargs):
+        super().__init__(**kwargs)
+        self.config = config
+
+        self.num_hidden_layers = config.num_hidden_layers
+        self.layers = []
+
+        for i in range(self.num_hidden_layers):
+            self.layers.append(TFSamTwoWayAttentionBlock(config, skip_first_layer_pe=(i == 0), name=f"layers_._{i}"))
+
+        self.final_attn_token_to_image = TFSamAttention(config, name="final_attn_token_to_image")
+        self.layer_norm_final_attn = keras.layers.LayerNormalization(
+            epsilon=config.layer_norm_eps, name="layer_norm_final_attn"
+        )
+
+    def call(
+        self,
+        point_embeddings: tf.Tensor,
+        image_embeddings: tf.Tensor,
+        image_positional_embeddings: tf.Tensor,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, TFBaseModelOutput]:
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        )
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        all_attentions = ()
+
+        if image_embeddings is None:
+            raise ValueError("You have to specify an image_embedding")
+
+        image_embeddings = tf.transpose(flatten(image_embeddings, 2), perm=(0, 2, 1))[:, None]
+        image_positional_embeddings = tf.transpose(flatten(image_positional_embeddings, 2), (0, 2, 1))[:, None]
+
+        # Prepare queries
+        queries = point_embeddings
+        keys = image_embeddings
+
+        # Apply transformer blocks and final layernorm
+        for layer in self.layers:
+            queries, keys, attention_outputs = layer(
+                queries=queries,
+                keys=keys,
+                query_point_embedding=point_embeddings,
+                key_point_embedding=image_positional_embeddings,
+                output_attentions=output_attentions,
+            )
+
+            if output_attentions:
+                all_attentions = all_attentions + (attention_outputs,)
+
+        # Apply the final attention layer from the points to the image
+        query = queries + point_embeddings
+        key = keys + image_positional_embeddings
+
+        attn_out = self.final_attn_token_to_image(query=query, key=key, value=keys)
+
+        queries = queries + attn_out
+        queries = self.layer_norm_final_attn(queries)
+        return queries, keys, all_attentions
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "final_attn_token_to_image", None) is not None:
+            with tf.name_scope(self.final_attn_token_to_image.name):
+                self.final_attn_token_to_image.build(None)
+        if getattr(self, "layer_norm_final_attn", None) is not None:
+            with tf.name_scope(self.layer_norm_final_attn.name):
+                self.layer_norm_final_attn.build([None, None, None, self.config.hidden_size])
+        for layer in self.layers:
+            with tf.name_scope(layer.name):
+                layer.build(None)
+
+
+class TFSamFeedForward(keras.layers.Layer):
+    def __init__(
+        self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int, sigmoid_output: bool = False, **kwargs
+    ):
+        super().__init__(**kwargs)
+        self.num_layers = num_layers
+        self.activation = keras.layers.ReLU()
+        self.proj_in =
keras.layers.Dense(hidden_dim, input_shape=(input_dim,), name="proj_in") + self.proj_out = keras.layers.Dense(output_dim, input_shape=(hidden_dim,), name="proj_out") + self.layers = [ + keras.layers.Dense(hidden_dim, input_shape=(hidden_dim,), name=f"layers_._{i}") + for i in range(num_layers - 2) + ] + self.sigmoid_output = sigmoid_output + self.hidden_dim = hidden_dim + self.input_dim = input_dim + + def call(self, hidden_states): + hidden_states = self.proj_in(hidden_states) + hidden_states = self.activation(hidden_states) + for layer in self.layers: + hidden_states = self.activation(layer(hidden_states)) + + hidden_states = self.proj_out(hidden_states) + if self.sigmoid_output: + hidden_states = tf.sigmoid(hidden_states) + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "proj_in", None) is not None: + with tf.name_scope(self.proj_in.name): + self.proj_in.build([None, None, self.input_dim]) + if getattr(self, "proj_out", None) is not None: + with tf.name_scope(self.proj_out.name): + self.proj_out.build([None, None, self.hidden_dim]) + if getattr(self, "layers", None) is not None: + for layer in self.layers: + with tf.name_scope(layer.name): + layer.build([None, None, self.hidden_dim]) + + +class TFSamMaskDecoder(keras.layers.Layer): + def __init__(self, config: SamMaskDecoderConfig, **kwargs): + super().__init__(**kwargs) + + self.hidden_size = config.hidden_size + + self.num_multimask_outputs = config.num_multimask_outputs + self.num_mask_tokens = config.num_multimask_outputs + 1 + + self.transformer = TFSamTwoWayTransformer(config, name="transformer") + + self.upscale_conv1 = keras.layers.Conv2DTranspose( + self.hidden_size // 4, kernel_size=2, strides=2, name="upscale_conv1", data_format="channels_first" + ) + self.upscale_conv2 = keras.layers.Conv2DTranspose( + self.hidden_size // 8, kernel_size=2, strides=2, name="upscale_conv2", data_format="channels_first" + ) + self.upscale_layer_norm = TFSamLayerNorm( + self.hidden_size // 4, data_format="channels_first", name="upscale_layer_norm" + ) + self.activation = tf.nn.gelu + + mlps_list = [] + for i in range(self.num_mask_tokens): + mlps_list += [ + TFSamFeedForward( + self.hidden_size, + self.hidden_size, + self.hidden_size // 8, + 3, + name=f"output_hypernetworks_mlps_._{i}", + ) + ] + self.output_hypernetworks_mlps = mlps_list + + self.iou_prediction_head = TFSamFeedForward( + self.hidden_size, + config.iou_head_hidden_dim, + self.num_mask_tokens, + config.iou_head_depth, + name="iou_prediction_head", + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + self.iou_token = self.add_weight(shape=(1, self.hidden_size), name="iou_token.weight", trainable=True) + self.mask_tokens = self.add_weight( + shape=(self.num_mask_tokens, self.hidden_size), name="mask_tokens.weight", trainable=True + ) + + if getattr(self, "transformer", None) is not None: + with tf.name_scope(self.transformer.name): + self.transformer.build(None) + if getattr(self, "upscale_conv1", None) is not None: + with tf.name_scope(self.upscale_conv1.name): + self.upscale_conv1.build([None, self.hidden_size, None, None]) + if getattr(self, "upscale_conv2", None) is not None: + with tf.name_scope(self.upscale_conv2.name): + self.upscale_conv2.build([None, self.hidden_size // 4, None, None]) + if getattr(self, "upscale_layer_norm", None) is not None: + with tf.name_scope(self.upscale_layer_norm.name): + self.upscale_layer_norm.build(None) + if getattr(self, 
"iou_prediction_head", None) is not None: + with tf.name_scope(self.iou_prediction_head.name): + self.iou_prediction_head.build(None) + for mlp in self.output_hypernetworks_mlps: + with tf.name_scope(mlp.name): + mlp.build(None) + + def call( + self, + image_embeddings: tf.Tensor, + image_positional_embeddings: tf.Tensor, + sparse_prompt_embeddings: tf.Tensor, + dense_prompt_embeddings: tf.Tensor, + multimask_output: bool, + output_attentions: Optional[bool] = None, + ) -> Tuple[tf.Tensor, tf.Tensor]: + batch_size, num_channels, height, width = shape_list(image_embeddings) + point_batch_size = tf.math.maximum(1, tf.shape(sparse_prompt_embeddings)[1]) + + output_tokens = tf.concat([self.iou_token, self.mask_tokens], axis=0) # Should be (1, 32) + (4, 32) = (5, 32) + output_tokens = tf.tile( + output_tokens[None, None, :], [batch_size, point_batch_size, 1, 1] + ) # Should be (batch_size, point_size, 5, 32) + + # Matt: The original Torch code checked that the sum of sparse_prompt_embeddings equalled 0. However, this only + # happens when the sparse prompt embeddings are an empty tensor with shape[1] == 0. I replaced + # it with an explicit shape check to avoid data-dependent control flow which breaks XLA. + if shape_list(sparse_prompt_embeddings)[1] != 0: + tokens = tf.concat((output_tokens, sparse_prompt_embeddings), axis=2) + else: + tokens = output_tokens + point_embeddings = tf.cast(tokens, self.iou_token.dtype) + + image_embeddings = image_embeddings + dense_prompt_embeddings + image_embeddings = tf.repeat(image_embeddings, point_batch_size, axis=0) + image_positional_embeddings = tf.repeat(image_positional_embeddings, point_batch_size, axis=0) + + point_embedding, image_embeddings, attentions = self.transformer( + point_embeddings=point_embeddings, + image_embeddings=image_embeddings, + image_positional_embeddings=image_positional_embeddings, + output_attentions=output_attentions, + ) + iou_token_out = point_embedding[:, :, 0, :] + mask_tokens_out = point_embedding[:, :, 1 : (1 + self.num_mask_tokens), :] + + image_embeddings = tf.transpose(image_embeddings, perm=(0, 1, 3, 2)) + image_embeddings = tf.reshape(image_embeddings, [batch_size * point_batch_size, num_channels, height, width]) + + upscaled_embedding = self.upscale_conv1(image_embeddings) + upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding)) + upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding)) + + hyper_in_list = [] + for i in range(self.num_mask_tokens): + current_mlp = self.output_hypernetworks_mlps[i] + hyper_in_list += [current_mlp(mask_tokens_out[:, :, i, :])] + hyper_in = tf.stack(hyper_in_list, axis=2) + + _, num_channels, height, width = shape_list(upscaled_embedding) + upscaled_embedding = tf.reshape( + upscaled_embedding, [batch_size, point_batch_size, num_channels, height * width] + ) + masks = tf.reshape(hyper_in @ upscaled_embedding, [batch_size, point_batch_size, -1, height, width]) + + iou_pred = self.iou_prediction_head(iou_token_out) + + if multimask_output: + mask_slice = slice(1, None) + else: + mask_slice = slice(0, 1) + masks = masks[:, :, mask_slice, :, :] + iou_pred = iou_pred[:, :, mask_slice] + + outputs = (masks, iou_pred) + + if output_attentions: + outputs = outputs + (attentions,) + else: + outputs = outputs + (None,) + + return outputs + + +class TFSamPositionalEmbedding(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + self.scale = config.hidden_size // 2 + self.config = config + + def build(self, 
input_shape): + # TODO Matt: What is going on here? Why is a non-trainable weight randomly initialized? + self.positional_embedding = self.add_weight( + name="positional_embedding", + shape=(2, self.config.num_pos_feats), + initializer=keras.initializers.RandomNormal(mean=0.0, stddev=self.scale), + trainable=False, + ) + super().build(input_shape) + + def call(self, input_coords, input_shape=None): + """Positionally encode points that are normalized to [0,1].""" + coordinates = tf.identity(input_coords) + + if input_shape is not None: + coordinates = tf.stack( + [ + tf.cast(coordinates[:, :, :, 0], tf.float32) / input_shape[1], + tf.cast(coordinates[:, :, :, 1], tf.float32) / input_shape[0], + ], + axis=-1, + ) + + # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape + coordinates = 2 * coordinates - 1 + coordinates = tf.cast(coordinates, self.positional_embedding.dtype) + coordinates = tf.matmul(coordinates, self.positional_embedding) + coordinates = 2 * np.pi * coordinates + # outputs d_1 x ... x d_n x channel shape + return tf.concat([tf.sin(coordinates), tf.cos(coordinates)], axis=-1) + + +class TFSamMaskEmbedding(keras.layers.Layer): + def __init__(self, config: SamPromptEncoderConfig, **kwargs): + super().__init__(**kwargs) + self.mask_input_channels = config.mask_input_channels // 4 + self.activation = ACT2FN[config.hidden_act] + self.conv1 = keras.layers.Conv2D(self.mask_input_channels, kernel_size=2, strides=2, name="conv1") + self.conv2 = keras.layers.Conv2D(config.mask_input_channels, kernel_size=2, strides=2, name="conv2") + self.conv3 = keras.layers.Conv2D(config.hidden_size, kernel_size=1, name="conv3") + self.layer_norm1 = TFSamLayerNorm(self.mask_input_channels, config.layer_norm_eps, name="layer_norm1") + self.layer_norm2 = TFSamLayerNorm(self.mask_input_channels * 4, config.layer_norm_eps, name="layer_norm2") + self.config = config + + def call(self, masks): + masks = tf.transpose(masks, perm=(0, 2, 3, 1)) # Convert to channels-last + hidden_states = self.conv1(masks) + hidden_states = self.layer_norm1(hidden_states) + hidden_states = self.activation(hidden_states) + + hidden_states = self.conv2(hidden_states) + hidden_states = self.layer_norm2(hidden_states) + hidden_states = self.activation(hidden_states) + dense_embeddings = self.conv3(hidden_states) + dense_embeddings = tf.transpose(dense_embeddings, perm=(0, 3, 1, 2)) # Convert back to channels-first + return dense_embeddings + + def build(self, input_shape=None): + # This class needs an explicit build method because it isn't called with the standard dummy inputs + if self.built: + return + self.built = True + with tf.name_scope("conv1"): + self.conv1.build([None, None, None, 1]) + with tf.name_scope("conv2"): + self.conv2.build([None, None, None, self.mask_input_channels]) + with tf.name_scope("conv3"): + self.conv3.build([None, None, None, self.mask_input_channels * 4]) + with tf.name_scope("layer_norm1"): + self.layer_norm1.build([None, None, None, self.mask_input_channels]) + with tf.name_scope("layer_norm2"): + self.layer_norm2.build([None, None, None, self.mask_input_channels * 4]) + + +class TFSamPromptEncoder(keras.layers.Layer): + def __init__(self, config: SamPromptEncoderConfig, shared_patch_embedding, **kwargs): + super().__init__(**kwargs) + self.shared_embedding = shared_patch_embedding + self.mask_embed = TFSamMaskEmbedding(config, name="mask_embed") + self.no_mask_embed = None + + self.image_embedding_size = (config.image_embedding_size, config.image_embedding_size) + 
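# Note: the `config.num_point_embeddings` point-type embeddings created in `build` (4 in the reference
+        # config) encode the prompt semantics: indices 0/1 embed negative/positive clicked points (labels 0/1),
+        # indices 2/3 embed the two box corners, `not_a_point_embed` covers the background label -1, and
+        # padding points (label -10) are zeroed out entirely; see `_embed_points` and `_embed_boxes` below.
+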
self.input_image_size = config.image_size + + self.point_embed = [] + self.hidden_size = config.hidden_size + self.not_a_point_embed = None + self.config = config + + def build(self, input_shape=None): + self.no_mask_embed = self.add_weight( + name="no_mask_embed.weight", + shape=(1, self.hidden_size), + initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.02), + trainable=True, + ) + self.point_embed = [ + self.add_weight( + name=f"point_embed_._{i}.weight", + shape=(1, self.hidden_size), + initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.02), + trainable=True, + ) + for i in range(self.config.num_point_embeddings) + ] + self.not_a_point_embed = self.add_weight( + name="not_a_point_embed.weight", + shape=(1, self.hidden_size), + initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.02), + trainable=True, + ) + with tf.name_scope("mask_embed"): + # We must explicitly build the mask embed because it isn't touched by the standard dummy inputs + self.mask_embed.build( + (None, self.config.mask_input_channels, self.config.image_size, self.config.image_size) + ) + + if self.built: + return + self.built = True + if getattr(self, "mask_embed", None) is not None: + with tf.name_scope(self.mask_embed.name): + self.mask_embed.build(None) + + def _embed_points(self, points: tf.Tensor, labels: tf.Tensor, pad: bool) -> tf.Tensor: + """Embeds point prompts.""" + points = points + 0.5 # Shift to center of pixel + if pad: + target_point_shape = (shape_list(points)[0], shape_list(points)[1], 1, shape_list(points)[-1]) + target_labels_shape = (shape_list(points)[0], shape_list(points)[1], 1) + padding_point = tf.zeros(target_point_shape, dtype=points.dtype) + padding_label = -tf.ones(target_labels_shape, dtype=labels.dtype) + points = tf.concat([points, padding_point], axis=2) + labels = tf.concat([labels, padding_label], axis=2) + input_shape = (self.input_image_size, self.input_image_size) + point_embedding = self.shared_embedding(points, input_shape) + + point_embedding = tf.where(labels[..., None] == -1, self.not_a_point_embed[0], point_embedding) + + point_embedding = tf.where( + labels[..., None] != -10, + point_embedding, + tf.zeros_like(point_embedding), + ) + point_embedding = tf.where( + (labels == 0)[:, :, :, None], point_embedding + self.point_embed[0], point_embedding + ) + point_embedding = tf.where( + (labels == 1)[:, :, :, None], point_embedding + self.point_embed[1], point_embedding + ) + return point_embedding + + def _embed_boxes(self, boxes: tf.Tensor) -> tf.Tensor: + """Embeds box prompts.""" + boxes = boxes + 0.5 # Shift to center of pixel + batch_size, nb_boxes = shape_list(boxes)[:2] + coords = tf.reshape(boxes, (batch_size, nb_boxes, 2, 2)) + input_shape = (self.input_image_size, self.input_image_size) + corner_embedding = self.shared_embedding(coords, input_shape) + corner_embedding += tf.where( + tf.range(shape_list(corner_embedding)[2])[None, None, :, None] == 0, + self.point_embed[2][0], + self.point_embed[3][0], + ) + return corner_embedding + + def call( + self, + batch_size: Optional[int], + input_points: Optional[Tuple[tf.Tensor, tf.Tensor]], + input_labels: tf.Tensor | None, + input_boxes: tf.Tensor | None, + input_masks: tf.Tensor | None, + ) -> Tuple[tf.Tensor, tf.Tensor]: + """ + Embeds different types of prompts, returning both sparse and dense embeddings. + + Args: + points (`tf.Tensor`, *optional*): + point coordinates and labels to embed. 
+ boxes (`tf.Tensor`, *optional*): + boxes to embed + masks (`tf.Tensor`, *optional*): + masks to embed + """ + sparse_embeddings = None + if input_points is not None: + batch_size, point_batch_size = shape_list(input_points)[:2] + if input_labels is None: + raise ValueError("If points are provided, labels must also be provided.") + point_embeddings = self._embed_points(input_points, input_labels, pad=(input_boxes is None)) + sparse_embeddings = tf.zeros( + (batch_size, point_batch_size, 0, self.hidden_size), dtype=point_embeddings.dtype + ) + sparse_embeddings = tf.concat([sparse_embeddings, point_embeddings], axis=2) + if input_boxes is not None: + batch_size = shape_list(input_boxes)[0] + box_embeddings = self._embed_boxes(input_boxes) + if sparse_embeddings is None: + sparse_embeddings = box_embeddings + else: + sparse_embeddings = tf.concat([sparse_embeddings, box_embeddings], axis=2) + if input_masks is not None: + dense_embeddings = self.mask_embed(input_masks) + else: + dense_embeddings = self.no_mask_embed[0] + dense_embeddings = tf.reshape(dense_embeddings, (1, -1, 1, 1)) + dense_embeddings = tf.tile( + dense_embeddings, (batch_size, 1, self.image_embedding_size[0], self.image_embedding_size[1]) + ) + if sparse_embeddings is None: + sparse_embeddings = tf.zeros((batch_size, 0, 1, self.hidden_size), dtype=dense_embeddings.dtype) + + return sparse_embeddings, dense_embeddings + + +class TFSamVisionAttention(keras.layers.Layer): + """Multi-head Attention block with relative position embeddings.""" + + def __init__(self, config, window_size, **kwargs): + super().__init__(**kwargs) + input_size = ( + (config.image_size // config.patch_size, config.image_size // config.patch_size) + if window_size == 0 + else (window_size, window_size) + ) + self.input_size = input_size + + self.num_attention_heads = config.num_attention_heads + head_dim = config.hidden_size // config.num_attention_heads + self.head_dim = head_dim + self.scale = head_dim**-0.5 + self.dropout = config.attention_dropout + + self.qkv = keras.layers.Dense(config.hidden_size * 3, use_bias=config.qkv_bias, name="qkv") + self.proj = keras.layers.Dense(config.hidden_size, name="proj") + + self.use_rel_pos = config.use_rel_pos + if self.use_rel_pos: + if input_size is None: + raise ValueError("Input size must be provided if using relative positional encoding.") + self.config = config + + def build(self, input_shape=None): + if self.input_size is not None: + # initialize relative positional embeddings + self.rel_pos_h = self.add_weight( + shape=(2 * self.input_size[0] - 1, self.head_dim), initializer="zeros", name="rel_pos_h" + ) + self.rel_pos_w = self.add_weight( + shape=(2 * self.input_size[1] - 1, self.head_dim), initializer="zeros", name="rel_pos_w" + ) + + if self.built: + return + self.built = True + if getattr(self, "qkv", None) is not None: + with tf.name_scope(self.qkv.name): + self.qkv.build([None, None, self.config.hidden_size]) + if getattr(self, "proj", None) is not None: + with tf.name_scope(self.proj.name): + self.proj.build([None, None, self.config.hidden_size]) + + def get_rel_pos(self, q_size: int, k_size: int, rel_pos: tf.Tensor) -> tf.Tensor: + """ + Get relative positional embeddings according to the relative positions of + query and key sizes. + + Args: + q_size (int): + size of the query. + k_size (int): + size of key k. + rel_pos (`tf.Tensor`): + relative position embeddings (L, channel). + + Returns: + Extracted positional embeddings according to relative positions. 
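+
+        Example (an illustrative sketch, not part of the reference implementation; `attn_layer` is assumed to
+        be a built `TFSamVisionAttention`, with toy shapes for q_size = k_size = 2 and 4 channels per head):
+
+        ```python
+        >>> import tensorflow as tf
+
+        >>> rel_pos = tf.random.normal((3, 4))  # (2 * max(q_size, k_size) - 1, head_dim)
+        >>> attn_layer.get_rel_pos(2, 2, rel_pos).shape  # one embedding per (query, key) offset pair
+        TensorShape([2, 2, 4])
+        ```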
+ """ + max_rel_dist = int(2 * max(q_size, k_size) - 1) + # Interpolate rel pos if needed. + if rel_pos.shape[0] != max_rel_dist: + # Interpolate rel pos. + rel_pos_resized = tf.image.resize( + tf.reshape(rel_pos, (1, rel_pos.shape[0], -1)), + size=(max_rel_dist, rel_pos.shape[1]), + method="bilinear", + ) + rel_pos_resized = tf.reshape(rel_pos_resized, (-1, max_rel_dist)) + else: + rel_pos_resized = rel_pos + + # Scale the coords with short length if shapes for q and k are different. + q_coords = tf.expand_dims(tf.range(q_size, dtype=tf.float32), 1) * max(k_size / q_size, 1.0) + k_coords = tf.expand_dims(tf.range(k_size, dtype=tf.float32), 0) * max(q_size / k_size, 1.0) + relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) + + return tf.gather(rel_pos_resized, tf.cast(relative_coords, tf.int32)) + + def add_decomposed_rel_pos( + self, + attn: tf.Tensor, + query: tf.Tensor, + rel_pos_h: tf.Tensor, + rel_pos_w: tf.Tensor, + q_size: Tuple[int, int], + k_size: Tuple[int, int], + ) -> tf.Tensor: + """ + Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. + https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py + + Args: + attn (`tf.Tensor`): + attention map. + query (`tf.Tensor`): + query q in the attention layer with shape (batch_size, query_height * query_width, channel). + rel_pos_h (`tf.Tensor`): + relative position embeddings (Lh, channel) for height axis. + rel_pos_w (`tf.Tensor`): + relative position embeddings (Lw, channel) for width axis. + q_size (tuple): + spatial sequence size of query q with (query_height, query_width). + k_size (tuple): + spatial sequence size of key k with (key_height, key_width). + + Returns: + attn (`tf.Tensor`): + attention map with added relative positional embeddings. 
+ """ + query_height, query_width = q_size + key_height, key_width = k_size + relative_position_height = self.get_rel_pos(query_height, key_height, rel_pos_h) + relative_position_width = self.get_rel_pos(query_width, key_width, rel_pos_w) + + batch_size, _, dim = shape_list(query) + reshaped_query = tf.reshape(query, (batch_size, query_height, query_width, dim)) + rel_h = tf.einsum("bhwc,hkc->bhwk", reshaped_query, relative_position_height) + rel_w = tf.einsum("bhwc,wkc->bhwk", reshaped_query, relative_position_width) + attn = tf.reshape(attn, (batch_size, query_height, query_width, key_height, key_width)) + attn = attn + tf.expand_dims(rel_h, axis=-1) + tf.expand_dims(rel_w, axis=-2) + attn = tf.reshape(attn, (batch_size, query_height * query_width, key_height * key_width)) + return attn + + def call(self, hidden_states: tf.Tensor, output_attentions=False, training=False) -> tf.Tensor: + batch_size, height, width, _ = shape_list(hidden_states) + # qkv with shape (3, batch_size, nHead, height * width, channel) + qkv = tf.reshape(self.qkv(hidden_states), (batch_size, height * width, 3, self.num_attention_heads, -1)) + qkv = tf.transpose(qkv, perm=(2, 0, 3, 1, 4)) + # q, k, v with shape (batch_size * nHead, height * width, channel) + query, key, value = tf.unstack( + tf.reshape(qkv, (3, batch_size * self.num_attention_heads, height * width, -1)), axis=0 + ) + attn_weights = tf.matmul(query * self.scale, key, transpose_b=True) + + if self.use_rel_pos: + attn_weights = self.add_decomposed_rel_pos( + attn_weights, query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width) + ) + + attn_weights = tf.nn.softmax(attn_weights, axis=-1) + + if training: + attn_probs = tf.nn.dropout(attn_weights, rate=self.dropout) + else: + attn_probs = attn_weights + + attn_output = tf.reshape(attn_probs @ value, (batch_size, self.num_attention_heads, height, width, -1)) + attn_output = tf.transpose(attn_output, perm=(0, 2, 3, 1, 4)) + attn_output = tf.reshape(attn_output, (batch_size, height, width, self.config.hidden_size)) + + attn_output = self.proj(attn_output) + + if output_attentions: + outputs = (attn_output, attn_weights) + else: + outputs = (attn_output, None) + + return outputs + + +class TFSamVisionLayer(keras.layers.Layer): + def __init__(self, config, window_size, **kwargs): + super().__init__(**kwargs) + self.layer_norm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1") + self.attn = TFSamVisionAttention(config, window_size, name="attn") + self.layer_norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2") + self.mlp = TFSamMLPBlock(config, name="mlp") + self.window_size = window_size + self.config = config + + def window_partition(self, hidden_states: tf.Tensor, window_size: int) -> Tuple[tf.Tensor, Tuple[int, int]]: + batch_size, height, width, channel = shape_list(hidden_states) + + pad_h = (window_size - height % window_size) % window_size + pad_w = (window_size - width % window_size) % window_size + if pad_h > 0 or pad_w > 0: + hidden_states = tf.pad(hidden_states, [[0, 0], [0, pad_h], [0, pad_w], [0, 0]]) + pad_height, pad_width = height + pad_h, width + pad_w + + hidden_states = tf.reshape( + hidden_states, + [batch_size, pad_height // window_size, window_size, pad_width // window_size, window_size, channel], + ) + windows = tf.reshape( + tf.transpose(hidden_states, perm=[0, 1, 3, 2, 4, 5]), [-1, window_size, window_size, channel] + ) + return windows, (pad_height, pad_width) + + def window_unpartition( + self, 
windows: tf.Tensor, window_size: int, padding_shape: Tuple[int, int], original_shape: Tuple[int, int] + ) -> tf.Tensor: + pad_height, pad_width = padding_shape + height, width = original_shape + batch_size = shape_list(windows)[0] // (pad_height * pad_width // window_size // window_size) + hidden_states = tf.reshape( + windows, [batch_size, pad_height // window_size, pad_width // window_size, window_size, window_size, -1] + ) + hidden_states = tf.reshape( + tf.transpose(hidden_states, perm=[0, 1, 3, 2, 4, 5]), [batch_size, pad_height, pad_width, -1] + ) + + if pad_height > height or pad_width > width: + hidden_states = hidden_states[:, :height, :width, :] + return hidden_states + + def call( + self, + hidden_states: tf.Tensor, + output_attentions: Optional[bool] = False, + training: Optional[bool] = False, + ) -> Tuple[tf.Tensor]: + residual = hidden_states + + hidden_states = self.layer_norm1(hidden_states) + if self.window_size > 0: + height, width = hidden_states.shape[1], hidden_states.shape[2] + hidden_states, padding_shape = self.window_partition(hidden_states, self.window_size) + + hidden_states, attn_weights = self.attn( + hidden_states=hidden_states, + output_attentions=output_attentions, + training=training, + ) + if self.window_size > 0: + hidden_states = self.window_unpartition(hidden_states, self.window_size, padding_shape, (height, width)) + + hidden_states = residual + hidden_states + layernorm_output = self.layer_norm2(hidden_states) + hidden_states = hidden_states + self.mlp(layernorm_output) + + outputs = (hidden_states,) + if output_attentions: + outputs += (attn_weights,) + + return outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "layer_norm1", None) is not None: + with tf.name_scope(self.layer_norm1.name): + self.layer_norm1.build([None, None, None, self.config.hidden_size]) + if getattr(self, "attn", None) is not None: + with tf.name_scope(self.attn.name): + self.attn.build(None) + if getattr(self, "layer_norm2", None) is not None: + with tf.name_scope(self.layer_norm2.name): + self.layer_norm2.build([None, None, None, self.config.hidden_size]) + if getattr(self, "mlp", None) is not None: + with tf.name_scope(self.mlp.name): + self.mlp.build(None) + + +class TFSamVisionNeck(keras.layers.Layer): + def __init__(self, config: SamVisionConfig, **kwargs): + super().__init__(**kwargs) + self.config = config + + self.conv1 = keras.layers.Conv2D( + config.output_channels, + kernel_size=1, + use_bias=False, + name="conv1", + ) + self.layer_norm1 = TFSamLayerNorm(config.output_channels, name="layer_norm1") + self.conv2 = keras.layers.Conv2D( + config.output_channels, + kernel_size=3, + padding="same", + use_bias=False, + name="conv2", + ) + self.layer_norm2 = TFSamLayerNorm(config.output_channels, name="layer_norm2") + + def call(self, hidden_states): + hidden_states = self.conv1(hidden_states) + hidden_states = self.layer_norm1(hidden_states) + + hidden_states = self.conv2(hidden_states) + hidden_states = self.layer_norm2(hidden_states) + hidden_states = tf.transpose(hidden_states, perm=[0, 3, 1, 2]) + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "conv1", None) is not None: + with tf.name_scope(self.conv1.name): + self.conv1.build([None, None, None, self.config.hidden_size]) + if getattr(self, "layer_norm1", None) is not None: + with tf.name_scope(self.layer_norm1.name): + self.layer_norm1.build(None) + if getattr(self, "conv2", None) 
is not None: + with tf.name_scope(self.conv2.name): + self.conv2.build([None, None, None, self.config.output_channels]) + if getattr(self, "layer_norm2", None) is not None: + with tf.name_scope(self.layer_norm2.name): + self.layer_norm2.build(None) + + +class TFSamVisionEncoder(keras.layers.Layer): + def __init__(self, config: SamVisionConfig, **kwargs): + super().__init__(**kwargs) + self.config = config + self.image_size = config.image_size + + self.patch_embed = TFSamPatchEmbeddings(config, name="patch_embed") + + self.pos_embed = None + + self.layers = [] + for i in range(config.num_hidden_layers): + layer = TFSamVisionLayer( + config, + window_size=config.window_size if i not in config.global_attn_indexes else 0, + name=f"layers_._{i}", + ) + self.layers.append(layer) + + self.neck = TFSamVisionNeck(config, name="neck") + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if self.config.use_abs_pos: + # Initialize absolute positional embedding with pretrain image size. + self.pos_embed = self.add_weight( + shape=[ + 1, + self.config.image_size // self.config.patch_size, + self.config.image_size // self.config.patch_size, + self.config.hidden_size, + ], + initializer="zeros", + trainable=True, + name="pos_embed", + ) + + if getattr(self, "patch_embed", None) is not None: + with tf.name_scope(self.patch_embed.name): + self.patch_embed.build(None) + if getattr(self, "neck", None) is not None: + with tf.name_scope(self.neck.name): + self.neck.build(None) + for layer in self.layers: + with tf.name_scope(layer.name): + layer.build(None) + + def get_input_embeddings(self): + return self.patch_embed + + def call( + self, + pixel_values: tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: Optional[bool] = False, + ) -> Union[Tuple, TFSamVisionEncoderOutput]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + hidden_states = self.patch_embed(pixel_values) + if self.pos_embed is not None: + hidden_states = hidden_states + self.pos_embed + + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + + for i, layer_module in enumerate(self.layers): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_outputs = layer_module(hidden_states, output_attentions=output_attentions, training=training) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + hidden_states = self.neck(hidden_states) + + if not return_dict: + outputs = (hidden_states,) + if output_hidden_states: + outputs = outputs + (all_hidden_states,) + if output_attentions: + outputs = outputs + (all_self_attentions,) + return outputs + + return TFSamVisionEncoderOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) + + +class TFSamPreTrainedModel(TFPreTrainedModel): + config_class = SamConfig + base_model_prefix = 
"sam" + main_input_name = "pixel_values" + + +SAM_START_DOCSTRING = r""" + This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a TensorFlow [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) + subclass. Use it as a regular TensorFlow Model and refer to the TensorFlow documentation for all matter related to + general usage and behavior. + + Parameters: + config ([`SamConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +SAM_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`SamProcessor`]. See [`SamProcessor.__call__`] for + details. + input_points (`tf.Tensor` of shape `(batch_size, num_points, 2)`): + Input 2D spatial points, this is used by the prompt encoder to encode the prompt. Generally yields to much + better results. The points can be obtained by passing a list of list of list to the processor that will + create corresponding `tf` tensors of dimension 4. The first dimension is the image batch size, the second + dimension is the point batch size (i.e. how many segmentation masks do we want the model to predict per + input point), the third dimension is the number of points per segmentation mask (it is possible to pass + multiple points for a single mask), and the last dimension is the x (vertical) and y (horizontal) + coordinates of the point. If a different number of points is passed either for each image, or for each + mask, the processor will create "PAD" points that will correspond to the (0, 0) coordinate, and the + computation of the embedding will be skipped for these points using the labels. + input_labels (`tf.Tensor` of shape `(batch_size, point_batch_size, num_points)`): + Input labels for the points, this is used by the prompt encoder to encode the prompt. According to the + official implementation, there are 3 types of labels + + - `1`: the point is a point that contains the object of interest + - `0`: the point is a point that does not contain the object of interest + - `-1`: the point corresponds to the background + + We added the label: + + - `-10`: the point is a padding point, thus should be ignored by the prompt encoder + + The padding labels should be automatically done by the processor. + input_boxes (`tf.Tensor` of shape `(batch_size, num_boxes, 4)`): + Input boxes for the points, this is used by the prompt encoder to encode the prompt. Generally yields to + much better generated masks. The boxes can be obtained by passing a list of list of list to the processor, + that will generate a `tf` tensor, with each dimension corresponding respectively to the image batch size, + the number of boxes per image and the coordinates of the top left and botton right point of the box. 
in the order (`x1`, `y1`, `x2`, `y2`):
+
+            - `x1`: the x coordinate of the top left point of the input box
+            - `y1`: the y coordinate of the top left point of the input box
+            - `x2`: the x coordinate of the bottom right point of the input box
+            - `y2`: the y coordinate of the bottom right point of the input box
+
+        input_masks (`tf.Tensor` of shape `(batch_size, image_size, image_size)`):
+            The SAM model also accepts segmentation masks as input. The mask will be embedded by the prompt encoder
+            to generate a corresponding embedding that will later be fed to the mask decoder. These masks need to
+            be provided manually by the user, and they need to be of shape (`batch_size`, `image_size`,
+            `image_size`).
+
+        image_embeddings (`tf.Tensor` of shape `(batch_size, output_channels, window_size, window_size)`):
+            Image embeddings, used by the mask decoder to generate masks and IoU scores. For more memory-efficient
+            computation, users can first retrieve the image embeddings using the `get_image_embeddings` method, and
+            then feed them to the `call` method instead of feeding the `pixel_values`.
+        multimask_output (`bool`, *optional*):
+            In the original implementation and paper, the model always outputs 3 masks per image (or per point /
+            per bounding box if relevant). However, it is possible to output just a single mask, corresponding to
+            the "best" mask, by specifying `multimask_output=False`.
+        output_attentions (`bool`, *optional*):
+            Whether or not to return the attention tensors of all attention layers. See `attentions` under returned
+            tensors for more detail.
+        output_hidden_states (`bool`, *optional*):
+            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+            for more detail.
+        return_dict (`bool`, *optional*):
+            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
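+
+    Example:
+
+    A minimal usage sketch (the checkpoint name, image URL and prompt point below are placeholders; any SAM
+    checkpoint and pixel coordinate work the same way):
+
+    ```python
+    >>> from PIL import Image
+    >>> import requests
+    >>> from transformers import SamProcessor, TFSamModel
+
+    >>> processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
+    >>> model = TFSamModel.from_pretrained("facebook/sam-vit-base")
+
+    >>> img_url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+    >>> raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
+    >>> input_points = [[[450, 600]]]  # one point prompt for the single input image
+
+    >>> inputs = processor(raw_image, input_points=input_points, return_tensors="tf")
+    >>> outputs = model(**inputs)
+
+    >>> masks = processor.post_process_masks(
+    ...     outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"], return_tensors="tf"
+    ... )
+    >>> scores = outputs.iou_scores
+    ```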
+""" + + +@add_start_docstrings( + "Segment Anything Model (SAM) for generating segmentation masks, given an input image and ", + " optional 2D location and bounding boxes.", + SAM_START_DOCSTRING, +) +class TFSamModel(TFSamPreTrainedModel): + _keys_to_ignore_on_load_missing = [r"prompt_encoder.shared_embedding.positional_embedding"] + + def __init__(self, config, **kwargs): + super().__init__(config, **kwargs) + self.shared_image_embedding = TFSamPositionalEmbedding(config.vision_config, name="shared_image_embedding") + + self.vision_encoder = TFSamVisionEncoder(config.vision_config, name="vision_encoder") + self.prompt_encoder = TFSamPromptEncoder( + config.prompt_encoder_config, self.shared_image_embedding, name="prompt_encoder" + ) + self.mask_decoder = TFSamMaskDecoder(config.mask_decoder_config, name="mask_decoder") + self.config = config + + def get_input_embeddings(self): + return self.vision_encoder.get_input_embeddings() + + def get_image_wide_positional_embeddings(self): + size = self.config.prompt_encoder_config.image_embedding_size + grid = tf.ones((size, size)) + y_embed = tf.math.cumsum(grid, axis=0) - 0.5 + x_embed = tf.math.cumsum(grid, axis=1) - 0.5 + y_embed = y_embed / size + x_embed = x_embed / size + + positional_embedding = self.shared_image_embedding(tf.stack([x_embed, y_embed], axis=-1)) + return tf.expand_dims(tf.transpose(positional_embedding, perm=[2, 0, 1]), axis=0) # channel x height x width + + def get_image_embeddings( + self, + pixel_values, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + r""" + Returns the image embeddings by passing the pixel values through the vision encoder. + + Args: + pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): + Input pixel values + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.TFModelOutput`] instead of a plain tuple. + + """ + vision_output = self.vision_encoder( + pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + image_embeddings = vision_output[0] + return image_embeddings + + def get_prompt_embeddings( + self, + input_points: tf.Tensor | None = None, + input_labels: tf.Tensor | None = None, + input_boxes: tf.Tensor | None = None, + input_masks: tf.Tensor | None = None, + ): + r""" + Returns the prompt embeddings by passing the input points, labels, boxes and masks through the prompt encoder. + + Args: + input_points (`tf.Tensor` of shape `(batch_size, point_batch_size, num_points_per_image, 2)`): + Optional input points for the prompt encoder. The padding of the point is automatically done by the + processor. `point_batch_size` refers to the number of masks that we want the model to predict per + point. The model will output `point_batch_size` times 3 masks in total. + input_labels (`tf.Tensor` of shape `(batch_size, point_batch_size, num_points_per_image)`): + Optional input labels for the prompt encoder. The padding of the labels is automatically done by the + processor, or can be fed by the user. + input_boxes (`tf.Tensor` of shape `(batch_size, num_boxes_per_image, 4)`): + Optional input boxes for the prompt encoder. 
The padding of the boxes is automatically done by the
+                processor. Users can also pass the input boxes manually.
+            input_masks (`tf.Tensor` of shape `(batch_size, image_size, image_size)`):
+                Optional input masks for the prompt encoder.
+        """
+        prompt_output = self.prompt_encoder(
+            input_points=input_points,
+            input_labels=input_labels,
+            input_boxes=input_boxes,
+            input_masks=input_masks,
+        )
+        return prompt_output
+
+    @unpack_inputs
+    @add_start_docstrings_to_model_forward(SAM_INPUTS_DOCSTRING)
+    def call(
+        self,
+        pixel_values: TFModelInputType | None = None,
+        input_points: tf.Tensor | None = None,
+        input_labels: tf.Tensor | None = None,
+        input_boxes: tf.Tensor | None = None,
+        input_masks: tf.Tensor | None = None,
+        image_embeddings: tf.Tensor | None = None,
+        multimask_output: bool = True,
+        output_attentions: bool | None = None,
+        output_hidden_states: bool | None = None,
+        return_dict: bool | None = None,
+        training: bool = False,
+        **kwargs,
+    ) -> TFSamImageSegmentationOutput | Tuple[tf.Tensor]:
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        )
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        if pixel_values is None and image_embeddings is None:
+            raise ValueError("Either pixel_values or image_embeddings must be provided.")
+
+        if pixel_values is not None and image_embeddings is not None:
+            raise ValueError("Only one of pixel_values and image_embeddings can be provided.")
+
+        if input_points is not None and len(input_points.shape) != 4:
+            raise ValueError(
+                "The input_points must be a 4D tensor of shape (`batch_size`, `point_batch_size`, `nb_points_per_image`, `2`),",
+                " got {}.".format(input_points.shape),
+            )
+        if input_boxes is not None and len(input_boxes.shape) != 3:
+            raise ValueError(
+                "The input_boxes must be a 3D tensor of shape (`batch_size`, `nb_boxes`, `4`),",
+                " got {}.".format(input_boxes.shape),
+            )
+        if input_points is not None and input_boxes is not None:
+            point_batch_size = shape_list(input_points)[1]
+            box_batch_size = shape_list(input_boxes)[1]
+            if point_batch_size != box_batch_size:
+                raise ValueError(
+                    "You should provide as many bounding boxes as input points per box.
Got {} and {}.".format( + point_batch_size, box_batch_size + ) + ) + if pixel_values is not None: + # Ensures that later checks pass even with an all-None shape from the serving signature + pixel_values = tf.ensure_shape( + pixel_values, + [ + None, + self.config.vision_config.num_channels, + self.config.vision_config.image_size, + self.config.vision_config.image_size, + ], + ) + image_positional_embeddings = self.get_image_wide_positional_embeddings() + # repeat with batch size + batch_size = shape_list(pixel_values)[0] if pixel_values is not None else shape_list(image_embeddings)[0] + image_positional_embeddings = tf.repeat(image_positional_embeddings, batch_size, axis=0) + + vision_attentions = None + vision_hidden_states = None + + if pixel_values is not None: + vision_outputs = self.vision_encoder( + pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=True, + training=training, + ) + image_embeddings = vision_outputs["last_hidden_state"] + + if output_hidden_states: + vision_hidden_states = vision_outputs["hidden_states"] + if output_attentions: + vision_attentions = vision_outputs["attentions"] + + if input_points is not None and input_labels is None: + input_labels = tf.ones_like(input_points[:, :, :, 0], dtype=tf.int32) + + if input_points is not None and image_embeddings.shape[0] != input_points.shape[0]: + raise ValueError( + "The batch size of the image embeddings and the input points must be the same. ", + "Got {} and {} respectively.".format(image_embeddings.shape[0], input_points.shape[0]), + " if you want to pass multiple points for the same image, make sure that you passed ", + " input_points of shape (batch_size, point_batch_size, num_points_per_image, 3) and ", + " input_labels of shape (batch_size, point_batch_size, num_points_per_image)", + ) + + sparse_embeddings, dense_embeddings = self.prompt_encoder( + batch_size=shape_list(image_embeddings)[0], + input_points=input_points, + input_labels=input_labels, + input_boxes=input_boxes, + input_masks=input_masks, + ) + + low_res_masks, iou_predictions, mask_decoder_attentions = self.mask_decoder( + image_embeddings=image_embeddings, + image_positional_embeddings=image_positional_embeddings, + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + output_attentions=output_attentions, + ) + + if not return_dict: + output = (iou_predictions, low_res_masks) + if output_hidden_states: + output = output + (vision_hidden_states,) + + if output_attentions: + output = output + (vision_attentions, mask_decoder_attentions) + return output + + return TFSamImageSegmentationOutput( + iou_scores=iou_predictions, + pred_masks=low_res_masks, + vision_hidden_states=vision_hidden_states, + vision_attentions=vision_attentions, + mask_decoder_attentions=mask_decoder_attentions, + ) + + def serving_output(self, output: TFSamImageSegmentationOutput) -> TFSamImageSegmentationOutput: + hs = tf.convert_to_tensor(output.vision_hidden_states) if self.config.output_hidden_states else None + attns = tf.convert_to_tensor(output.vision_attentions) if self.config.output_attentions else None + + return TFSamImageSegmentationOutput( + iou_scores=output.iou_scores, + pred_masks=output.pred_masks, + vision_hidden_states=hs if self.config.output_hidden_states else None, + vision_attentions=attns if self.config.output_attentions else None, + mask_decoder_attentions=output.mask_decoder_attentions if self.config.output_attentions else 
None,
+        )
+
+    def build(self, input_shape=None):
+        if self.built:
+            return
+        self.built = True
+        if getattr(self, "shared_image_embedding", None) is not None:
+            with tf.name_scope(self.shared_image_embedding.name):
+                self.shared_image_embedding.build(None)
+        if getattr(self, "vision_encoder", None) is not None:
+            with tf.name_scope(self.vision_encoder.name):
+                self.vision_encoder.build(None)
+        if getattr(self, "prompt_encoder", None) is not None:
+            with tf.name_scope(self.prompt_encoder.name):
+                self.prompt_encoder.build(None)
+        if getattr(self, "mask_decoder", None) is not None:
+            with tf.name_scope(self.mask_decoder.name):
+                self.mask_decoder.build(None)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/sam/processing_sam.py b/venv/lib/python3.10/site-packages/transformers/models/sam/processing_sam.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed89ebeb3a035bdedfe0b1a50d6b564d346d4707
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/sam/processing_sam.py
@@ -0,0 +1,266 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Processor class for SAM.
+"""
+from copy import deepcopy
+from typing import Optional, Union
+
+import numpy as np
+
+from ...processing_utils import ProcessorMixin
+from ...tokenization_utils_base import BatchEncoding
+from ...utils import TensorType, is_tf_available, is_torch_available
+
+
+if is_torch_available():
+    import torch
+
+if is_tf_available():
+    import tensorflow as tf
+
+
+class SamProcessor(ProcessorMixin):
+    r"""
+    Constructs a SAM processor which wraps a SAM image processor and a 2D points & bounding boxes processor into a
+    single processor.
+
+    [`SamProcessor`] offers all the functionalities of [`SamImageProcessor`]. See the docstring of
+    [`~SamImageProcessor.__call__`] for more information.
+
+    Args:
+        image_processor (`SamImageProcessor`):
+            An instance of [`SamImageProcessor`]. The image processor is a required input.
+    """
+
+    attributes = ["image_processor"]
+    image_processor_class = "SamImageProcessor"
+
+    def __init__(self, image_processor):
+        super().__init__(image_processor)
+        self.current_processor = self.image_processor
+        self.point_pad_value = -10
+        self.target_size = self.image_processor.size["longest_edge"]
+
+    def __call__(
+        self,
+        images=None,
+        segmentation_maps=None,
+        input_points=None,
+        input_labels=None,
+        input_boxes=None,
+        return_tensors: Optional[Union[str, TensorType]] = None,
+        **kwargs,
+    ) -> BatchEncoding:
+        """
+        This method uses the [`SamImageProcessor.__call__`] method to prepare image(s) for the model. It also
+        prepares 2D points and bounding boxes for the model if they are provided.
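+
+        Example (a small sketch; the checkpoint name, image URL and coordinates are placeholders):
+
+        ```python
+        >>> from PIL import Image
+        >>> import requests
+        >>> from transformers import SamProcessor
+
+        >>> processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
+        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+        >>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
+
+        >>> # one image, one point batch, one point at pixel (x=450, y=600)
+        >>> inputs = processor(images=image, input_points=[[[450, 600]]], return_tensors="pt")
+        >>> inputs["input_points"].shape  # (image batch, point batch, points per mask, 2)
+        torch.Size([1, 1, 1, 2])
+        ```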
+ """ + encoding_image_processor = self.image_processor( + images, + segmentation_maps=segmentation_maps, + return_tensors=return_tensors, + **kwargs, + ) + + # pop arguments that are not used in the foward but used nevertheless + original_sizes = encoding_image_processor["original_sizes"] + + if hasattr(original_sizes, "numpy"): # Checks if Torch or TF tensor + original_sizes = original_sizes.numpy() + + input_points, input_labels, input_boxes = self._check_and_preprocess_points( + input_points=input_points, + input_labels=input_labels, + input_boxes=input_boxes, + ) + + encoding_image_processor = self._normalize_and_convert( + encoding_image_processor, + original_sizes, + input_points=input_points, + input_labels=input_labels, + input_boxes=input_boxes, + return_tensors=return_tensors, + ) + + return encoding_image_processor + + def _normalize_and_convert( + self, + encoding_image_processor, + original_sizes, + input_points=None, + input_labels=None, + input_boxes=None, + return_tensors="pt", + ): + if input_points is not None: + if len(original_sizes) != len(input_points): + input_points = [ + self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points + ] + else: + input_points = [ + self._normalize_coordinates(self.target_size, point, original_size) + for point, original_size in zip(input_points, original_sizes) + ] + # check that all arrays have the same shape + if not all(point.shape == input_points[0].shape for point in input_points): + if input_labels is not None: + input_points, input_labels = self._pad_points_and_labels(input_points, input_labels) + + input_points = np.array(input_points) + + if input_labels is not None: + input_labels = np.array(input_labels) + + if input_boxes is not None: + if len(original_sizes) != len(input_boxes): + input_boxes = [ + self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True) + for box in input_boxes + ] + else: + input_boxes = [ + self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True) + for box, original_size in zip(input_boxes, original_sizes) + ] + input_boxes = np.array(input_boxes) + + if input_boxes is not None: + if return_tensors == "pt": + input_boxes = torch.from_numpy(input_boxes) + # boxes batch size of 1 by default + input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes + elif return_tensors == "tf": + input_boxes = tf.convert_to_tensor(input_boxes) + # boxes batch size of 1 by default + input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes + encoding_image_processor.update({"input_boxes": input_boxes}) + if input_points is not None: + if return_tensors == "pt": + input_points = torch.from_numpy(input_points) + # point batch size of 1 by default + input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points + elif return_tensors == "tf": + input_points = tf.convert_to_tensor(input_points) + # point batch size of 1 by default + input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points + encoding_image_processor.update({"input_points": input_points}) + if input_labels is not None: + if return_tensors == "pt": + input_labels = torch.from_numpy(input_labels) + # point batch size of 1 by default + input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels + elif return_tensors == "tf": + input_labels = tf.convert_to_tensor(input_labels) + # point batch size of 
1 by default
+                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
+            encoding_image_processor.update({"input_labels": input_labels})
+
+        return encoding_image_processor
+
+    def _pad_points_and_labels(self, input_points, input_labels):
+        r"""
+        This method pads the 2D points and labels to the maximum number of points in the batch.
+        """
+        expected_nb_points = max([point.shape[0] for point in input_points])
+        processed_input_points = []
+        for i, point in enumerate(input_points):
+            if point.shape[0] != expected_nb_points:
+                nb_missing = expected_nb_points - point.shape[0]
+                point = np.concatenate([point, np.zeros((nb_missing, 2)) + self.point_pad_value], axis=0)
+                # append one pad label per added pad point, so points and labels stay aligned
+                input_labels[i] = np.append(input_labels[i], [self.point_pad_value] * nb_missing)
+            processed_input_points.append(point)
+        input_points = processed_input_points
+        return input_points, input_labels
+
+    def _normalize_coordinates(
+        self, target_size: int, coords: np.ndarray, original_size, is_bounding_box=False
+    ) -> np.ndarray:
+        """
+        Expects a numpy array of length 2 in the final dimension. Requires the original image size in (H, W) format.
+        """
+        old_h, old_w = original_size
+        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
+        coords = deepcopy(coords).astype(float)
+
+        if is_bounding_box:
+            coords = coords.reshape(-1, 2, 2)
+
+        coords[..., 0] = coords[..., 0] * (new_w / old_w)
+        coords[..., 1] = coords[..., 1] * (new_h / old_h)
+
+        if is_bounding_box:
+            coords = coords.reshape(-1, 4)
+
+        return coords
+
+    def _check_and_preprocess_points(
+        self,
+        input_points=None,
+        input_labels=None,
+        input_boxes=None,
+    ):
+        r"""
+        Checks and preprocesses the 2D points, labels and bounding boxes. It checks that the inputs are valid and,
+        if they are, converts the coordinates of the points and bounding boxes. If a user passes a `torch.Tensor`
+        directly, it is converted to a `numpy.ndarray` and then to a `list`.
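+
+        Example (an illustrative sketch of the expected nesting; `processor` stands for a constructed
+        `SamProcessor` and the coordinates are placeholders):
+
+        ```python
+        >>> points, labels, boxes = processor._check_and_preprocess_points(
+        ...     input_points=[[[450, 600]]],  # images -> points -> (x, y)
+        ...     input_labels=[[1]],  # images -> one label per point
+        ...     input_boxes=[[[75, 275, 1725, 850]]],  # images -> boxes -> (x1, y1, x2, y2)
+        ... )
+        >>> points[0].shape, labels[0].shape, boxes[0].shape
+        ((1, 2), (1,), (1, 4))
+        ```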
+ """ + if input_points is not None: + if hasattr(input_points, "numpy"): # Checks for TF or Torch tensor + input_points = input_points.numpy().tolist() + + if not isinstance(input_points, list) or not isinstance(input_points[0], list): + raise ValueError("Input points must be a list of list of floating points.") + input_points = [np.array(input_point) for input_point in input_points] + else: + input_points = None + + if input_labels is not None: + if hasattr(input_labels, "numpy"): + input_labels = input_labels.numpy().tolist() + + if not isinstance(input_labels, list) or not isinstance(input_labels[0], list): + raise ValueError("Input labels must be a list of list integers.") + input_labels = [np.array(label) for label in input_labels] + else: + input_labels = None + + if input_boxes is not None: + if hasattr(input_boxes, "numpy"): + input_boxes = input_boxes.numpy().tolist() + + if ( + not isinstance(input_boxes, list) + or not isinstance(input_boxes[0], list) + or not isinstance(input_boxes[0][0], list) + ): + raise ValueError("Input boxes must be a list of list of list of floating points.") + input_boxes = [np.array(box).astype(np.float32) for box in input_boxes] + else: + input_boxes = None + + return input_points, input_labels, input_boxes + + @property + def model_input_names(self): + image_processor_input_names = self.image_processor.model_input_names + return list(dict.fromkeys(image_processor_input_names)) + + def post_process_masks(self, *args, **kwargs): + return self.image_processor.post_process_masks(*args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/transformers/models/swin/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/swin/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..39cace5d5e88752f92aefc9ef15101a2c7786c46 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/swin/__init__.py @@ -0,0 +1,86 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available + + +_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]} + + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_swin"] = [ + "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", + "SwinForImageClassification", + "SwinForMaskedImageModeling", + "SwinModel", + "SwinPreTrainedModel", + "SwinBackbone", + ] + +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tf_swin"] = [ + "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFSwinForImageClassification", + "TFSwinForMaskedImageModeling", + "TFSwinModel", + "TFSwinPreTrainedModel", + ] + +if TYPE_CHECKING: + from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_swin import ( + SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, + SwinBackbone, + SwinForImageClassification, + SwinForMaskedImageModeling, + SwinModel, + SwinPreTrainedModel, + ) + + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tf_swin import ( + TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, + TFSwinForImageClassification, + TFSwinForMaskedImageModeling, + TFSwinModel, + TFSwinPreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/swin/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/swin/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58030cd5c61fcfeffd1d6751be6231f78332dc08 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/swin/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/swin/__pycache__/configuration_swin.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/swin/__pycache__/configuration_swin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5663d99cea43da0ecfd5f9959ac26ec0942a313 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/swin/__pycache__/configuration_swin.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/swin/__pycache__/convert_swin_simmim_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/swin/__pycache__/convert_swin_simmim_to_pytorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5cdef86b9ac9a841c3da3b3e8d090a3e62cea68 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/swin/__pycache__/convert_swin_simmim_to_pytorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/swin/__pycache__/convert_swin_timm_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/swin/__pycache__/convert_swin_timm_to_pytorch.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..60f5a2b5a9ee220df9030c89a55e54dd311d86b5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/swin/__pycache__/convert_swin_timm_to_pytorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/swin/__pycache__/modeling_swin.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/swin/__pycache__/modeling_swin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..197b397ea410dc7314d7a4680f80de8974e9421a Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/swin/__pycache__/modeling_swin.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/swin/__pycache__/modeling_tf_swin.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/swin/__pycache__/modeling_tf_swin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa24eebdd97e197df816c300f775f37375d8554a Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/swin/__pycache__/modeling_tf_swin.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/swin/configuration_swin.py b/venv/lib/python3.10/site-packages/transformers/models/swin/configuration_swin.py new file mode 100644 index 0000000000000000000000000000000000000000..9bf460870f9ee0c7fedf8f28367e86718fb38bb3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/swin/configuration_swin.py @@ -0,0 +1,179 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Swin Transformer model configuration""" + +from collections import OrderedDict +from typing import Mapping + +from packaging import version + +from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfig +from ...utils import logging +from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class SwinConfig(BackboneConfigMixin, PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`SwinModel`]. It is used to instantiate a Swin + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the Swin + [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) + architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + image_size (`int`, *optional*, defaults to 224): + The size (resolution) of each image. 
+ patch_size (`int`, *optional*, defaults to 4): + The size (resolution) of each patch. + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + embed_dim (`int`, *optional*, defaults to 96): + Dimensionality of patch embedding. + depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`): + Depth of each layer in the Transformer encoder. + num_heads (`list(int)`, *optional*, defaults to `[3, 6, 12, 24]`): + Number of attention heads in each layer of the Transformer encoder. + window_size (`int`, *optional*, defaults to 7): + Size of windows. + mlp_ratio (`float`, *optional*, defaults to 4.0): + Ratio of MLP hidden dimensionality to embedding dimensionality. + qkv_bias (`bool`, *optional*, defaults to `True`): + Whether or not a learnable bias should be added to the queries, keys and values. + hidden_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout probability for all fully connected layers in the embeddings and encoder. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + drop_path_rate (`float`, *optional*, defaults to 0.1): + Stochastic depth rate. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`, + `"selu"` and `"gelu_new"` are supported. + use_absolute_embeddings (`bool`, *optional*, defaults to `False`): + Whether or not to add absolute position embeddings to the patch embeddings. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-05): + The epsilon used by the layer normalization layers. + encoder_stride (`int`, *optional*, defaults to 32): + Factor to increase the spatial resolution by in the decoder head for masked image modeling. + out_features (`List[str]`, *optional*): + If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. + (depending on how many stages the model has). If unset and `out_indices` is set, will default to the + corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the + same order as defined in the `stage_names` attribute. + out_indices (`List[int]`, *optional*): + If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how + many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. + If unset and `out_features` is unset, will default to the last stage. Must be in the + same order as defined in the `stage_names` attribute. 
+ + Example: + + ```python + >>> from transformers import SwinConfig, SwinModel + + >>> # Initializing a Swin microsoft/swin-tiny-patch4-window7-224 style configuration + >>> configuration = SwinConfig() + + >>> # Initializing a model (with random weights) from the microsoft/swin-tiny-patch4-window7-224 style configuration + >>> model = SwinModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "swin" + + attribute_map = { + "num_attention_heads": "num_heads", + "num_hidden_layers": "num_layers", + } + + def __init__( + self, + image_size=224, + patch_size=4, + num_channels=3, + embed_dim=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4.0, + qkv_bias=True, + hidden_dropout_prob=0.0, + attention_probs_dropout_prob=0.0, + drop_path_rate=0.1, + hidden_act="gelu", + use_absolute_embeddings=False, + initializer_range=0.02, + layer_norm_eps=1e-5, + encoder_stride=32, + out_features=None, + out_indices=None, + **kwargs, + ): + super().__init__(**kwargs) + + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.embed_dim = embed_dim + self.depths = depths + self.num_layers = len(depths) + self.num_heads = num_heads + self.window_size = window_size + self.mlp_ratio = mlp_ratio + self.qkv_bias = qkv_bias + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.drop_path_rate = drop_path_rate + self.hidden_act = hidden_act + self.use_absolute_embeddings = use_absolute_embeddings + self.layer_norm_eps = layer_norm_eps + self.initializer_range = initializer_range + self.encoder_stride = encoder_stride + # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel + # this indicates the channel dimension after the last stage of the model + self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1)) + self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)] + self._out_features, self._out_indices = get_aligned_output_features_output_indices( + out_features=out_features, out_indices=out_indices, stage_names=self.stage_names + ) + + +class SwinOnnxConfig(OnnxConfig): + torch_onnx_minimum_version = version.parse("1.11") + + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict( + [ + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), + ] + ) + + @property + def atol_for_validation(self) -> float: + return 1e-4 diff --git a/venv/lib/python3.10/site-packages/transformers/models/swin/convert_swin_simmim_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/swin/convert_swin_simmim_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..156b0ba86c524340fd2fb59a5f4762dfa874f722 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/swin/convert_swin_simmim_to_pytorch.py @@ -0,0 +1,182 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert Swin SimMIM checkpoints from the original repository. + +URL: https://github.com/microsoft/Swin-Transformer/blob/main/MODELHUB.md#simmim-pretrained-swin-v1-models""" + +import argparse + +import requests +import torch +from PIL import Image + +from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor + + +def get_swin_config(model_name): + config = SwinConfig(image_size=192) + + if "base" in model_name: + window_size = 6 + embed_dim = 128 + depths = (2, 2, 18, 2) + num_heads = (4, 8, 16, 32) + elif "large" in model_name: + window_size = 12 + embed_dim = 192 + depths = (2, 2, 18, 2) + num_heads = (6, 12, 24, 48) + else: + raise ValueError("Model not supported, only supports base and large variants") + + config.window_size = window_size + config.embed_dim = embed_dim + config.depths = depths + config.num_heads = num_heads + + return config + + +def rename_key(name): + if "encoder.mask_token" in name: + name = name.replace("encoder.mask_token", "embeddings.mask_token") + if "encoder.patch_embed.proj" in name: + name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection") + if "encoder.patch_embed.norm" in name: + name = name.replace("encoder.patch_embed.norm", "embeddings.norm") + if "attn.proj" in name: + name = name.replace("attn.proj", "attention.output.dense") + if "attn" in name: + name = name.replace("attn", "attention.self") + if "norm1" in name: + name = name.replace("norm1", "layernorm_before") + if "norm2" in name: + name = name.replace("norm2", "layernorm_after") + if "mlp.fc1" in name: + name = name.replace("mlp.fc1", "intermediate.dense") + if "mlp.fc2" in name: + name = name.replace("mlp.fc2", "output.dense") + + if name == "encoder.norm.weight": + name = "layernorm.weight" + if name == "encoder.norm.bias": + name = "layernorm.bias" + + if "decoder" in name: + pass + else: + name = "swin." 
+ name
+
+    return name
+
+
+def convert_state_dict(orig_state_dict, model):
+    for key in orig_state_dict.copy().keys():
+        val = orig_state_dict.pop(key)
+
+        if "attn_mask" in key:
+            pass
+        elif "qkv" in key:
+            key_split = key.split(".")
+            layer_num = int(key_split[2])
+            block_num = int(key_split[4])
+            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
+
+            if "weight" in key:
+                orig_state_dict[
+                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
+                ] = val[:dim, :]
+                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[
+                    dim : dim * 2, :
+                ]
+                orig_state_dict[
+                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
+                ] = val[-dim:, :]
+            else:
+                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[
+                    :dim
+                ]
+                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
+                    dim : dim * 2
+                ]
+                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[
+                    -dim:
+                ]
+        else:
+            orig_state_dict[rename_key(key)] = val
+
+    return orig_state_dict
+
+
+def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
+    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
+
+    config = get_swin_config(model_name)
+    model = SwinForMaskedImageModeling(config)
+    model.eval()
+
+    new_state_dict = convert_state_dict(state_dict, model)
+    model.load_state_dict(new_state_dict)
+
+    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+
+    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
+    image = Image.open(requests.get(url, stream=True).raw)
+    inputs = image_processor(images=image, return_tensors="pt")
+
+    with torch.no_grad():
+        # keep the full model output so its keys can be inspected below; a bare tensor has no `.keys()`
+        outputs = model(**inputs)
+
+    print(outputs.keys())
+    print("Looks ok!")
+
+    if pytorch_dump_folder_path is not None:
+        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
+        model.save_pretrained(pytorch_dump_folder_path)
+
+        print(f"Saving image processor to {pytorch_dump_folder_path}")
+        image_processor.save_pretrained(pytorch_dump_folder_path)
+
+    if push_to_hub:
+        print(f"Pushing model and image processor for {model_name} to hub")
+        model.push_to_hub(f"microsoft/{model_name}")
+        image_processor.push_to_hub(f"microsoft/{model_name}")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    # Required parameters
+    parser.add_argument(
+        "--model_name",
+        default="swin-base-simmim-window6-192",
+        type=str,
+        choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
+        help="Name of the Swin SimMIM model you'd like to convert.",
+    )
+    parser.add_argument(
+        "--checkpoint_path",
+        default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
+        type=str,
+        help="Path to the original PyTorch checkpoint (.pth file).",
+    )
+    parser.add_argument(
+        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
+    )
+    parser.add_argument(
+        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
diff --git a/venv/lib/python3.10/site-packages/transformers/models/swin/convert_swin_timm_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/swin/convert_swin_timm_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..828237490e0ebd7eb0df8fae05c8c81e5eed4f14
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/swin/convert_swin_timm_to_pytorch.py
@@ -0,0 +1,173 @@
+import argparse
+import json
+
+import requests
+import timm
+import torch
+from huggingface_hub import hf_hub_download
+from PIL import Image
+
+from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
+
+
+def get_swin_config(swin_name):
+    config = SwinConfig()
+    name_split = swin_name.split("_")
+
+    model_size = name_split[1]
+    img_size = int(name_split[4])
+    # parse e.g. "window7" -> 7 and "window12" -> 12; taking only the last character
+    # would silently turn "window12" into 2
+    window_size = int(name_split[3].replace("window", ""))
+
+    if model_size == "tiny":
+        embed_dim = 96
+        depths = (2, 2, 6, 2)
+        num_heads = (3, 6, 12, 24)
+    elif model_size == "small":
+        embed_dim = 96
+        depths = (2, 2, 18, 2)
+        num_heads = (3, 6, 12, 24)
+    elif model_size == "base":
+        embed_dim = 128
+        depths = (2, 2, 18, 2)
+        num_heads = (4, 8, 16, 32)
+    else:
+        embed_dim = 192
+        depths = (2, 2, 18, 2)
+        num_heads = (6, 12, 24, 48)
+
+    if "in22k" in swin_name:
+        num_classes = 21841
+    else:
+        num_classes = 1000
+        repo_id = "huggingface/label-files"
+        filename = "imagenet-1k-id2label.json"
+        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
+        id2label = {int(k): v for k, v in id2label.items()}
+        config.id2label = id2label
+        config.label2id = {v: k for k, v in id2label.items()}
+
+    config.image_size = img_size
+    config.num_labels = num_classes
+    config.embed_dim = embed_dim
+    config.depths = depths
+    config.num_heads = num_heads
+    config.window_size = window_size
+
+    return config
+
+
+def rename_key(name):
+    if "patch_embed.proj" in name:
+        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
+    if "patch_embed.norm" in name:
+        name = name.replace("patch_embed.norm", "embeddings.norm")
+    if "layers" in name:
+        name = "encoder." + name
+    if "attn.proj" in name:
+        name = name.replace("attn.proj", "attention.output.dense")
+    if "attn" in name:
+        name = name.replace("attn", "attention.self")
+    if "norm1" in name:
+        name = name.replace("norm1", "layernorm_before")
+    if "norm2" in name:
+        name = name.replace("norm2", "layernorm_after")
+    if "mlp.fc1" in name:
+        name = name.replace("mlp.fc1", "intermediate.dense")
+    if "mlp.fc2" in name:
+        name = name.replace("mlp.fc2", "output.dense")
+
+    if name == "norm.weight":
+        name = "layernorm.weight"
+    if name == "norm.bias":
+        name = "layernorm.bias"
+
+    if "head" in name:
+        name = name.replace("head", "classifier")
+    else:
+        name = "swin."
+ name + + return name + + +def convert_state_dict(orig_state_dict, model): + for key in orig_state_dict.copy().keys(): + val = orig_state_dict.pop(key) + + if "mask" in key: + continue + elif "qkv" in key: + key_split = key.split(".") + layer_num = int(key_split[1]) + block_num = int(key_split[3]) + dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size + + if "weight" in key: + orig_state_dict[ + f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight" + ] = val[:dim, :] + orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[ + dim : dim * 2, : + ] + orig_state_dict[ + f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight" + ] = val[-dim:, :] + else: + orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[ + :dim + ] + orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[ + dim : dim * 2 + ] + orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[ + -dim: + ] + else: + orig_state_dict[rename_key(key)] = val + + return orig_state_dict + + +def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path): + timm_model = timm.create_model(swin_name, pretrained=True) + timm_model.eval() + + config = get_swin_config(swin_name) + model = SwinForImageClassification(config) + model.eval() + + new_state_dict = convert_state_dict(timm_model.state_dict(), model) + model.load_state_dict(new_state_dict) + + url = "http://images.cocodataset.org/val2017/000000039769.jpg" + + image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-"))) + image = Image.open(requests.get(url, stream=True).raw) + inputs = image_processor(images=image, return_tensors="pt") + + timm_outs = timm_model(inputs["pixel_values"]) + hf_outs = model(**inputs).logits + + assert torch.allclose(timm_outs, hf_outs, atol=1e-3) + + print(f"Saving model {swin_name} to {pytorch_dump_folder_path}") + model.save_pretrained(pytorch_dump_folder_path) + + print(f"Saving image processor to {pytorch_dump_folder_path}") + image_processor.save_pretrained(pytorch_dump_folder_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--swin_name", + default="swin_tiny_patch4_window7_224", + type=str, + help="Name of the Swin timm model you'd like to convert.", + ) + parser.add_argument( + "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." + ) + + args = parser.parse_args() + convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path) diff --git a/venv/lib/python3.10/site-packages/transformers/models/swin/modeling_swin.py b/venv/lib/python3.10/site-packages/transformers/models/swin/modeling_swin.py new file mode 100644 index 0000000000000000000000000000000000000000..c841faddf0df913ae3bf04096cf08d1fc80073c9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/swin/modeling_swin.py @@ -0,0 +1,1352 @@ +# coding=utf-8 +# Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch Swin Transformer model.""" + + +import collections.abc +import math +import warnings +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...modeling_outputs import BackboneOutput +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer +from ...utils import ( + ModelOutput, + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from ...utils.backbone_utils import BackboneMixin +from .configuration_swin import SwinConfig + + +logger = logging.get_logger(__name__) + +# General docstring +_CONFIG_FOR_DOC = "SwinConfig" + +# Base docstring +_CHECKPOINT_FOR_DOC = "microsoft/swin-tiny-patch4-window7-224" +_EXPECTED_OUTPUT_SHAPE = [1, 49, 768] + +# Image classification docstring +_IMAGE_CLASS_CHECKPOINT = "microsoft/swin-tiny-patch4-window7-224" +_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" + + +from ..deprecated._archive_maps import SWIN_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +# drop_path, SwinPatchEmbeddings, SwinPatchMerging and SwinDropPath are from the timm library. + + +@dataclass +class SwinEncoderOutput(ModelOutput): + """ + Swin encoder's outputs, with potential hidden states and attentions. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of + shape `(batch_size, hidden_size, height, width)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to + include the spatial dimensions. 
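+            These spatial feature maps are the form consumed by dense-prediction heads and by [`SwinBackbone`].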
+ """ + + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class SwinModelOutput(ModelOutput): + """ + Swin model's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed): + Average pooling of the last layer hidden-state. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of + shape `(batch_size, hidden_size, height, width)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to + include the spatial dimensions. + """ + + last_hidden_state: torch.FloatTensor = None + pooler_output: Optional[torch.FloatTensor] = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + + +@dataclass +class SwinMaskedImageModelingOutput(ModelOutput): + """ + Swin masked image model outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided): + Masked image modeling (MLM) loss. + reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Reconstructed pixel values. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. 
+ reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of + shape `(batch_size, hidden_size, height, width)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to + include the spatial dimensions. + """ + + loss: Optional[torch.FloatTensor] = None + reconstruction: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + + @property + def logits(self): + warnings.warn( + "logits attribute is deprecated and will be removed in version 5 of Transformers." + " Please use the reconstruction attribute to retrieve the final output instead.", + FutureWarning, + ) + return self.reconstruction + + +@dataclass +class SwinImageClassifierOutput(ModelOutput): + """ + Swin outputs for image classification. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Classification (or regression if config.num_labels==1) loss. + logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): + Classification (or regression if config.num_labels==1) scores (before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of + shape `(batch_size, hidden_size, height, width)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to + include the spatial dimensions. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + attentions: Optional[Tuple[torch.FloatTensor, ...]] = None + reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None + + +def window_partition(input_feature, window_size): + """ + Partitions the given input into windows. 
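+
+    Example (shape sketch):
+
+    ```python
+    >>> import torch
+    >>> windows = window_partition(torch.randn(1, 56, 56, 96), window_size=7)
+    >>> tuple(windows.shape)
+    (64, 7, 7, 96)
+    ```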
+ """ + batch_size, height, width, num_channels = input_feature.shape + input_feature = input_feature.view( + batch_size, height // window_size, window_size, width // window_size, window_size, num_channels + ) + windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels) + return windows + + +def window_reverse(windows, window_size, height, width): + """ + Merges windows to produce higher resolution features. + """ + num_channels = windows.shape[-1] + windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels) + windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels) + return windows + + +class SwinEmbeddings(nn.Module): + """ + Construct the patch and position embeddings. Optionally, also the mask token. + """ + + def __init__(self, config, use_mask_token=False): + super().__init__() + + self.patch_embeddings = SwinPatchEmbeddings(config) + num_patches = self.patch_embeddings.num_patches + self.patch_grid = self.patch_embeddings.grid_size + self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None + + if config.use_absolute_embeddings: + self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim)) + else: + self.position_embeddings = None + + self.norm = nn.LayerNorm(config.embed_dim) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward( + self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor] = None + ) -> Tuple[torch.Tensor]: + embeddings, output_dimensions = self.patch_embeddings(pixel_values) + embeddings = self.norm(embeddings) + batch_size, seq_len, _ = embeddings.size() + + if bool_masked_pos is not None: + mask_tokens = self.mask_token.expand(batch_size, seq_len, -1) + # replace the masked visual tokens by mask_tokens + mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) + embeddings = embeddings * (1.0 - mask) + mask_tokens * mask + + if self.position_embeddings is not None: + embeddings = embeddings + self.position_embeddings + + embeddings = self.dropout(embeddings) + + return embeddings, output_dimensions + + +class SwinPatchEmbeddings(nn.Module): + """ + This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial + `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a + Transformer. 
+ """ + + def __init__(self, config): + super().__init__() + image_size, patch_size = config.image_size, config.patch_size + num_channels, hidden_size = config.num_channels, config.embed_dim + image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) + patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) + num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.num_patches = num_patches + self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1]) + + self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) + + def maybe_pad(self, pixel_values, height, width): + if width % self.patch_size[1] != 0: + pad_values = (0, self.patch_size[1] - width % self.patch_size[1]) + pixel_values = nn.functional.pad(pixel_values, pad_values) + if height % self.patch_size[0] != 0: + pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0]) + pixel_values = nn.functional.pad(pixel_values, pad_values) + return pixel_values + + def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]: + _, num_channels, height, width = pixel_values.shape + if num_channels != self.num_channels: + raise ValueError( + "Make sure that the channel dimension of the pixel values match with the one set in the configuration." + ) + # pad the input to be divisible by self.patch_size, if needed + pixel_values = self.maybe_pad(pixel_values, height, width) + embeddings = self.projection(pixel_values) + _, _, height, width = embeddings.shape + output_dimensions = (height, width) + embeddings = embeddings.flatten(2).transpose(1, 2) + + return embeddings, output_dimensions + + +class SwinPatchMerging(nn.Module): + """ + Patch Merging Layer. + + Args: + input_resolution (`Tuple[int]`): + Resolution of input feature. + dim (`int`): + Number of input channels. + norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`): + Normalization layer class. 
+ """ + + def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None: + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) + self.norm = norm_layer(4 * dim) + + def maybe_pad(self, input_feature, height, width): + should_pad = (height % 2 == 1) or (width % 2 == 1) + if should_pad: + pad_values = (0, 0, 0, width % 2, 0, height % 2) + input_feature = nn.functional.pad(input_feature, pad_values) + + return input_feature + + def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor: + height, width = input_dimensions + # `dim` is height * width + batch_size, dim, num_channels = input_feature.shape + + input_feature = input_feature.view(batch_size, height, width, num_channels) + # pad input to be disible by width and height, if needed + input_feature = self.maybe_pad(input_feature, height, width) + # [batch_size, height/2, width/2, num_channels] + input_feature_0 = input_feature[:, 0::2, 0::2, :] + # [batch_size, height/2, width/2, num_channels] + input_feature_1 = input_feature[:, 1::2, 0::2, :] + # [batch_size, height/2, width/2, num_channels] + input_feature_2 = input_feature[:, 0::2, 1::2, :] + # [batch_size, height/2, width/2, num_channels] + input_feature_3 = input_feature[:, 1::2, 1::2, :] + # batch_size height/2 width/2 4*num_channels + input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1) + input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # batch_size height/2*width/2 4*C + + input_feature = self.norm(input_feature) + input_feature = self.reduction(input_feature) + + return input_feature + + +# Copied from transformers.models.beit.modeling_beit.drop_path +def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: + """ + Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, + however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the + layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the + argument. 
+ """ + if drop_prob == 0.0 or not training: + return input + keep_prob = 1 - drop_prob + shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) + random_tensor.floor_() # binarize + output = input.div(keep_prob) * random_tensor + return output + + +# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->Swin +class SwinDropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" + + def __init__(self, drop_prob: Optional[float] = None) -> None: + super().__init__() + self.drop_prob = drop_prob + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + return drop_path(hidden_states, self.drop_prob, self.training) + + def extra_repr(self) -> str: + return "p={}".format(self.drop_prob) + + +class SwinSelfAttention(nn.Module): + def __init__(self, config, dim, num_heads, window_size): + super().__init__() + if dim % num_heads != 0: + raise ValueError( + f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})" + ) + + self.num_attention_heads = num_heads + self.attention_head_size = int(dim / num_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + self.window_size = ( + window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size) + ) + + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads) + ) + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij")) + coords_flatten = torch.flatten(coords, 1) + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] + relative_coords = relative_coords.permute(1, 2, 0).contiguous() + relative_coords[:, :, 0] += self.window_size[0] - 1 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) + self.register_buffer("relative_position_index", relative_position_index) + + self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) + self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) + self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + batch_size, dim, num_channels = hidden_states.shape + mixed_query_layer = self.query(hidden_states) + + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + query_layer = self.transpose_for_scores(mixed_query_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. 
+        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+
+        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)]
+        relative_position_bias = relative_position_bias.view(
+            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
+        )
+
+        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
+        attention_scores = attention_scores + relative_position_bias.unsqueeze(0)
+
+        if attention_mask is not None:
+            # Apply the attention mask (precomputed for all layers in the SwinModel forward() function)
+            mask_shape = attention_mask.shape[0]
+            attention_scores = attention_scores.view(
+                batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim
+            )
+            attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
+            attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)
+
+        # Normalize the attention scores to probabilities.
+        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+        # This is actually dropping out entire tokens to attend to, which might
+        # seem a bit unusual, but is taken from the original Transformer paper.
+        attention_probs = self.dropout(attention_probs)
+
+        # Mask heads if we want to
+        if head_mask is not None:
+            attention_probs = attention_probs * head_mask
+
+        context_layer = torch.matmul(attention_probs, value_layer)
+        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+        context_layer = context_layer.view(new_context_layer_shape)
+
+        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+        return outputs
+
+
+class SwinSelfOutput(nn.Module):
+    def __init__(self, config, dim):
+        super().__init__()
+        self.dense = nn.Linear(dim, dim)
+        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+        hidden_states = self.dense(hidden_states)
+        hidden_states = self.dropout(hidden_states)
+
+        return hidden_states
+
+
+class SwinAttention(nn.Module):
+    def __init__(self, config, dim, num_heads, window_size):
+        super().__init__()
+        self.self = SwinSelfAttention(config, dim, num_heads, window_size)
+        self.output = SwinSelfOutput(config, dim)
+        self.pruned_heads = set()
+
+    def prune_heads(self, heads):
+        if len(heads) == 0:
+            return
+        heads, index = find_pruneable_heads_and_indices(
+            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+        )
+
+        # Prune linear layers
+        self.self.query = prune_linear_layer(self.self.query, index)
+        self.self.key = prune_linear_layer(self.self.key, index)
+        self.self.value = prune_linear_layer(self.self.value, index)
+        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+        # Update hyper params and store pruned heads
+        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+        self.pruned_heads = self.pruned_heads.union(heads)
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        attention_mask: Optional[torch.FloatTensor] = None,
+        head_mask: Optional[torch.FloatTensor] = None,
+        output_attentions: Optional[bool] = False,
+    ) -> Tuple[torch.Tensor]:
+        self_outputs = self.self(hidden_states, attention_mask, head_mask,
output_attentions) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +class SwinIntermediate(nn.Module): + def __init__(self, config, dim): + super().__init__() + self.dense = nn.Linear(dim, int(config.mlp_ratio * dim)) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class SwinOutput(nn.Module): + def __init__(self, config, dim): + super().__init__() + self.dense = nn.Linear(int(config.mlp_ratio * dim), dim) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + return hidden_states + + +class SwinLayer(nn.Module): + def __init__(self, config, dim, input_resolution, num_heads, shift_size=0): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.shift_size = shift_size + self.window_size = config.window_size + self.input_resolution = input_resolution + self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) + self.attention = SwinAttention(config, dim, num_heads, window_size=self.window_size) + self.drop_path = SwinDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity() + self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) + self.intermediate = SwinIntermediate(config, dim) + self.output = SwinOutput(config, dim) + + def set_shift_and_window_size(self, input_resolution): + if min(input_resolution) <= self.window_size: + # if window size is larger than input resolution, we don't partition windows + self.shift_size = 0 + self.window_size = min(input_resolution) + + def get_attn_mask(self, height, width, dtype): + if self.shift_size > 0: + # calculate attention mask for SW-MSA + img_mask = torch.zeros((1, height, width, 1), dtype=dtype) + height_slices = ( + slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None), + ) + width_slices = ( + slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None), + ) + count = 0 + for height_slice in height_slices: + for width_slice in width_slices: + img_mask[:, height_slice, width_slice, :] = count + count += 1 + + mask_windows = window_partition(img_mask, self.window_size) + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + else: + attn_mask = None + return attn_mask + + def maybe_pad(self, hidden_states, height, width): + pad_right = (self.window_size - width % self.window_size) % self.window_size + pad_bottom = (self.window_size - height % self.window_size) % self.window_size + pad_values = (0, 0, 0, pad_right, 0, pad_bottom) + hidden_states = nn.functional.pad(hidden_states, pad_values) + return hidden_states, pad_values + + def forward( + self, + hidden_states: torch.Tensor, + input_dimensions: Tuple[int, int], + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] 
= False, + always_partition: Optional[bool] = False, + ) -> Tuple[torch.Tensor, torch.Tensor]: + if not always_partition: + self.set_shift_and_window_size(input_dimensions) + else: + pass + height, width = input_dimensions + batch_size, _, channels = hidden_states.size() + shortcut = hidden_states + + hidden_states = self.layernorm_before(hidden_states) + + hidden_states = hidden_states.view(batch_size, height, width, channels) + + # pad hidden_states to multiples of window size + hidden_states, pad_values = self.maybe_pad(hidden_states, height, width) + + _, height_pad, width_pad, _ = hidden_states.shape + # cyclic shift + if self.shift_size > 0: + shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + else: + shifted_hidden_states = hidden_states + + # partition windows + hidden_states_windows = window_partition(shifted_hidden_states, self.window_size) + hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels) + attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype) + if attn_mask is not None: + attn_mask = attn_mask.to(hidden_states_windows.device) + + attention_outputs = self.attention( + hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions + ) + + attention_output = attention_outputs[0] + + attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels) + shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad) + + # reverse cyclic shift + if self.shift_size > 0: + attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + attention_windows = shifted_windows + + was_padded = pad_values[3] > 0 or pad_values[5] > 0 + if was_padded: + attention_windows = attention_windows[:, :height, :width, :].contiguous() + + attention_windows = attention_windows.view(batch_size, height * width, channels) + + hidden_states = shortcut + self.drop_path(attention_windows) + + layer_output = self.layernorm_after(hidden_states) + layer_output = self.intermediate(layer_output) + layer_output = hidden_states + self.output(layer_output) + + layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) + return layer_outputs + + +class SwinStage(nn.Module): + def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample): + super().__init__() + self.config = config + self.dim = dim + self.blocks = nn.ModuleList( + [ + SwinLayer( + config=config, + dim=dim, + input_resolution=input_resolution, + num_heads=num_heads, + shift_size=0 if (i % 2 == 0) else config.window_size // 2, + ) + for i in range(depth) + ] + ) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm) + else: + self.downsample = None + + self.pointing = False + + def forward( + self, + hidden_states: torch.Tensor, + input_dimensions: Tuple[int, int], + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = False, + always_partition: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + height, width = input_dimensions + for i, layer_module in enumerate(self.blocks): + layer_head_mask = head_mask[i] if head_mask is not None else None + + layer_outputs = layer_module( + hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition + ) + + hidden_states = layer_outputs[0] + + 
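+        # keep the pre-downsampling features: `SwinBackbone` and callers passing
+        # `output_hidden_states_before_downsampling=True` read these out per stage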
hidden_states_before_downsampling = hidden_states + if self.downsample is not None: + height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2 + output_dimensions = (height, width, height_downsampled, width_downsampled) + hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions) + else: + output_dimensions = (height, width, height, width) + + stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions) + + if output_attentions: + stage_outputs += layer_outputs[1:] + return stage_outputs + + +class SwinEncoder(nn.Module): + def __init__(self, config, grid_size): + super().__init__() + self.num_layers = len(config.depths) + self.config = config + dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] + self.layers = nn.ModuleList( + [ + SwinStage( + config=config, + dim=int(config.embed_dim * 2**i_layer), + input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)), + depth=config.depths[i_layer], + num_heads=config.num_heads[i_layer], + drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])], + downsample=SwinPatchMerging if (i_layer < self.num_layers - 1) else None, + ) + for i_layer in range(self.num_layers) + ] + ) + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + input_dimensions: Tuple[int, int], + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = False, + output_hidden_states: Optional[bool] = False, + output_hidden_states_before_downsampling: Optional[bool] = False, + always_partition: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> Union[Tuple, SwinEncoderOutput]: + all_hidden_states = () if output_hidden_states else None + all_reshaped_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + + if output_hidden_states: + batch_size, _, hidden_size = hidden_states.shape + # rearrange b (h w) c -> b c h w + reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) + reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) + all_hidden_states += (hidden_states,) + all_reshaped_hidden_states += (reshaped_hidden_state,) + + for i, layer_module in enumerate(self.layers): + layer_head_mask = head_mask[i] if head_mask is not None else None + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + layer_module.__call__, + hidden_states, + input_dimensions, + layer_head_mask, + output_attentions, + always_partition, + ) + else: + layer_outputs = layer_module( + hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition + ) + + hidden_states = layer_outputs[0] + hidden_states_before_downsampling = layer_outputs[1] + output_dimensions = layer_outputs[2] + + input_dimensions = (output_dimensions[-2], output_dimensions[-1]) + + if output_hidden_states and output_hidden_states_before_downsampling: + batch_size, _, hidden_size = hidden_states_before_downsampling.shape + # rearrange b (h w) c -> b c h w + # here we use the original (not downsampled) height and width + reshaped_hidden_state = hidden_states_before_downsampling.view( + batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size + ) + reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) + all_hidden_states += (hidden_states_before_downsampling,) + all_reshaped_hidden_states += 
(reshaped_hidden_state,) + elif output_hidden_states and not output_hidden_states_before_downsampling: + batch_size, _, hidden_size = hidden_states.shape + # rearrange b (h w) c -> b c h w + reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) + reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) + all_hidden_states += (hidden_states,) + all_reshaped_hidden_states += (reshaped_hidden_state,) + + if output_attentions: + all_self_attentions += layer_outputs[3:] + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) + + return SwinEncoderOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + reshaped_hidden_states=all_reshaped_hidden_states, + ) + + +class SwinPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = SwinConfig + base_model_prefix = "swin" + main_input_name = "pixel_values" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, (nn.Linear, nn.Conv2d)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + +SWIN_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use + it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`SwinConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +SWIN_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`] + for details. + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare Swin Model transformer outputting raw hidden-states without any specific head on top.", + SWIN_START_DOCSTRING, + """ + add_pooling_layer (`bool`, *optional*, defaults to `True`): + Whether or not to apply pooling layer. 
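+            When enabled, the pooled output is an adaptive average over all tokens of the final stage (Swin
+            has no [CLS] token).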
+ use_mask_token (`bool`, *optional*, defaults to `False`): + Whether or not to create and apply mask tokens in the embedding layer. + """, +) +class SwinModel(SwinPreTrainedModel): + def __init__(self, config, add_pooling_layer=True, use_mask_token=False): + super().__init__(config) + self.config = config + self.num_layers = len(config.depths) + self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1)) + + self.embeddings = SwinEmbeddings(config, use_mask_token=use_mask_token) + self.encoder = SwinEncoder(config, self.embeddings.patch_grid) + + self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps) + self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.patch_embeddings + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=SwinModelOutput, + config_class=_CONFIG_FOR_DOC, + modality="vision", + expected_output=_EXPECTED_OUTPUT_SHAPE, + ) + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + bool_masked_pos: Optional[torch.BoolTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SwinModelOutput]: + r""" + bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*): + Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). 
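+            Only meaningful when the model was instantiated with `use_mask_token=True` (as done for SimMIM
+            pre-training); otherwise there is no mask token to substitute for the masked patches.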
+ """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, len(self.config.depths)) + + embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos) + + encoder_outputs = self.encoder( + embedding_output, + input_dimensions, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = encoder_outputs[0] + sequence_output = self.layernorm(sequence_output) + + pooled_output = None + if self.pooler is not None: + pooled_output = self.pooler(sequence_output.transpose(1, 2)) + pooled_output = torch.flatten(pooled_output, 1) + + if not return_dict: + output = (sequence_output, pooled_output) + encoder_outputs[1:] + + return output + + return SwinModelOutput( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + reshaped_hidden_states=encoder_outputs.reshaped_hidden_states, + ) + + +@add_start_docstrings( + """Swin Model with a decoder on top for masked image modeling, as proposed in [SimMIM](https://arxiv.org/abs/2111.09886). + + + + Note that we provide a script to pre-train this model on custom data in our [examples + directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining). + + + """, + SWIN_START_DOCSTRING, +) +class SwinForMaskedImageModeling(SwinPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.swin = SwinModel(config, add_pooling_layer=False, use_mask_token=True) + + num_features = int(config.embed_dim * 2 ** (config.num_layers - 1)) + self.decoder = nn.Sequential( + nn.Conv2d( + in_channels=num_features, out_channels=config.encoder_stride**2 * config.num_channels, kernel_size=1 + ), + nn.PixelShuffle(config.encoder_stride), + ) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=SwinMaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + bool_masked_pos: Optional[torch.BoolTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SwinMaskedImageModelingOutput]: + r""" + bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`): + Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). 
+
+        Returns:
+
+        Examples:
+        ```python
+        >>> from transformers import AutoImageProcessor, SwinForMaskedImageModeling
+        >>> import torch
+        >>> from PIL import Image
+        >>> import requests
+
+        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+        >>> image = Image.open(requests.get(url, stream=True).raw)
+
+        >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/swin-base-simmim-window6-192")
+        >>> model = SwinForMaskedImageModeling.from_pretrained("microsoft/swin-base-simmim-window6-192")
+
+        >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
+        >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
+        >>> # create random boolean mask of shape (batch_size, num_patches)
+        >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()
+
+        >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
+        >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction
+        >>> list(reconstructed_pixel_values.shape)
+        [1, 3, 192, 192]
+        ```"""
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        outputs = self.swin(
+            pixel_values,
+            bool_masked_pos=bool_masked_pos,
+            head_mask=head_mask,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+        sequence_output = outputs[0]
+        # Reshape to (batch_size, num_channels, height, width)
+        sequence_output = sequence_output.transpose(1, 2)
+        batch_size, num_channels, sequence_length = sequence_output.shape
+        height = width = math.floor(sequence_length**0.5)
+        sequence_output = sequence_output.reshape(batch_size, num_channels, height, width)
+
+        # Reconstruct pixel values
+        reconstructed_pixel_values = self.decoder(sequence_output)
+
+        masked_im_loss = None
+        if bool_masked_pos is not None:
+            size = self.config.image_size // self.config.patch_size
+            bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
+            mask = (
+                bool_masked_pos.repeat_interleave(self.config.patch_size, 1)
+                .repeat_interleave(self.config.patch_size, 2)
+                .unsqueeze(1)
+                .contiguous()
+            )
+            reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none")
+            masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels
+
+        if not return_dict:
+            output = (reconstructed_pixel_values,) + outputs[2:]
+            return ((masked_im_loss,) + output) if masked_im_loss is not None else output
+
+        return SwinMaskedImageModelingOutput(
+            loss=masked_im_loss,
+            reconstruction=reconstructed_pixel_values,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+            reshaped_hidden_states=outputs.reshaped_hidden_states,
+        )
+
+
+@add_start_docstrings(
+    """
+    Swin Model transformer with an image classification head on top (a linear layer on top of the average-pooled
+    final hidden state), e.g. for ImageNet. Note that Swin has no [CLS] token; the head operates on the pooled output.
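+
+    Depending on `config.problem_type` (inferred from the labels if unset), the head is trained with a
+    mean-squared-error, cross-entropy, or binary-cross-entropy-with-logits loss, covering regression,
+    single-label classification, and multi-label classification respectively.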
+ """, + SWIN_START_DOCSTRING, +) +class SwinForImageClassification(SwinPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.num_labels = config.num_labels + self.swin = SwinModel(config) + + # Classifier head + self.classifier = ( + nn.Linear(self.swin.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity() + ) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_IMAGE_CLASS_CHECKPOINT, + output_type=SwinImageClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, + ) + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SwinImageClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the image classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.swin( + pixel_values, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + logits = self.classifier(pooled_output) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return SwinImageClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + reshaped_hidden_states=outputs.reshaped_hidden_states, + ) + + +@add_start_docstrings( + """ + Swin backbone, to be used with frameworks like DETR and MaskFormer. 
+ """, + SWIN_START_DOCSTRING, +) +class SwinBackbone(SwinPreTrainedModel, BackboneMixin): + def __init__(self, config: SwinConfig): + super().__init__(config) + super()._init_backbone(config) + + self.num_features = [config.embed_dim] + [int(config.embed_dim * 2**i) for i in range(len(config.depths))] + self.embeddings = SwinEmbeddings(config) + self.encoder = SwinEncoder(config, self.embeddings.patch_grid) + + # Add layer norms to hidden states of out_features + hidden_states_norms = {} + for stage, num_channels in zip(self._out_features, self.channels): + hidden_states_norms[stage] = nn.LayerNorm(num_channels) + self.hidden_states_norms = nn.ModuleDict(hidden_states_norms) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.patch_embeddings + + def forward( + self, + pixel_values: torch.Tensor, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> BackboneOutput: + """ + Returns: + + Examples: + + ```python + >>> from transformers import AutoImageProcessor, AutoBackbone + >>> import torch + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224") + >>> model = AutoBackbone.from_pretrained( + ... "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"] + ... ) + + >>> inputs = processor(image, return_tensors="pt") + >>> outputs = model(**inputs) + >>> feature_maps = outputs.feature_maps + >>> list(feature_maps[-1].shape) + [1, 768, 7, 7] + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + + embedding_output, input_dimensions = self.embeddings(pixel_values) + + outputs = self.encoder( + embedding_output, + input_dimensions, + head_mask=None, + output_attentions=output_attentions, + output_hidden_states=True, + output_hidden_states_before_downsampling=True, + always_partition=True, + return_dict=True, + ) + + hidden_states = outputs.reshaped_hidden_states + + feature_maps = () + for stage, hidden_state in zip(self.stage_names, hidden_states): + if stage in self.out_features: + batch_size, num_channels, height, width = hidden_state.shape + hidden_state = hidden_state.permute(0, 2, 3, 1).contiguous() + hidden_state = hidden_state.view(batch_size, height * width, num_channels) + hidden_state = self.hidden_states_norms[stage](hidden_state) + hidden_state = hidden_state.view(batch_size, height, width, num_channels) + hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous() + feature_maps += (hidden_state,) + + if not return_dict: + output = (feature_maps,) + if output_hidden_states: + output += (outputs.hidden_states,) + return output + + return BackboneOutput( + feature_maps=feature_maps, + hidden_states=outputs.hidden_states if output_hidden_states else None, + attentions=outputs.attentions, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/swin/modeling_tf_swin.py b/venv/lib/python3.10/site-packages/transformers/models/swin/modeling_tf_swin.py new file mode 100644 index 
0000000000000000000000000000000000000000..b9a10793406916b965114697be17f55c29498f3e --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/swin/modeling_tf_swin.py @@ -0,0 +1,1631 @@ +# coding=utf-8 +# Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" TF 2.0 Swin Transformer model.""" + + +from __future__ import annotations + +import collections.abc +import math +import warnings +from dataclasses import dataclass +from functools import partial +from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union + +import tensorflow as tf + +from ...activations_tf import ACT2FN +from ...modeling_tf_utils import ( + TFPreTrainedModel, + TFSequenceClassificationLoss, + get_initializer, + keras, + keras_serializable, + unpack_inputs, +) +from ...tf_utils import shape_list +from ...utils import ( + ModelOutput, + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_swin import SwinConfig + + +logger = logging.get_logger(__name__) + +# General docstring +_CONFIG_FOR_DOC = "SwinConfig" + +# Base docstring +_CHECKPOINT_FOR_DOC = "microsoft/swin-tiny-patch4-window7-224" +_EXPECTED_OUTPUT_SHAPE = [1, 49, 768] + +# Image classification docstring +_IMAGE_CLASS_CHECKPOINT = "microsoft/swin-tiny-patch4-window7-224" +_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" + + +from ..deprecated._archive_maps import TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +# drop_path, TFSwinPatchEmbeddings, TFSwinPatchMerging and TFSwinDropPath are tensorflow +# implementations of PyTorch functionalities in the timm library. + + +@dataclass +class TFSwinEncoderOutput(ModelOutput): + """ + Swin encoder's outputs, with potential hidden states and attentions. + + Args: + last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each stage) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. 
+ reshaped_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each stage) of shape + `(batch_size, hidden_size, height, width)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to + include the spatial dimensions. + """ + + last_hidden_state: tf.Tensor = None + hidden_states: Tuple[tf.Tensor, ...] | None = None + attentions: Tuple[tf.Tensor, ...] | None = None + reshaped_hidden_states: Tuple[tf.Tensor, ...] | None = None + + +@dataclass +class TFSwinModelOutput(ModelOutput): + """ + Swin model's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + pooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed): + Average pooling of the last layer hidden-state. + hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each stage) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + reshaped_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each stage) of shape + `(batch_size, hidden_size, height, width)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to + include the spatial dimensions. + """ + + last_hidden_state: tf.Tensor = None + pooler_output: tf.Tensor | None = None + hidden_states: Tuple[tf.Tensor, ...] | None = None + attentions: Tuple[tf.Tensor, ...] | None = None + reshaped_hidden_states: Tuple[tf.Tensor, ...] | None = None + + +@dataclass +class TFSwinMaskedImageModelingOutput(ModelOutput): + """ + Swin masked image model outputs. + + Args: + loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided): + Masked image modeling (MLM) loss. + reconstruction (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): + Reconstructed pixel values. + hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each stage) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + reshaped_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each stage) of shape + `(batch_size, hidden_size, height, width)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to + include the spatial dimensions. + """ + + loss: tf.Tensor | None = None + reconstruction: tf.Tensor = None + hidden_states: Tuple[tf.Tensor, ...] | None = None + attentions: Tuple[tf.Tensor, ...] | None = None + reshaped_hidden_states: Tuple[tf.Tensor, ...] | None = None + + @property + def logits(self): + warnings.warn( + "logits attribute is deprecated and will be removed in version 5 of Transformers." + " Please use the reconstruction attribute to retrieve the final output instead.", + FutureWarning, + ) + return self.reconstruction + + +@dataclass +class TFSwinImageClassifierOutput(ModelOutput): + """ + Swin outputs for image classification. + + Args: + loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Classification (or regression if config.num_labels==1) loss. + logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`): + Classification (or regression if config.num_labels==1) scores (before SoftMax). + hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each stage) of shape + `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + reshaped_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each stage) of shape + `(batch_size, hidden_size, height, width)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to + include the spatial dimensions. + """ + + loss: tf.Tensor | None = None + logits: tf.Tensor = None + hidden_states: Tuple[tf.Tensor, ...] | None = None + attentions: Tuple[tf.Tensor, ...] | None = None + reshaped_hidden_states: Tuple[tf.Tensor, ...] | None = None + + +def window_partition(input_feature: tf.Tensor, window_size: int) -> tf.Tensor: + """ + Partitions the given input into windows. 
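+ For example, with `window_size=7`, a `(batch_size, 56, 56, num_channels)` feature map is split into 8 * 8 = 64 non-overlapping 7x7 windows and returned as `(batch_size * 64, 7, 7, num_channels)`.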
+ """ + batch_size, height, width, num_channels = shape_list(input_feature) + input_feature = tf.reshape( + input_feature, + (batch_size, height // window_size, window_size, width // window_size, window_size, num_channels), + ) + windows = tf.transpose(input_feature, (0, 1, 3, 2, 4, 5)) + windows = tf.reshape(windows, (-1, window_size, window_size, num_channels)) + return windows + + +def window_reverse(windows: tf.Tensor, window_size: int, height: int, width: int) -> tf.Tensor: + """ + Merges windows to produce higher resolution features. + """ + x = tf.shape(windows)[0] + y = tf.cast(height * width / (window_size * window_size), tf.int32) + batch_size = tf.math.floordiv(x, y) + windows = tf.reshape( + windows, (batch_size, height // window_size, width // window_size, window_size, window_size, -1) + ) + windows = tf.transpose(windows, (0, 1, 3, 2, 4, 5)) + windows = tf.reshape(windows, (batch_size, height, width, -1)) + return windows + + +def drop_path( + input: tf.Tensor, drop_prob: float = 0.0, training: bool = False, scale_by_keep: bool = True +) -> tf.Tensor: + """ + Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + """ + if drop_prob == 0.0 or not training: + return input + keep_prob = 1 - drop_prob + input_shape = shape_list(input) + ndim = len(input_shape) + shape = [input_shape[0]] + [1] * (ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = tf.random.uniform(shape) + random_tensor = tf.where(random_tensor <= keep_prob, 1.0, 0.0) + if keep_prob > 0.0 and scale_by_keep: + random_tensor /= keep_prob + return input * random_tensor + + +class TFSwinEmbeddings(keras.layers.Layer): + """ + Construct the patch and position embeddings. Optionally, also the mask token. + """ + + def __init__(self, config: SwinConfig, use_mask_token: bool = False, **kwargs) -> None: + super().__init__(**kwargs) + self.patch_embeddings = TFSwinPatchEmbeddings(config, name="patch_embeddings") + self.num_patches = self.patch_embeddings.num_patches + self.patch_grid = self.patch_embeddings.grid_size + self.embed_dim = config.embed_dim + self.use_mask_token = use_mask_token + self.use_absolute_embeddings = config.use_absolute_embeddings + + self.norm = keras.layers.LayerNormalization(name="norm", epsilon=1e-5) + self.dropout = keras.layers.Dropout(config.hidden_dropout_prob, name="dropout") + self.config = config + + def build(self, input_shape: tf.TensorShape) -> None: + if self.use_mask_token: + self.mask_token = self.add_weight(shape=(1, 1, self.embed_dim), initializer="zeros", name="mask_token") + else: + self.mask_token = None + + if self.use_absolute_embeddings: + self.position_embeddings = self.add_weight( + (1, self.num_patches + 1, self.embed_dim), initializer="zeros", name="positional_embeddings" + ) + else: + self.position_embeddings = None + + if self.built: + return + self.built = True + if getattr(self, "patch_embeddings", None) is not None: + with tf.name_scope(self.patch_embeddings.name): + self.patch_embeddings.build(None) + if getattr(self, "norm", None) is not None: + with tf.name_scope(self.norm.name): + self.norm.build([None, None, self.config.embed_dim]) + if getattr(self, "dropout", None) is not None: + with tf.name_scope(self.dropout.name): + self.dropout.build(None) + + def call( + self, pixel_values: tf.Tensor, bool_masked_pos: bool = None, training: bool = False + ) -> Tuple[tf.Tensor, Tuple[int, int]]: + embeddings, output_dimensions = self.patch_embeddings(pixel_values, training=training) + embeddings = 
self.norm(embeddings, training=training) + batch_size, seq_len, _ = shape_list(embeddings) + + if bool_masked_pos is not None: + mask_tokens = tf.repeat(self.mask_token, batch_size, 0) + mask_tokens = tf.repeat(mask_tokens, seq_len, 1) + # replace the masked visual tokens by mask_tokens + mask = tf.expand_dims(bool_masked_pos, -1) + mask = tf.cast(mask, mask_tokens.dtype) + + embeddings = embeddings * (1.0 - mask) + mask_tokens * mask + + if self.position_embeddings is not None: + embeddings = embeddings + self.position_embeddings + + embeddings = self.dropout(embeddings, training=training) + + return embeddings, output_dimensions + + +class TFSwinPatchEmbeddings(keras.layers.Layer): + """ + Image to Patch Embedding. + """ + + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + image_size, patch_size = config.image_size, config.patch_size + num_channels, hidden_size = config.num_channels, config.embed_dim + image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) + patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) + num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.num_patches = num_patches + self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1]) + + self.projection = keras.layers.Conv2D( + filters=hidden_size, + kernel_size=self.patch_size, + strides=self.patch_size, + padding="valid", + name="projection", + ) + + def maybe_pad(self, pixel_values: tf.Tensor, height: int, width: int) -> tf.Tensor: + if width % self.patch_size[1] != 0: + pad_values = ((0, 0), (0, 0), (0, 0), (0, self.patch_size[1] - width % self.patch_size[1])) + pixel_values = tf.pad(pixel_values, pad_values) + if height % self.patch_size[0] != 0: + pad_values = ((0, 0), (0, 0), (0, self.patch_size[0] - height % self.patch_size[0]), (0, 0)) + pixel_values = tf.pad(pixel_values, pad_values) + return pixel_values + + def call(self, pixel_values: tf.Tensor, training: bool = False) -> Tuple[tf.Tensor, Tuple[int, int]]: + _, num_channels, height, width = shape_list(pixel_values) + if tf.executing_eagerly() and num_channels != self.num_channels: + raise ValueError( + "Make sure that the channel dimension of the pixel values match with the one set in the configuration." + ) + # pad the input to be divisible by self.patch_size, if needed + pixel_values = self.maybe_pad(pixel_values, height, width) + + # B,C,H,W -> B,H,W,C + pixel_values = tf.transpose(pixel_values, (0, 2, 3, 1)) + + embeddings = self.projection(pixel_values, training=training) + + # B,H,W,C -> B,C,H,W + embeddings = tf.transpose(embeddings, (0, 3, 1, 2)) + + batch_size, channels, height, width = shape_list(embeddings) + output_dimensions = (height, width) + + embeddings = tf.reshape(embeddings, (batch_size, channels, -1)) + embeddings = tf.transpose(embeddings, (0, 2, 1)) + return embeddings, output_dimensions + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "projection", None) is not None: + with tf.name_scope(self.projection.name): + self.projection.build([None, None, None, self.num_channels]) + + +class TFSwinPatchMerging(keras.layers.Layer): + """ + Patch Merging Layer. + + Args: + input_resolution (`Tuple[int]`): + Resolution of input feature. + dim (`int`): + Number of input channels. 
+ norm_layer (`keras.layers.Layer`, *optional*, defaults to `keras.layers.LayerNormalization`): + Normalization layer class. + """ + + def __init__( + self, input_resolution: Tuple[int, int], dim: int, norm_layer: Optional[Callable] = None, **kwargs + ) -> None: + super().__init__(**kwargs) + self.input_resolution = input_resolution + self.dim = dim + self.reduction = keras.layers.Dense(2 * dim, use_bias=False, name="reduction") + if norm_layer is None: + # Use same default epsilon as PyTorch + self.norm = keras.layers.LayerNormalization(epsilon=1e-5, name="norm") + else: + self.norm = norm_layer(name="norm") + + def maybe_pad(self, input_feature: tf.Tensor, height: int, width: int) -> tf.Tensor: + should_pad = (height % 2 == 1) or (width % 2 == 1) + if should_pad: + pad_values = ((0, 0), (0, height % 2), (0, width % 2), (0, 0)) + input_feature = tf.pad(input_feature, pad_values) + + return input_feature + + def call(self, input_feature: tf.Tensor, input_dimensions: Tuple[int, int], training: bool = False) -> tf.Tensor: + height, width = input_dimensions + # the second dimension of `input_feature` is the sequence length height * width + batch_size, _, num_channels = shape_list(input_feature) + + input_feature = tf.reshape(input_feature, (batch_size, height, width, num_channels)) + # pad input so that height and width are even, if needed + input_feature = self.maybe_pad(input_feature, height, width) + # [batch_size, height/2, width/2, num_channels] + input_feature_0 = input_feature[:, 0::2, 0::2, :] + # [batch_size, height/2, width/2, num_channels] + input_feature_1 = input_feature[:, 1::2, 0::2, :] + # [batch_size, height/2, width/2, num_channels] + input_feature_2 = input_feature[:, 0::2, 1::2, :] + # [batch_size, height/2, width/2, num_channels] + input_feature_3 = input_feature[:, 1::2, 1::2, :] + # batch_size height/2 width/2 4*num_channels + input_feature = tf.concat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1) + input_feature = tf.reshape( + input_feature, (batch_size, -1, 4 * num_channels) + ) # batch_size height/2*width/2 4*C + + input_feature = self.norm(input_feature, training=training) + input_feature = self.reduction(input_feature, training=training) + + return input_feature + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "reduction", None) is not None: + with tf.name_scope(self.reduction.name): + self.reduction.build([None, None, 4 * self.dim]) + if getattr(self, "norm", None) is not None: + with tf.name_scope(self.norm.name): + self.norm.build([None, None, 4 * self.dim]) + + +class TFSwinDropPath(keras.layers.Layer): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" + + def __init__(self, drop_prob: Optional[float] = None, scale_by_keep: bool = True, **kwargs) -> None: + super().__init__(**kwargs) + self.drop_prob = drop_prob + self.scale_by_keep = scale_by_keep + + def call(self, input: tf.Tensor, training: bool = False) -> tf.Tensor: + return drop_path(input, self.drop_prob, training, self.scale_by_keep) + + +class TFSwinSelfAttention(keras.layers.Layer): + def __init__(self, config: SwinConfig, dim: int, num_heads: int, **kwargs) -> None: + super().__init__(**kwargs) + if dim % num_heads != 0: + raise ValueError( + f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})" + ) + + self.num_attention_heads = num_heads + self.attention_head_size = int(dim / num_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + window_size = 
config.window_size + self.window_size = ( + window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size) + ) + + self.query = keras.layers.Dense( + self.all_head_size, + kernel_initializer=get_initializer(config.initializer_range), + use_bias=config.qkv_bias, + name="query", + ) + self.key = keras.layers.Dense( + self.all_head_size, + kernel_initializer=get_initializer(config.initializer_range), + use_bias=config.qkv_bias, + name="key", + ) + self.value = keras.layers.Dense( + self.all_head_size, + kernel_initializer=get_initializer(config.initializer_range), + use_bias=config.qkv_bias, + name="value", + ) + + self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob) + + def build(self, input_shape: tf.TensorShape) -> None: + self.relative_position_bias_table = self.add_weight( + shape=(((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1)), self.num_attention_heads), + initializer="zeros", + name="relative_position_bias_table", + ) + self.relative_position_index = self.add_weight( + shape=(self.window_size[0] ** 2, self.window_size[1] ** 2), + trainable=False, + dtype=tf.int32, + name="relative_position_index", + ) + + # get pair-wise relative position index for each token inside the window + coords_h = tf.range(self.window_size[0]) + coords_w = tf.range(self.window_size[1]) + coords = tf.stack(tf.meshgrid(coords_h, coords_w, indexing="ij")) + coords_flatten = tf.reshape(coords, (shape_list(coords)[0], -1)) + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] + relative_coords = tf.transpose(relative_coords, (1, 2, 0)) + + stack_0, stack_1 = tf.unstack(relative_coords, axis=2) + stack_0 += self.window_size[0] - 1 + stack_0 *= 2 * self.window_size[1] - 1 + stack_1 += self.window_size[1] - 1 + relative_coords = tf.stack([stack_0, stack_1], axis=2) + + self.relative_position_index.assign(tf.cast(tf.reduce_sum(relative_coords, axis=-1), tf.int32)) + + if self.built: + return + self.built = True + if getattr(self, "query", None) is not None: + with tf.name_scope(self.query.name): + self.query.build([None, None, self.all_head_size]) + if getattr(self, "key", None) is not None: + with tf.name_scope(self.key.name): + self.key.build([None, None, self.all_head_size]) + if getattr(self, "value", None) is not None: + with tf.name_scope(self.value.name): + self.value.build([None, None, self.all_head_size]) + + def transpose_for_scores(self, x: tf.Tensor) -> tf.Tensor: + new_x_shape = shape_list(x)[:-1] + [self.num_attention_heads, self.attention_head_size] + x = tf.reshape(x, new_x_shape) + return tf.transpose(x, (0, 2, 1, 3)) + + def call( + self, + hidden_states: tf.Tensor, + attention_mask: tf.Tensor | None = None, + head_mask: tf.Tensor | None = None, + output_attentions: bool = False, + training: bool = False, + ) -> Tuple[tf.Tensor, ...]: + batch_size, dim, _ = shape_list(hidden_states) + mixed_query_layer = self.query(hidden_states) + + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + query_layer = self.transpose_for_scores(mixed_query_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. 
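+ # hidden_states arrives as stacked windows of shape (num_windows * images, M * M, channels) for an + # (M, M) window, so the score matrix below has shape (num_windows * images, num_heads, M * M, M * M); + # the relative position bias and, for shifted windows, the attention mask are then added to it.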
+ attention_scores = tf.matmul(query_layer, tf.transpose(key_layer, (0, 1, 3, 2))) + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + relative_position_bias = tf.gather( + self.relative_position_bias_table, tf.reshape(self.relative_position_index, (-1,)) + ) + relative_position_bias = tf.reshape( + relative_position_bias, + (self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1), + ) + + relative_position_bias = tf.transpose(relative_position_bias, (2, 0, 1)) + attention_scores = attention_scores + tf.expand_dims(relative_position_bias, 0) + + if attention_mask is not None: + # Apply the attention mask (precomputed for all layers in the TFSwinModel call() function) + mask_shape = shape_list(attention_mask)[0] + attention_scores = tf.reshape( + attention_scores, (batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim) + ) + attention_mask = tf.expand_dims(attention_mask, 1) + attention_mask = tf.expand_dims(attention_mask, 0) + attention_scores = attention_scores + attention_mask + attention_scores = tf.reshape(attention_scores, (-1, self.num_attention_heads, dim, dim)) + + # Normalize the attention scores to probabilities. + attention_probs = tf.nn.softmax(attention_scores, axis=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs, training=training) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = tf.matmul(attention_probs, value_layer) + context_layer = tf.transpose(context_layer, (0, 2, 1, 3)) + new_context_layer_shape = shape_list(context_layer)[:-2] + [ + self.all_head_size, + ] + context_layer = tf.reshape(context_layer, new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + return outputs + + +class TFSwinSelfOutput(keras.layers.Layer): + def __init__(self, config: SwinConfig, dim: int, **kwargs) -> None: + super().__init__(**kwargs) + self.dense = keras.layers.Dense(dim, name="dense") + self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob, name="dropout") + self.dim = dim + + def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states, training=training) + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "dense", None) is not None: + with tf.name_scope(self.dense.name): + self.dense.build([None, None, self.dim]) + if getattr(self, "dropout", None) is not None: + with tf.name_scope(self.dropout.name): + self.dropout.build(None) + + +class TFSwinAttention(keras.layers.Layer): + def __init__(self, config: SwinConfig, dim: int, num_heads: int, **kwargs) -> None: + super().__init__(**kwargs) + self.self = TFSwinSelfAttention(config, dim, num_heads, name="self") + self.self_output = TFSwinSelfOutput(config, dim, name="output") + self.pruned_heads = set() + + def prune_heads(self, heads): + """ + Prunes heads of the model.
See base class `PreTrainedModel`. heads: dict of {layer_num: list of heads to prune in this layer}. + """ + raise NotImplementedError + + def call( + self, + hidden_states: tf.Tensor, + attention_mask: tf.Tensor | None = None, + head_mask: tf.Tensor | None = None, + output_attentions: bool = False, + training: bool = False, + ) -> tf.Tensor: + self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions, training=training) + attention_output = self.self_output(self_outputs[0], hidden_states, training=training) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "self", None) is not None: + with tf.name_scope(self.self.name): + self.self.build(None) + if getattr(self, "self_output", None) is not None: + with tf.name_scope(self.self_output.name): + self.self_output.build(None) + + +class TFSwinIntermediate(keras.layers.Layer): + def __init__(self, config: SwinConfig, dim: int, **kwargs) -> None: + super().__init__(**kwargs) + self.dense = keras.layers.Dense(int(config.mlp_ratio * dim), name="dense") + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + self.dim = dim + + def call(self, hidden_states: tf.Tensor) -> tf.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "dense", None) is not None: + with tf.name_scope(self.dense.name): + self.dense.build([None, None, self.dim]) + + +class TFSwinOutput(keras.layers.Layer): + def __init__(self, config: SwinConfig, dim: int, **kwargs) -> None: + super().__init__(**kwargs) + self.dense = keras.layers.Dense(dim, name="dense") + # pass the layer name as a keyword so it is not consumed as Dropout's `noise_shape` argument + self.dropout = keras.layers.Dropout(config.hidden_dropout_prob, name="dropout") + self.config = config + self.dim = dim + + def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states, training=training) + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "dense", None) is not None: + with tf.name_scope(self.dense.name): + self.dense.build([None, None, int(self.config.mlp_ratio * self.dim)]) + + +class TFSwinLayer(keras.layers.Layer): + def __init__( + self, config, dim, input_resolution: Tuple[int, int], num_heads: int, shift_size: int = 0, **kwargs + ) -> None: + super().__init__(**kwargs) + self.chunk_size_feed_forward = config.chunk_size_feed_forward + min_res = tf.reduce_min(input_resolution) + self.window_size = min_res if min_res <= config.window_size else config.window_size + self.shift_size = 0 if min_res <= self.window_size else shift_size + self.input_resolution = input_resolution + + self.layernorm_before = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_before") + self.attention = TFSwinAttention(config, dim, num_heads, name="attention") + self.drop_path = ( + TFSwinDropPath(config.drop_path_rate, name="drop_path") + if config.drop_path_rate > 0.0 + else keras.layers.Activation("linear", name="drop_path") + ) + self.layernorm_after = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_after") + self.intermediate = 
TFSwinIntermediate(config, dim, name="intermediate") + self.swin_output = TFSwinOutput(config, dim, name="output") + self.dim = dim + + def get_attn_mask(self, height: int, width: int, window_size: int, shift_size: int) -> tf.Tensor | None: + img_mask = tf.zeros((height, width)) + height_slices = ((0, -window_size), (-window_size, -shift_size), (-shift_size, -1)) + width_slices = ((0, -window_size), (-window_size, -shift_size), (-shift_size, -1)) + + # calculate attention mask for SW-MSA + if shift_size > 0: + count = 0 + for height_slice in height_slices: + for width_slice in width_slices: + height_inds = tf.range(height_slice[0] % height, height_slice[1] % height + 1) + width_inds = tf.range(width_slice[0] % width, width_slice[1] % width + 1) + indices = tf.reshape(tf.stack(tf.meshgrid(height_inds, width_inds), axis=-1), (-1, 2)) + if len(indices) >= 1: + updates = tf.ones((len(indices),), dtype=img_mask.dtype) * count + img_mask = tf.tensor_scatter_nd_update(img_mask, indices, updates) + count += 1 + + img_mask = tf.expand_dims(img_mask, -1) + img_mask = tf.expand_dims(img_mask, 0) + + mask_windows = window_partition(img_mask, window_size) + mask_windows = tf.reshape(mask_windows, (-1, window_size * window_size)) + attn_mask = tf.expand_dims(mask_windows, 1) - tf.expand_dims(mask_windows, 2) + attn_mask = tf.where(attn_mask != 0, float(-100.0), attn_mask) + attn_mask = tf.where(attn_mask == 0, float(0.0), attn_mask) + return attn_mask + + def maybe_pad( + self, hidden_states: tf.Tensor, window_size: int, height: int, width: int + ) -> Tuple[tf.Tensor, tf.Tensor]: + pad_right = (window_size - width % window_size) % window_size + pad_bottom = (window_size - height % window_size) % window_size + pad_values = [[0, 0], [0, pad_bottom], [0, pad_right], [0, 0]] + hidden_states = tf.pad(hidden_states, pad_values) + pad_values = tf.reshape(pad_values, (-1,)) + return hidden_states, pad_values + + def call( + self, + hidden_states: tf.Tensor, + input_dimensions: Tuple[int, int], + head_mask: tf.Tensor | None = None, + output_attentions: bool = False, + training: bool = False, + ) -> tf.Tensor: + # if window size is larger than input resolution, we don't partition windows + min_res = tf.reduce_min(input_dimensions) + shift_size = 0 if min_res <= self.window_size else self.shift_size + window_size = min_res if min_res <= self.window_size else self.window_size + + height, width = input_dimensions + batch_size, _, channels = shape_list(hidden_states) + shortcut = hidden_states + + hidden_states = self.layernorm_before(hidden_states, training=training) + hidden_states = tf.reshape(hidden_states, (batch_size, height, width, channels)) + # pad hidden_states to multiples of window size + hidden_states, pad_values = self.maybe_pad(hidden_states, window_size, height, width) + + _, height_pad, width_pad, _ = shape_list(hidden_states) + # cyclic shift + if shift_size > 0: + shifted_hidden_states = tf.roll(hidden_states, shift=(-shift_size, -shift_size), axis=(1, 2)) + else: + shifted_hidden_states = hidden_states + + # partition windows + hidden_states_windows = window_partition(shifted_hidden_states, window_size) + hidden_states_windows = tf.reshape(hidden_states_windows, (-1, window_size * window_size, channels)) + attn_mask = self.get_attn_mask( + height=height_pad, width=width_pad, window_size=window_size, shift_size=shift_size + ) + + attention_outputs = self.attention( + hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions, training=training + ) + + attention_output = 
attention_outputs[0] + + attention_windows = tf.reshape(attention_output, (-1, window_size, window_size, channels)) + shifted_windows = window_reverse(attention_windows, window_size, height_pad, width_pad) + + # reverse cyclic shift + if shift_size > 0: + attention_windows = tf.roll(shifted_windows, shift=(shift_size, shift_size), axis=(1, 2)) + else: + attention_windows = shifted_windows + + was_padded = pad_values[3] > 0 or pad_values[5] > 0 + if was_padded: + attention_windows = attention_windows[:, :height, :width, :] + + attention_windows = tf.reshape(attention_windows, (batch_size, height * width, channels)) + + hidden_states = shortcut + self.drop_path(attention_windows, training=training) + + layer_output = self.layernorm_after(hidden_states, training=training) + layer_output = self.intermediate(layer_output) + layer_output = hidden_states + self.swin_output(layer_output, training=training) + + layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) + return layer_outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "layernorm_before", None) is not None: + with tf.name_scope(self.layernorm_before.name): + self.layernorm_before.build([None, None, self.dim]) + if getattr(self, "attention", None) is not None: + with tf.name_scope(self.attention.name): + self.attention.build(None) + if getattr(self, "drop_path", None) is not None: + with tf.name_scope(self.drop_path.name): + self.drop_path.build(None) + if getattr(self, "layernorm_after", None) is not None: + with tf.name_scope(self.layernorm_after.name): + self.layernorm_after.build([None, None, self.dim]) + if getattr(self, "intermediate", None) is not None: + with tf.name_scope(self.intermediate.name): + self.intermediate.build(None) + if getattr(self, "swin_output", None) is not None: + with tf.name_scope(self.swin_output.name): + self.swin_output.build(None) + + +class TFSwinStage(keras.layers.Layer): + def __init__( + self, + config: SwinConfig, + dim: int, + input_resolution: Tuple[int, int], + depth: int, + num_heads: int, + drop_path: List[float], + downsample: Optional[Callable], + **kwargs, + ) -> None: + super().__init__(**kwargs) + self.config = config + self.dim = dim + self.blocks = [ + TFSwinLayer( + config=config, + dim=dim, + input_resolution=input_resolution, + num_heads=num_heads, + shift_size=0 if (i % 2 == 0) else config.window_size // 2, + name=f"blocks.{i}", + ) + for i in range(depth) + ] + + # patch merging layer + if downsample is not None: + self.downsample = downsample( + input_resolution, + dim=dim, + norm_layer=partial(keras.layers.LayerNormalization, epsilon=1e-5), + name="downsample", + ) + else: + self.downsample = None + + self.pointing = False + + def call( + self, + hidden_states: tf.Tensor, + input_dimensions: Tuple[int, int], + head_mask: tf.Tensor | None = None, + output_attentions: Optional[bool] = False, + training: bool = False, + ) -> Tuple[tf.Tensor, ...]: + height, width = input_dimensions + for i, layer_module in enumerate(self.blocks): + layer_head_mask = head_mask[i] if head_mask is not None else None + + layer_outputs = layer_module( + hidden_states, input_dimensions, layer_head_mask, output_attentions, training=training + ) + + hidden_states = layer_outputs[0] + + if self.downsample is not None: + height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2 + output_dimensions = (height, width, height_downsampled, width_downsampled) + hidden_states = 
self.downsample(layer_outputs[0], input_dimensions, training=training) + else: + output_dimensions = (height, width, height, width) + + stage_outputs = (hidden_states, output_dimensions) + + if output_attentions: + stage_outputs += layer_outputs[1:] + return stage_outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "downsample", None) is not None: + with tf.name_scope(self.downsample.name): + self.downsample.build(None) + if getattr(self, "blocks", None) is not None: + for layer in self.blocks: + with tf.name_scope(layer.name): + layer.build(None) + + +class TFSwinEncoder(keras.layers.Layer): + def __init__(self, config: SwinConfig, grid_size: Tuple[int, int], **kwargs): + super().__init__(**kwargs) + self.num_layers = len(config.depths) + self.config = config + dpr = list((tf.linspace(0, 1, sum(config.depths)) * config.drop_path_rate).numpy()) + self.layers = [ + TFSwinStage( + config=config, + dim=int(config.embed_dim * 2**i_layer), + input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)), + depth=config.depths[i_layer], + num_heads=config.num_heads[i_layer], + drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])], + downsample=TFSwinPatchMerging if (i_layer < self.num_layers - 1) else None, + name=f"layers.{i_layer}", + ) + for i_layer in range(self.num_layers) + ] + + self.gradient_checkpointing = False + + def call( + self, + hidden_states: tf.Tensor, + input_dimensions: Tuple[int, int], + head_mask: tf.Tensor | None = None, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + training: bool = False, + ) -> Union[Tuple[tf.Tensor, ...], TFSwinEncoderOutput]: + all_input_dimensions = () + all_hidden_states = () if output_hidden_states else None + all_reshaped_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + + if output_hidden_states: + batch_size, _, hidden_size = shape_list(hidden_states) + # rearrange b (h w) c -> b c h w + reshaped_hidden_state = tf.reshape(hidden_states, (batch_size, *input_dimensions, hidden_size)) + reshaped_hidden_state = tf.transpose(reshaped_hidden_state, (0, 3, 1, 2)) + all_hidden_states += (hidden_states,) + all_reshaped_hidden_states += (reshaped_hidden_state,) + + for i, layer_module in enumerate(self.layers): + layer_head_mask = head_mask[i] if head_mask is not None else None + + layer_outputs = layer_module( + hidden_states, input_dimensions, layer_head_mask, output_attentions, training=training + ) + + hidden_states = layer_outputs[0] + output_dimensions = layer_outputs[1] + + input_dimensions = (output_dimensions[-2], output_dimensions[-1]) + all_input_dimensions += (input_dimensions,) + + if output_hidden_states: + batch_size, _, hidden_size = shape_list(hidden_states) + # rearrange b (h w) c -> b c h w + reshaped_hidden_state = tf.reshape(hidden_states, (batch_size, *input_dimensions, hidden_size)) + reshaped_hidden_state = tf.transpose(reshaped_hidden_state, (0, 3, 1, 2)) + all_hidden_states += (hidden_states,) + all_reshaped_hidden_states += (reshaped_hidden_state,) + + if output_attentions: + all_self_attentions += layer_outputs[2:] + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) + + return TFSwinEncoderOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + 
reshaped_hidden_states=all_reshaped_hidden_states, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "layers", None) is not None: + for layer in self.layers: + with tf.name_scope(layer.name): + layer.build(None) + + +class TFSwinPreTrainedModel(TFPreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = SwinConfig + base_model_prefix = "swin" + main_input_name = "pixel_values" + + +SWIN_START_DOCSTRING = r""" + This model is a Tensorflow + [keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a + regular Tensorflow Module and refer to the Tensorflow documentation for all matters related to general usage and + behavior. + + Parameters: + config ([`SwinConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +SWIN_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`] + for details. + head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +def normalize_data_format(value: str) -> str: + """ + From tensorflow addons + https://github.com/tensorflow/addons/blob/8cec33fcaaf1cf90aec7bdd55a0fcdbb251ce5c2/tensorflow_addons/utils/keras_utils.py#L71 + """ + if value is None: + value = keras.backend.image_data_format() + data_format = value.lower() + if data_format not in {"channels_first", "channels_last"}: + raise ValueError( + 'The `data_format` argument must be one of "channels_first", "channels_last". Received: ' + str(value) + ) + return data_format + + +class AdaptiveAveragePooling1D(keras.layers.Layer): + """ + Average 1D Pooling with adaptive kernel size. + + Args: + output_size: An integer or tuple/list of a single integer, specifying pooled_features. + The new size of output channels. + data_format: A string, + one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. + `channels_last` corresponds to inputs with shape `(batch, steps, channels)` while `channels_first` corresponds + to inputs with shape `(batch, channels, steps)`. + Input shape: + - If `data_format='channels_last'`: 3D tensor with shape `(batch, steps, channels)`. + - If `data_format='channels_first'`: 3D tensor with shape `(batch, channels, steps)`. + Output shape: + - If `data_format='channels_last'`: 3D tensor with shape `(batch_size, pooled_steps, channels)`.
+ - If `data_format='channels_first'`: 3D tensor with shape `(batch_size, channels, pooled_steps)`. + + Adapted from [tensorflow-addons' adaptive_pooling.py]( + https://github.com/tensorflow/addons/blob/8cec33fcaaf1cf90aec7bdd55a0fcdbb251ce5c2/tensorflow_addons/layers/adaptive_pooling.py#L90-L120 + ) + """ + + def __init__( + self, + output_size: Union[int, Iterable[int]], + reduce_function: Callable = tf.reduce_mean, + data_format: Optional[str] = None, + **kwargs, + ) -> None: + self.data_format = normalize_data_format(data_format) + self.reduce_function = reduce_function + self.output_size = (output_size,) if isinstance(output_size, int) else tuple(output_size) + super().__init__(**kwargs) + + def call(self, inputs: tf.Tensor, *args) -> tf.Tensor: + bins = self.output_size[0] + # tf.split with an integer count requires the pooled axis length to be divisible by `bins` + if self.data_format == "channels_last": + splits = tf.split(inputs, bins, axis=1) + splits = tf.stack(splits, axis=1) + out_vect = self.reduce_function(splits, axis=2) + else: + splits = tf.split(inputs, bins, axis=2) + splits = tf.stack(splits, axis=2) + out_vect = self.reduce_function(splits, axis=3) + return out_vect + + def compute_output_shape(self, input_shape: Iterable[int]) -> tf.TensorShape: + input_shape = tf.TensorShape(input_shape).as_list() + if self.data_format == "channels_last": + shape = tf.TensorShape([input_shape[0], self.output_size[0], input_shape[2]]) + else: + shape = tf.TensorShape([input_shape[0], input_shape[1], self.output_size[0]]) + return shape + + def get_config(self) -> Dict[str, Any]: + config = { + "output_size": self.output_size, + "data_format": self.data_format, + } + base_config = super().get_config() + return {**base_config, **config} + + +@keras_serializable +class TFSwinMainLayer(keras.layers.Layer): + config_class = SwinConfig + + def __init__( + self, config: SwinConfig, add_pooling_layer: bool = True, use_mask_token: bool = False, **kwargs + ) -> None: + super().__init__(**kwargs) + self.config = config + self.num_layers = len(config.depths) + self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1)) + + self.embeddings = TFSwinEmbeddings(config, use_mask_token=use_mask_token, name="embeddings") + self.encoder = TFSwinEncoder(config, self.embeddings.patch_grid, name="encoder") + + self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm") + self.pooler = AdaptiveAveragePooling1D(output_size=(1,)) if add_pooling_layer else None + + def get_input_embeddings(self) -> TFSwinPatchEmbeddings: + return self.embeddings.patch_embeddings + + def _prune_heads(self, heads_to_prune: Dict[int, List]): + """ + Prunes heads of the model.
See base class `PreTrainedModel`. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layers[layer].attention.prune_heads(heads) + + def get_head_mask(self, head_mask: Optional[Any]) -> List: + if head_mask is not None: + raise NotImplementedError + return [None] * len(self.config.depths) + + @unpack_inputs + def call( + self, + pixel_values: tf.Tensor | None = None, + bool_masked_pos: tf.Tensor | None = None, + head_mask: tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[TFSwinModelOutput, Tuple[tf.Tensor, ...]]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + # Prepare head mask if needed + # 1.0 in head_mask indicates we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask) + embedding_output, input_dimensions = self.embeddings( + pixel_values, bool_masked_pos=bool_masked_pos, training=training + ) + + encoder_outputs = self.encoder( + embedding_output, + input_dimensions, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + sequence_output = encoder_outputs[0] + sequence_output = self.layernorm(sequence_output, training=training) + + pooled_output = None + if self.pooler is not None: + batch_size, _, num_features = shape_list(sequence_output) + pooled_output = self.pooler(sequence_output) + pooled_output = tf.reshape(pooled_output, (batch_size, num_features)) + + if not return_dict: + output = (sequence_output, pooled_output) + encoder_outputs[1:] + return output + + return TFSwinModelOutput( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + reshaped_hidden_states=encoder_outputs.reshaped_hidden_states, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "embeddings", None) is not None: + with tf.name_scope(self.embeddings.name): + self.embeddings.build(None) + if getattr(self, "encoder", None) is not None: + with tf.name_scope(self.encoder.name): + self.encoder.build(None) + if getattr(self, "layernorm", None) is not None: + with tf.name_scope(self.layernorm.name): + self.layernorm.build([None, None, self.num_features]) + + +@add_start_docstrings( + "The bare Swin Model transformer outputting raw hidden-states without any specific head on top.", + SWIN_START_DOCSTRING, +) +class TFSwinModel(TFSwinPreTrainedModel): + def __init__( + self, config: SwinConfig, add_pooling_layer: bool = True, use_mask_token: bool = False, **kwargs + ) -> None: + super().__init__(config, **kwargs) + self.config = config + self.swin = TFSwinMainLayer(config, name="swin") + + 
@add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFSwinModelOutput, + config_class=_CONFIG_FOR_DOC, + modality="vision", + expected_output=_EXPECTED_OUTPUT_SHAPE, + ) + @unpack_inputs + def call( + self, + pixel_values: tf.Tensor | None = None, + bool_masked_pos: tf.Tensor | None = None, + head_mask: tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[TFSwinModelOutput, Tuple[tf.Tensor, ...]]: + r""" + bool_masked_pos (`tf.Tensor` of shape `(batch_size, num_patches)`, *optional*): + Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + swin_outputs = self.swin( + pixel_values=pixel_values, + bool_masked_pos=bool_masked_pos, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + return swin_outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "swin", None) is not None: + with tf.name_scope(self.swin.name): + self.swin.build(None) + + +class TFSwinPixelShuffle(keras.layers.Layer): + """TF layer implementation of torch.nn.PixelShuffle""" + + def __init__(self, upscale_factor: int, **kwargs) -> None: + super().__init__(**kwargs) + if not isinstance(upscale_factor, int) or upscale_factor < 2: + raise ValueError(f"upscale_factor must be an integer value >= 2 got {upscale_factor}") + self.upscale_factor = upscale_factor + + def call(self, x: tf.Tensor) -> tf.Tensor: + hidden_states = x + batch_size, _, _, num_input_channels = shape_list(hidden_states) + block_size_squared = self.upscale_factor**2 + output_depth = int(num_input_channels / block_size_squared) + # When the number of output channels >= 2, PyTorch's PixelShuffle and + # TF's depth_to_space differ in their output as the order of channels selected for combining + # is a permutation of the other c.f. 
+ # https://stackoverflow.com/questions/68272502/tf-depth-to-space-not-same-as-torchs-pixelshuffle-when-output-channels-1 + permutation = tf.constant( + [[i + j * block_size_squared for i in range(block_size_squared) for j in range(output_depth)]] + ) + hidden_states = tf.gather(params=hidden_states, indices=tf.tile(permutation, [batch_size, 1]), batch_dims=-1) + hidden_states = tf.nn.depth_to_space(hidden_states, block_size=self.upscale_factor, data_format="NHWC") + return hidden_states + + +class TFSwinDecoder(keras.layers.Layer): + def __init__(self, config: SwinConfig, **kwargs): + super().__init__(**kwargs) + self.conv2d = keras.layers.Conv2D( + filters=config.encoder_stride**2 * config.num_channels, kernel_size=1, strides=1, name="0" + ) + self.pixel_shuffle = TFSwinPixelShuffle(config.encoder_stride, name="1") + self.config = config + + def call(self, x: tf.Tensor) -> tf.Tensor: + hidden_states = x + # B,C,H,W -> B,H,W,C + hidden_states = tf.transpose(hidden_states, (0, 2, 3, 1)) + hidden_states = self.conv2d(hidden_states) + hidden_states = self.pixel_shuffle(hidden_states) + # B,H,W,C -> B,C,H,W + hidden_states = tf.transpose(hidden_states, (0, 3, 1, 2)) + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "conv2d", None) is not None: + with tf.name_scope(self.conv2d.name): + self.conv2d.build([None, None, None, self.config.hidden_size]) + if getattr(self, "pixel_shuffle", None) is not None: + with tf.name_scope(self.pixel_shuffle.name): + self.pixel_shuffle.build(None) + + +@add_start_docstrings( + "Swin Model with a decoder on top for masked image modeling, as proposed in" + " [SimMIM](https://arxiv.org/abs/2111.09886).", + SWIN_START_DOCSTRING, +) +class TFSwinForMaskedImageModeling(TFSwinPreTrainedModel): + def __init__(self, config: SwinConfig): + super().__init__(config) + + self.swin = TFSwinMainLayer(config, add_pooling_layer=False, use_mask_token=True, name="swin") + + self.decoder = TFSwinDecoder(config, name="decoder") + + @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=TFSwinMaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC) + @unpack_inputs + def call( + self, + pixel_values: tf.Tensor | None = None, + bool_masked_pos: tf.Tensor | None = None, + head_mask: tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[Tuple, TFSwinMaskedImageModelingOutput]: + r""" + bool_masked_pos (`tf.Tensor` of shape `(batch_size, num_patches)`): + Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). 
+ + Returns: + + Examples: + ```python + >>> from transformers import AutoImageProcessor, TFSwinForMaskedImageModeling + >>> import tensorflow as tf + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224") + >>> model = TFSwinForMaskedImageModeling.from_pretrained("microsoft/swin-tiny-patch4-window7-224") + + >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2 + >>> pixel_values = image_processor(images=image, return_tensors="tf").pixel_values + >>> # create random boolean mask of shape (batch_size, num_patches) + >>> bool_masked_pos = tf.random.uniform((1, num_patches)) >= 0.5 + + >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos) + >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction + >>> list(reconstructed_pixel_values.shape) + [1, 3, 224, 224] + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.swin( + pixel_values, + bool_masked_pos=bool_masked_pos, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + sequence_output = outputs[0] + # Reshape to (batch_size, num_channels, height, width) + sequence_output = tf.transpose(sequence_output, (0, 2, 1)) + batch_size, num_channels, sequence_length = shape_list(sequence_output) + height = width = int(sequence_length**0.5) + sequence_output = tf.reshape(sequence_output, (batch_size, num_channels, height, width)) + + # Reconstruct pixel values + reconstructed_pixel_values = self.decoder(sequence_output) + + masked_im_loss = None + if bool_masked_pos is not None: + size = self.config.image_size // self.config.patch_size + bool_masked_pos = tf.reshape(bool_masked_pos, (-1, size, size)) + mask = tf.repeat(bool_masked_pos, self.config.patch_size, 1) + mask = tf.repeat(mask, self.config.patch_size, 2) + mask = tf.expand_dims(mask, 1) + mask = tf.cast(mask, tf.float32) + + reconstruction_loss = keras.losses.mean_absolute_error( + # Swap axes as metric calculation reduces over the final dimension + tf.transpose(pixel_values, (1, 2, 3, 0)), + tf.transpose(reconstructed_pixel_values, (1, 2, 3, 0)), + ) + reconstruction_loss = tf.expand_dims(reconstruction_loss, 0) + total_loss = tf.reduce_sum(reconstruction_loss * mask) + num_masked_pixels = (tf.reduce_sum(mask) + 1e-5) * self.config.num_channels + masked_im_loss = total_loss / num_masked_pixels + masked_im_loss = tf.reshape(masked_im_loss, (1,)) + + if not return_dict: + output = (reconstructed_pixel_values,) + outputs[2:] + return ((masked_im_loss,) + output) if masked_im_loss is not None else output + + return TFSwinMaskedImageModelingOutput( + loss=masked_im_loss, + reconstruction=reconstructed_pixel_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + reshaped_hidden_states=outputs.reshaped_hidden_states, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "swin", None) is not None: + with tf.name_scope(self.swin.name): + self.swin.build(None) + if getattr(self, "decoder", None) is not None: + with tf.name_scope(self.decoder.name): + self.decoder.build(None) + + +@add_start_docstrings( + """ + Swin Model transformer with an image classification head on top (a 
linear layer on top of the final hidden state of + the [CLS] token) e.g. for ImageNet. + """, + SWIN_START_DOCSTRING, +) +class TFSwinForImageClassification(TFSwinPreTrainedModel, TFSequenceClassificationLoss): + def __init__(self, config: SwinConfig): + super().__init__(config) + + self.num_labels = config.num_labels + self.swin = TFSwinMainLayer(config, name="swin") + + # Classifier head + self.classifier = ( + keras.layers.Dense(config.num_labels, name="classifier") + if config.num_labels > 0 + else keras.layers.Activation("linear", name="classifier") + ) + + @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_IMAGE_CLASS_CHECKPOINT, + output_type=TFSwinImageClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, + ) + @unpack_inputs + def call( + self, + pixel_values: tf.Tensor | None = None, + head_mask: tf.Tensor | None = None, + labels: tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[Tuple[tf.Tensor, ...], TFSwinImageClassifierOutput]: + r""" + labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): + Labels for computing the image classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.swin( + pixel_values, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + pooled_output = outputs[1] + + logits = self.classifier(pooled_output, training=training) + + loss = None if labels is None else self.hf_compute_loss(labels, logits) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TFSwinImageClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + reshaped_hidden_states=outputs.reshaped_hidden_states, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "swin", None) is not None: + with tf.name_scope(self.swin.name): + self.swin.build(None) + if getattr(self, "classifier", None) is not None: + if hasattr(self.classifier, "name"): + with tf.name_scope(self.classifier.name): + self.classifier.build([None, None, self.swin.num_features])
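For reference, a minimal inference sketch for the `TFSwinForImageClassification` head defined above; it reuses the conventions already present in this file (the `_IMAGE_CLASS_CHECKPOINT` checkpoint and the COCO sample image from the docstring examples), and the printed label is the file's `_IMAGE_CLASS_EXPECTED_OUTPUT` for that checkpoint:

```python
from PIL import Image
import requests
import tensorflow as tf

from transformers import AutoImageProcessor, TFSwinForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Same checkpoint as _IMAGE_CLASS_CHECKPOINT above
image_processor = AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
model = TFSwinForImageClassification.from_pretrained("microsoft/swin-tiny-patch4-window7-224")

inputs = image_processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # (1, num_labels); 1000 ImageNet-1k classes for this checkpoint

predicted_class_idx = int(tf.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted_class_idx])  # "tabby, tabby cat"
```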